import numpy as np


class NeuralNetwork:
    def __init__(self, input_layer_size=3, hidden_layer_size=4, output_layer_size=2):
        self.input_layer_size = input_layer_size
        self.hidden_layer_size = hidden_layer_size
        self.output_layer_size = output_layer_size
        # Small random weights break symmetry between units; biases start at zero
        self.weights_input_to_hidden = np.random.uniform(-0.5, 0.5, (self.hidden_layer_size, self.input_layer_size))
        self.weights_hidden_to_output = np.random.uniform(-0.5, 0.5, (self.output_layer_size, self.hidden_layer_size))
        self.bias_input_to_hidden = np.zeros((self.hidden_layer_size, 1))
        self.bias_hidden_to_output = np.zeros((self.output_layer_size, 1))
        self.epochs = 3000  # default, overridden by learning()
        self.learning_rate = 0.1  # default, overridden by learning()

    def feedforward(self, data):
        # Forward propagation (input layer -> hidden layer)
        hidden_raw = self.bias_input_to_hidden + self.weights_input_to_hidden @ data
        self.hidden = 1 / (1 + np.exp(-hidden_raw))  # sigmoid activation
        # Forward propagation (hidden layer -> output layer)
        output_raw = self.bias_hidden_to_output + self.weights_hidden_to_output @ self.hidden
        output = 1 / (1 + np.exp(-output_raw))  # sigmoid activation
        return output

    def backprop(self, data, output, result):
        # Backpropagation (output layer)
        delta_output = output - result
        # Compute the hidden-layer error with the *current* output weights,
        # before those weights are updated below
        delta_hidden = np.transpose(self.weights_hidden_to_output) @ delta_output * (self.hidden * (1 - self.hidden))
        self.weights_hidden_to_output += -self.learning_rate * delta_output @ np.transpose(self.hidden)
        self.bias_hidden_to_output += -self.learning_rate * delta_output
        # Backpropagation (hidden layer)
        self.weights_input_to_hidden += -self.learning_rate * delta_hidden @ np.transpose(data)
        self.bias_input_to_hidden += -self.learning_rate * delta_hidden

    def get(self, data):
        # Run a single sample through the trained network
        data = np.reshape(data, (-1, 1))  # ensure a column vector
        return self.feedforward(data)

    def learning(self, dataset, results, epochs, learning_rate):
        self.epochs = epochs
        self.learning_rate = learning_rate
        e_loss = 0
        e_correct = 0
        # Learning loop: one weight update per sample (stochastic gradient descent)
        for epoch in range(epochs):
            print(f"Epoch {epoch}")
            for data, result in zip(dataset, results):
                data = np.reshape(data, (-1, 1))
                result = np.reshape(result, (-1, 1))

                output = self.feedforward(data)
                # Loss / error calculation (mean squared error)
                e_loss += np.mean((output - result) ** 2)
                e_correct += int(np.argmax(output) == np.argmax(result))
                self.backprop(data, output, result)
            # Print some debug info between epochs
            print(f"Loss: {round(e_loss / len(dataset), 5)}")
            print(f"Accuracy: {round((e_correct / len(dataset)) * 100, 3)}%")
            e_loss = 0
            e_correct = 0

    def save(self, filename):
        np.savez(filename,
                 weights_input_to_hidden=self.weights_input_to_hidden,
                 weights_hidden_to_output=self.weights_hidden_to_output,
                 bias_input_to_hidden=self.bias_input_to_hidden,
                 bias_hidden_to_output=self.bias_hidden_to_output)

    def load(self, filename):
        with np.load(filename) as f:
            # No trailing commas here: a trailing comma would turn each
            # assignment into a one-element tuple instead of an array
            self.weights_input_to_hidden = f['weights_input_to_hidden']
            self.weights_hidden_to_output = f['weights_hidden_to_output']
            self.bias_input_to_hidden = f['bias_input_to_hidden']
            self.bias_hidden_to_output = f['bias_hidden_to_output']
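

# --- Usage sketch (not from the original source) ---
# A minimal, illustrative training run: the toy dataset, targets, epoch
# count, and filename below are assumptions chosen for demonstration,
# not values taken from the original code.
if __name__ == "__main__":
    np.random.seed(0)  # reproducible weight initialization

    # All 3-bit inputs; target is a one-hot "majority of bits set?" label,
    # which this small network can learn quickly
    dataset = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
                        [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])
    results = np.array([[1, 0], [1, 0], [1, 0], [0, 1],
                        [1, 0], [0, 1], [0, 1], [0, 1]])

    nn = NeuralNetwork(input_layer_size=3, hidden_layer_size=4, output_layer_size=2)
    nn.learning(dataset, results, epochs=200, learning_rate=0.1)  # prints per-epoch stats

    nn.save("network.npz")  # np.savez appends .npz if the name lacks it
    nn.load("network.npz")
    print(nn.get([1, 0, 1]))  # 2x1 column of output activations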