Jul-10-2018, 04:46 PM
Hello,
I am very new to the topic of NN and to Python, so please forgive me if my question is silly. I tried to work it out myself but couldn't find the solution.
I started off with a simple two-layer network, using code I got from a tutorial. While playing around with different training data and activation functions, I wanted to add another hidden layer to see if it leads to better results. I thought I had adjusted all the necessary lines and added the new ones; however, I receive the following error:
"ValueError: too many values to unpack (expected 2)"
--> Even though I understand what the error is referring to, I have a hard time understanding what to adjust.
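For example, I know I can reproduce the same message with a simple snippet that unpacks three values into two names:

    values = (1, 2, 3)
    a, b = values   # ValueError: too many values to unpack (expected 2)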
I would appreciate a hint, thank you very much.
from numpy import exp, array, random, dot


class NeuronLayer():
    def __init__(self, number_of_neurons, number_of_inputs_per_neuron):
        self.synaptic_weights = 2 * random.random((number_of_inputs_per_neuron, number_of_neurons)) - 1


class NeuralNetwork():
    def __init__(self, layer1, layer2, layer3):
        self.layer1 = layer1
        self.layer2 = layer2
        self.layer3 = layer3

    # The Sigmoid function, which describes an S shaped curve.
    # We pass the weighted sum of the inputs through this function to
    # normalise them between 0 and 1.
    def __sigmoid(self, x):
        return 1 / (1 + exp(-x))

    # The derivative of the Sigmoid function.
    # This is the gradient of the Sigmoid curve.
    # It indicates how confident we are about the existing weight.
    def __sigmoid_derivative(self, x):
        return x * (1 - x)

    # We train the neural network through a process of trial and error,
    # adjusting the synaptic weights each time.
    def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
        for iteration in range(number_of_training_iterations):
            # Pass the training set through our neural network
            output_from_layer_1, output_from_layer_2, output_from_layer_3 = self.think(training_set_inputs)

            # Calculate the error for layer 3 (the difference between the desired
            # output and the predicted output).
            layer3_error = training_set_outputs - output_from_layer_3
            layer3_delta = layer3_error * self.__sigmoid_derivative(output_from_layer_3)

            # Calculate the error for layer 2 (the difference between the desired
            # output and the predicted output).
            layer2_error = training_set_outputs - output_from_layer_2
            layer2_delta = layer2_error * self.__sigmoid_derivative(output_from_layer_2)

            # Calculate the error for layer 1 (by looking at the weights in layer 1,
            # we can determine by how much layer 1 contributed to the error in layer 2).
            layer1_error = layer2_delta.dot(self.layer2.synaptic_weights.T)
            layer1_delta = layer1_error * self.__sigmoid_derivative(output_from_layer_1)

            # Calculate how much to adjust the weights by
            layer1_adjustment = training_set_inputs.T.dot(layer1_delta)
            layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)
            layer3_adjustment = output_from_layer_2.T.dot(layer3_delta)

            # Adjust the weights.
            self.layer1.synaptic_weights += layer1_adjustment
            self.layer2.synaptic_weights += layer2_adjustment
            self.layer3.synaptic_weights += layer3_adjustment

    # The neural network thinks.
    def think(self, inputs):
        output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1.synaptic_weights))
        output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.layer2.synaptic_weights))
        output_from_layer3 = self.__sigmoid(dot(output_from_layer2, self.layer3.synaptic_weights))
        return output_from_layer1, output_from_layer2, output_from_layer3

    # The neural network prints its weights
    def print_weights(self):
        print(" Layer 1 (3 neurons, with 3 inputs): ")
        print(self.layer1.synaptic_weights)
        print(" Layer 2 (3 neurons, with 3 inputs):")
        print(self.layer2.synaptic_weights)
        print(" Layer 3 (1 neuron, with 3 inputs):")
        print(self.layer3.synaptic_weights)


if __name__ == "__main__":
    # Seed the random number generator
    random.seed(1)

    # Create layer 1 (3 neurons, each with 3 inputs)
    layer1 = NeuronLayer(3, 3)

    # Create layer 2 (3 neurons with 3 inputs)
    layer2 = NeuronLayer(3, 3)

    # Create layer 3 (a single neuron with 3 inputs)
    layer3 = NeuronLayer(1, 3)

    # Combine the layers to create a neural network
    neural_network = NeuralNetwork(layer1, layer2, layer3)

    print("Stage 1) Random starting synaptic weights: ")
    neural_network.print_weights()

    # The training set. We have 7 examples, each consisting of 3 input values
    # and 1 output value.
    training_set_inputs = array([[0, 0, 7], [0, 7, 7], [7, 0, 7], [0, 7, 0],
                                 [7, 0, 0], [7, 7, 7], [0, 0, 0]])
    training_set_outputs = array([[0, 1, 1, 1, 1, 0, 0]]).T

    # Train the neural network using the training set.
    # Do it 10,000 times and make small adjustments each time.
    neural_network.train(training_set_inputs, training_set_outputs, 10000)

    print("Stage 2) New synaptic weights after training: ")
    neural_network.print_weights()

    # Test the neural network with a new situation.
    print("Stage 3) Considering a new situation [0, 7, 7] -> ?: ")
    hidden_state, output = neural_network.think(array([0, 7, 7]))
    print(output)
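Edit: after staring at it a bit longer, here is my own guess at what needs to change, in case it helps someone confirm or correct me. I am not sure this is right. Since think() now returns three arrays instead of two, the final line still unpacking only two names would explain the ValueError:

    # unpack all three return values of think() instead of two (my guess)
    hidden_1, hidden_2, output = neural_network.think(array([0, 7, 7]))
    print(output)

And comparing the layers in train(), I also suspect the layer 2 error should be backpropagated from layer 3 rather than compared to the training outputs directly, analogous to how layer 1 gets its error from layer 2:

    # backpropagate the error from layer 3 to layer 2 (my guess)
    layer2_error = layer3_delta.dot(self.layer3.synaptic_weights.T)
    layer2_delta = layer2_error * self.__sigmoid_derivative(output_from_layer_2)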