Neural network
Hello,

I have this code for a neural network:

import numpy as np

#X = np.array(([1 , 0],),)
#yd = np.array(([1],), )
#xPredicted = np.array(([1 , 1]),)

for i in range(1, 200):
  for j in range(1, 200):
    x1 = -10 + 0.1 * (i - 1)
    x2 = -10 + 0.1 * (j - 1)
    X = np.array(([x1, x2],),)
    #print(X)
    yd = np.array(([x1**2 + x2 + 3],),)
    #print(yd)
xPredicted = np.array(([1, 1]),)

class Neural_Network(object):
  def __init__(self):
    # parameters
    self.inputSize = 2
    self.outputSize = 1
    self.hiddenSize = 2

    # weights
    self.W1 = np.random.randn(self.inputSize, self.hiddenSize) # (2x2) weight matrix from input to hidden layer
    self.W2 = np.random.randn(self.hiddenSize, self.outputSize) # (2x1) weight matrix from hidden to output layer
 
  def forward(self, X):
    # forward propagation through our network
    self.H1 = np.dot(X, self.W1) # dot product of X (input) and the first set of 2x2 weights
    self.Y1 = self.sigmoid(self.H1) # first activation function
    self.H2 = np.dot(self.Y1, self.W2) # dot product of the hidden layer output and the second set of 2x1 weights
    Z = self.sigmoid(self.H2) # final activation function
    return Z

  def sigmoid(self, s):
    # activation function
    return 1/(1+np.exp(-s))

  def sigmoidPrime(self, s):
    #derivative of sigmoid
    return s * (1 - s)
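    # (note: this expects s to already be the output of sigmoid:
    #  if s = sigmoid(x), then d sigmoid/dx = sigmoid(x)*(1 - sigmoid(x)) = s*(1 - s))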

  def backward(self, X, yd, Z):
    # backward propagate through the network
    self.E_error = yd - Z # error in output
    self.Z_delta = self.E_error * self.sigmoidPrime(Z) # applying derivative of sigmoid to output error

    self.Y1_error = self.Z_delta.dot(self.W2.T) # Y1 error: how much the hidden layer contributed to the output error
    self.Y1_delta = self.Y1_error * self.sigmoidPrime(self.Y1) # applying derivative of sigmoid to Y1 error

    self.W1 += X.T.dot(self.Y1_delta) # adjusting the (input --> hidden) weights
    self.W2 += self.Y1.T.dot(self.Z_delta) # adjusting the (hidden --> output) weights

  def train(self, X, yd):
    Z = self.forward(X)
    self.backward(X, yd, Z)


  def predict(self):
    print("Predicted data based on trained weights: ")
    print("Input (scaled): \n" + str(xPredicted))
    print("Output: \n" + str(self.forward(xPredicted)))

NN = Neural_Network()
for k in range(100): # trains the NN 100 times
  print("# " + str(k) + "\n")
  print("Input (scaled): \n" + str(X))
  print("Actual Output: \n" + str(yd))
  print("Predicted Output: \n" + str(NN.forward(X)))
  print("Loss: \n" + str(yd - NN.forward(X)))
  print ("\n")
  NN.train(X, yd)
  NN.predict()
I want to test the code for different values of X and yd, so I used this for loop:

for i in range(1, 200):
  for j in range(1, 200):
    x1 = -10 + 0.1 * (i - 1)
    x2 = -10 + 0.1 * (j - 1)
    X = np.array(([x1, x2],),)
    #print(X)
    yd = np.array(([x1**2 + x2 + 3],),)
    #print(yd)
My problem: I don't obtain all the possibilities of X and yd. The loop should give me almost 200 values for each variable, but when I run the code I only obtain one possibility.
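For example, if I print X and yd after the loop has finished (a minimal check, nothing else in the code changed), I only ever see the values from the final iteration, i = j = 199:

import numpy as np

for i in range(1, 200):
  for j in range(1, 200):
    x1 = -10 + 0.1 * (i - 1)
    x2 = -10 + 0.1 * (j - 1)
    X = np.array(([x1, x2],),)
    yd = np.array(([x1**2 + x2 + 3],),)

# these run only once, after the loop has already overwritten X and yd 199*199 times
print(X)   # [[9.8 9.8]]  -- only the last pair survives
print(yd)  # [[108.84]]   -- 9.8**2 + 9.8 + 3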

Output:
# 0  Input (scaled): [[9.8 9.8]]  Actual Output: [[108.84]]  Predicted Output: [[0.56752375]]  Loss: [[108.27247625]]
Predicted data based on trained weights:  Input (scaled): [1 1]  Output: [1.]
# 1  Input (scaled): [[9.8 9.8]]  Actual Output: [[108.84]]  Predicted Output: [[1.]]  Loss: [[107.84]]
Predicted data based on trained weights:  Input (scaled): [1 1]  Output: [1.]
# 2  Input (scaled): [[9.8 9.8]]  Actual Output: [[108.84]]  Predicted Output: [[1.]]  Loss: [[107.84]]
Predicted data based on trained weights:  Input (scaled): [1 1]  Output: [1.]
# 3  Input (scaled): [[9.8 9.8]]  Actual Output: [[108.84]]  Predicted Output: [[1.]]  Loss: [[107.84]]
Predicted data based on trained weights:  Input (scaled): [1 1]  Output: [1.]
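While looking at this output I also notice that the Predicted Output gets stuck at exactly 1. I think this is because the final activation is a sigmoid, which is bounded in (0, 1), so forward() can never come close to a target like 108.84 (a quick check, using the same sigmoid as in the class):

import numpy as np

def sigmoid(s):
    # same activation function as in Neural_Network
    return 1 / (1 + np.exp(-s))

# a sigmoid saturates at 1, so the network output can never approach yd = 108.84
print(sigmoid(100))           # 1.0 (saturated in float64)
print(108.84 - sigmoid(100))  # 107.84, the exact Loss printed from iteration # 1 onward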
Can anyone help me, please?