Single layer perceptron not outputting correct results

Hello guys,

I'm doing a very simple project for my machine learning class: implementing a single-layer perceptron to classify the letters "L" and "I" on a 3x3 pixel grid. The perceptron has 9 input nodes, one per pixel, where 0 means the pixel is empty and 1 means it is part of the letter. I've finished the code, but the outputs are incorrect after I train and test the network. I'm very new to neural networks, so my inexperience is making it hard to debug. I've tested the program with different numbers of training iterations and learning rates, yet it misclassifies at least half of the samples every time. Here's the code:

import numpy as np
import random


def train(data, weights):
  # Learning rate
  n = 0.05
  # Iterate through sample dataset once
  for i in range(0, len(data)):
    output = compute_activation(data[i][0], weights)
    # Update weights
    expected = data[i][1]
    # Perceptron rule: w_j <- w_j - n * (output - expected) * x_j
    for j in range(0, len(weights)):
      weights[j] -= n * (output - expected) * data[i][0][j]
      weights[j] = round(weights[j], 3)  # round for readability when printing
  return weights


def compute_activation(inputs, weights):
  weighted_sum = np.dot(inputs, weights)
  output = activation(weighted_sum)
  return output


# Step (Heaviside) activation: 1 if the weighted sum is positive, else 0
def activation(weighted_sum):
  return 1 if weighted_sum > 0 else 0
  

def feedforward(data, weights):
  # Check each sample against its label
  for i in range(0, len(data)):
    output = compute_activation(data[i][0], weights)
    print("Result: " + str(output == data[i][1]))

##############################################################################################
##############################################################################################
##############################################################################################

# 0 = L, 1 = I
data = (
        # L
        ((1, 0, 0, 1, 0, 0, 1, 1, 1), 0),
        ((1, 0, 0, 1, 0, 0, 1, 1, 0), 0),
        ((1, 0, 0, 1, 1, 0, 0, 0, 0), 0), 
        ((0, 1, 0, 0, 1, 0, 0, 1, 1), 0),
        ((0, 1, 0, 0, 1, 1, 0, 0, 0), 0),
        ((0, 0, 0, 1, 0, 0, 1, 1, 0), 0),
        ((0, 0, 0, 0, 1, 0, 0, 1, 1), 0),
        ((1, 0, 0, 1, 0, 0, 1, 1, 1), 0), 
        ((0, 1, 0, 0, 1, 0, 0, 1, 1), 0),
        # I
        ((1, 0, 0, 1, 0, 0, 1, 0, 0), 1),
        ((0, 1, 0, 0, 1, 0, 0, 1, 0), 1),
        ((0, 0, 1, 0, 0, 1, 0, 0, 1), 1),
        ((1, 0, 0, 1, 0, 0, 0, 0, 0), 1),
        ((0, 1, 0, 0, 1, 0, 0, 0, 0), 1),
        ((0, 0, 1, 0, 0, 1, 0, 0, 0), 1),
        ((0, 0, 0, 1, 0, 0, 1, 0, 0), 1),
        ((0, 0, 0, 0, 1, 0, 0, 1, 0), 1),
        ((0, 0, 0, 0, 0, 1, 0, 0, 1), 1))
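
# Sanity check: reshape one sample back into its 3x3 grid to confirm the
# pattern looks like the intended letter (this print is just for debugging)
print(np.array(data[0][0]).reshape(3, 3))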

# Initialize random weights between -1 and 1
weights = []
for i in range(0, len(data[0][0])):
  weights.append(random.uniform(-1, 1))

# Train neural network
num_iterations = 10
for i in range(0, num_iterations):
  weights = train(data, weights)
  print(weights)

feedforward(data, weights)
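
To make the failure easier to quantify, I also added a quick overall accuracy count (just a helper of my own, not part of the assignment):

# Count how many samples the trained perceptron gets right
correct = sum(compute_activation(x, weights) == label for x, label in data)
print("Accuracy: " + str(correct) + "/" + str(len(data)))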
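
One thing I'm unsure about: my perceptron has no bias term, so the decision boundary is forced through the origin. From what I've read, some datasets only become linearly separable once you add a bias, so I also tried a variant that appends a constant 1 to every input and learns one extra weight for it (the names add_bias, biased_data, and bias_weights are my own; this is a sketch, not a fix I'm sure of):

# Append a constant 1 input so the extra weight acts as a learnable threshold (bias)
def add_bias(pixels):
  return tuple(pixels) + (1,)

biased_data = tuple((add_bias(pixels), label) for pixels, label in data)

# One extra weight for the bias input, initialized like the others
bias_weights = [random.uniform(-1, 1) for _ in range(len(biased_data[0][0]))]
for i in range(0, num_iterations):
  bias_weights = train(biased_data, bias_weights)

feedforward(biased_data, bias_weights)

Is the missing bias likely to be my problem, or should I be looking elsewhere (learning rate, number of iterations, the rounding)?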