Python Forum
Why is accuracy not working?
#1
import math; #For pow and sqrt
from random import shuffle;


###_Read Data_###
def ReadData(fileName):
    #Read the file, splitting by lines
    f = open(fileName,'r');
    lines = f.read().splitlines();
    f.close();

    #Split the first line by commas, remove the last element
    #and save the length of the rest.
    featuresNumber = len(lines[0].split(','));

    items = [];
    classes = [];
    features = lines[0].split(',')[:-1];

    for i in range(1, len(lines)):
        line = lines[i].split(',');

        if(line[-1] not in classes):
            classes.append(line[-1]);

        itemFeatures = {"Class" : line[-1], "Bias" : 1};

        for j in range(len(features)):
            f = features[j]; #Get the feature at index j
            v = float(line[j]);

            itemFeatures[f] = v;
    
        items.append(itemFeatures);

    shuffle(items);

    return items,classes,features;


###_Evaluation Functions_###
def K_FoldValidation(K, Items, rate, epochs, classes, features):
    if(K > len(Items)):
        return -1;

    correct = 0; #The number of correct classifications
    total = len(Items)*(K-1); #The total number of classifications

    l = len(Items)/K; #The length of a fold

    for i in range(K):
        #Split data set into training and testing
        trainingSet = Items[i*l:(i+1)*l];
        testSet = Items[:i*l] + Items[(i+1)*l:];

        weights = CalculateWeights(trainingSet, rate, epochs, classes, features);

        for item in testSet:
            itemClass = item["Class"];

            itemFeatures = {};

            for key in item:
                if(key != "Class"):
                    #If key isn't "Class", add it to itemFeatures
                    itemFeatures[key] = item[key];
          
            guess = Perceptron(itemFeatures, weights);

            if(guess == itemClass):
                #Guessed correctly
                correct += 1;

    return correct/float(total);

def Evaluate(times, K, Items, rate, epochs, classes, features):
    accuracy = 0;
    for t in range(times):
        #shuffle(Items);
        accuracy += K_FoldValidation(K, Items, rate, epochs, classes, features);

    print (accuracy/float(times));


###_Auxiliary Functions_###
def AddDictionaries(d1, d2, rate):
    d3 = {};
    for i in d1:
        d3[i] = d1[i] + rate*d2[i];

    return d3;

def SubDictionaries(d1, d2, rate):
    d3 = {};
    for i in d1:
        d3[i] = d1[i] - rate*d2[i];

    return d3;

def CalculateConfidence(item, weight):
    #Add the product of the weight and item values for each feature
    confidence = 0;

    for k in weight:
        confidence += weight[k]*item[k];

    return confidence;


###_Core Functions_###
def CalculateWeights(trainingSet, rate, epochs, classes, features):
    #Initialize weights at 0
    weights = {};

    #Initialize weights dictionary. Weights is divided in classes.
    #Each class has its own dictionary, which is numerical values/weights
    #for the features.
    for c in classes:
        weights[c] = {"Bias":0};
        for f in features:
            weights[c][f] = 0;

    for epoch in range(epochs):
        for item in trainingSet:
            #Iterate through trainingSet
            #Guess where item belongs
            y = -1;
            guess = "";
            for w in weights:
                confidence = CalculateConfidence(item, weights[w]);

                if(confidence > y):
                    y = confidence;
                    guess = w;

            correct = item["Class"];
            if(correct != guess):
                weights[guess] = SubDictionaries(weights[guess], item, rate);
                weights[correct] = AddDictionaries(weights[correct], item, rate);

    return weights;

def Perceptron(item, weights):
    item["Bias"] = 1; #Augment item vector with bias
    m = -1; #Hold the maximum
    classification = "";

    #Calculate chance of item being in each class,
    #pick the maximum.
    for w in weights:
        #Multiply the item vector with the class weights vector
        guess = CalculateConfidence(item, weights[w]);
        if(guess > m):
            #Our guess is better than our current best guess,
            #update max and classification
            m = guess;
            classification = w;

    return classification;


###_Main_###
def main():
    items, classes, features = ReadData('data.txt');

    lRate = 0.1;
    epochs = 50;
    weights = CalculateWeights(items, lRate, epochs, classes, features);

    item = {'PW' : 1.4, 'PL' : 4.7, 'SW' : 3.2, 'SL' : 7.0};
    print (Perceptron(item, weights));

    Evaluate(100, 5, items, lRate, epochs, classes, features);

if __name__ == "__main__":
    main();
Iris-versicolor
Traceback (most recent call last):
  File "C:\python\program\Machine Learning\Classifiers\Perceptron\Perceptron.py", line 176, in <module>
    main();
  File "C:\python\program\Machine Learning\Classifiers\Perceptron\Perceptron.py", line 173, in main
    Evaluate(100, 5, items, lRate, epochs, classes, features);
  File "C:\python\program\Machine Learning\Classifiers\Perceptron\Perceptron.py", line 80, in Evaluate
    accuracy += K_FoldValidation(K, Items, rate, epochs, classes, features);
  File "C:\python\program\Machine Learning\Classifiers\Perceptron\Perceptron.py", line 53, in K_FoldValidation
    trainingSet = Items[i*l:(i+1)*l];
TypeError: slice indices must be integers or None or have an __index__ method
#2
The variable l is likely a float, since it comes from the division on line 49 (in Python 3, the / operator always returns a float). That makes your slice indices on line 53 floats as well, which is not allowed. Use int() to convert l to an integer, or use floor division (//), so it can be used as a slice index.
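For example, a minimal sketch of the change, assuming the rest of your K_FoldValidation stays as posted:

l = len(Items) // K            # floor division returns an int in Python 3
# or, equivalently: l = int(len(Items) / K)

for i in range(K):
    # with l an int, these slice indices are valid
    trainingSet = Items[i*l:(i+1)*l]
    testSet = Items[:i*l] + Items[(i+1)*l:]

Either form gives you an integer fold length; nothing else in the function needs to change for the TypeError to go away.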
Craig "Ichabod" O'Brien - xenomind.com