Hi scidam,
Thanks for the reply. I am a student trying to compute class probabilities for a decision tree and a multilayer perceptron. I am having trouble writing a method that aggregates the probability for each class produced by these classifiers, and I would like to know how to create such a method for the classes present in the dataset.
1. Here, "class" refers to the four classes given in the dataset.
Thanks for the reply. I am a student trying to compute class probabilities for a decision tree and a multilayer perceptron. I am having trouble writing a method that aggregates the probability for each class produced by these classifiers, and I would like to know how to create such a method for the classes present in the dataset.
1. Here, "class" refers to the four classes given in the dataset.
"""Compare a decision-tree and an MLP classifier on the Forest dataset.

Loads the data from an Excel sheet (first column = class label, remaining
columns = features), then for each classifier prints the per-class test-set
probabilities, the hold-out accuracy, and the 10-fold cross-validated
accuracy.
"""
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier

# Raw string so '\F' is a literal backslash, not a (deprecated) escape.
PATH = r"G:\Forest.xlsx"


def evaluate(name, clf, X_train, X_test, y_train, y_test, X, y):
    """Fit *clf* on the training split and print its evaluation metrics.

    predict_proba returns one probability column per class, ordered as
    ``clf.classes_`` — this is the per-class probability the original
    script printed for both models.
    """
    clf.fit(X_train, y_train)
    predictions = clf.predict(X_test)
    proba = clf.predict_proba(X_test)
    print("probability:", proba)
    # Per-class aggregate: the mean predicted probability for each class
    # over the whole test set, keyed by the class labels from the data.
    for label, mean_p in zip(clf.classes_, proba.mean(axis=0)):
        print("mean probability for class %s with %s: %.4f"
              % (label, name, mean_p))
    print("Accuracy score of our model with %s :" % name,
          accuracy_score(y_test, predictions))
    # 10-fold cross-validation on the full dataset (refits internally).
    scores = cross_val_score(clf, X, y, cv=10)
    print("Accuracy score of our model with %s under cross validation :"
          % name, scores.mean())
    print('Mean Accuracy of %s: %.3f%%' % (name, scores.mean()))


def main():
    rawdata = pd.read_excel(PATH)
    print("data summary")
    print(rawdata.describe())
    # First column is the target class, the remaining columns are features.
    X, y = rawdata.iloc[:, 1:].values, rawdata.iloc[:, 0].values
    # Stratified split keeps the four class proportions in both sets.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, stratify=y, random_state=0)

    evaluate("DT", DecisionTreeClassifier(criterion='entropy'),
             X_train, X_test, y_train, y_test, X, y)
    evaluate("MLP",
             MLPClassifier(hidden_layer_sizes=(5, 2), activation='logistic',
                           solver='sgd', learning_rate='constant',
                           learning_rate_init=0.1),
             X_train, X_test, y_train, y_test, X, y)


if __name__ == "__main__":
    main()