I used this code for my project, but I want to calculate or plot the error for each class. I have 6 classes. How can I do this?
def plot_history(net_history):
    """Plot the training loss and accuracy curves of a fitted Keras model.

    Args:
        net_history: the ``History`` object returned by ``model.fit``.
    """
    # Bug fix: the original read the global ``network_history`` instead of
    # the ``net_history`` parameter, so the argument was silently ignored.
    history = net_history.history
    losses = history['loss']
    # Older Keras versions store accuracy under 'acc'; newer ones use
    # 'accuracy' -- accept either so the plot works across versions.
    accuracies = history.get('acc', history.get('accuracy'))

    plt.figure()  # give the loss curve its own figure
    plt.xlabel('Epochs')
    plt.ylabel('loss')
    plt.plot(losses)

    plt.figure()
    plt.xlabel('Epochs')
    plt.ylabel('accuracy')
    plt.plot(accuracies)
# Build a small 1-D convolutional classifier for 6 classes.
inp = layers.Input(shape=(100, 200))
x = layers.Conv1D(16, 3, activation='relu', padding='same', strides=2)(inp)
x = layers.Conv1D(32, 3, activation='relu', padding='same', strides=2)(x)
x = layers.Flatten()(x)
probs = layers.Dense(6, activation='softmax')(x)

mymodel = Model(inp, probs)
mymodel.summary()

# Categorical cross-entropy matches the one-hot labels and softmax output.
mymodel.compile(optimizer=keras.optimizers.Adam(),
                loss=keras.losses.categorical_crossentropy,
                metrics=['accuracy'])

# Train for 5 epochs, holding out 20% of the training data for validation.
network_history = mymodel.fit(X_train, Y_train, batch_size=128, epochs=5,
                              validation_split=0.2)
plot_history(network_history)

# Final held-out evaluation and per-sample class probabilities.
test_loss, test_acc = mymodel.evaluate(X_test, Y_test)
test_labels_p = mymodel.predict(X_test)
A simple way to get per-class metrics for a classifier is to use the classification_report function from scikit-learn:
from sklearn.metrics import classification_report
....
# Actual predictions here, not just probabilities
pred = numpy.round(mymodel.predict(X_test))
print(classification_report(Y_test, pred))
where Y_test is a list of one-hot label vectors.
This will show you the precision, recall, and F1 score for each of your 6 classes. The downside is that it only considers whether each prediction was correct or incorrect and does not incorporate the model's confidence.