Tags: python, jupyter-notebook, conv-neural-network, mnist, image-classification

Does running training multiple times affect the accuracy of CNN image classification?


I'm a beginner. I'm trying image classification with a CNN on MNIST in a Jupyter notebook, using the program below. When I run the second, generator-based model.fit cell two (or more) times in a row, is the accuracy of the second run affected by the result of the first? Looking at the output below, the second run seems to start with better accuracy already. Please give me some advice.

[screenshot of the training output from both runs]

# import library
import keras
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sn
import shutil
import tensorflow as tf
from datetime import datetime, timedelta, timezone
from keras import backend as ke
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping
from keras.datasets import mnist
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, BatchNormalization
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from tqdm import tqdm

# MNIST
mnist=keras.datasets.mnist
(x_train,y_train),(x_test,y_test)=mnist.load_data()
(x_train,y_train),(x_test,y_test)=(x_train[:80],y_train[:80]),(x_test[:20], y_test[:20])
#(x_train,y_train),(x_test,y_test)=(x_train[:160],y_train[:160]),(x_test[:40], y_test[:40])
#(x_train,y_train),(x_test,y_test)=(x_train[:800],y_train[:800]),(x_test[:200], y_test[:200])
#(x_train,y_train),(x_test,y_test)=(x_train[:8000],y_train[:8000]),(x_test[:2000], y_test[:2000])
x_train=x_train.reshape(x_train.shape[0],28,28,1)
x_test=x_test.reshape(x_test.shape[0],28,28,1)
x_train=x_train/255
x_test=x_test/255
print("x_train",x_train.shape)
print("x_test",x_test.shape)

# model
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=(28,28,1), padding='same'))
model.add(BatchNormalization(axis=-1))  # BatchNormalization must go through model.add(); a bare call creates a layer that is never attached
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.20))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.20))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(BatchNormalization(axis=-1))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()

# model compile
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# model fit
model.fit(x_train,y_train,epochs=40)

# evaluate on test data
loss,acc=model.evaluate(x_test,y_test,verbose=2)
print('loss:','{:.3f}'.format(loss),'accuracy:','{:.3f}'.format(acc))

# ImageDataGenerator
datagen = ImageDataGenerator( 
    featurewise_center=False,
    samplewise_center=False, 
    featurewise_std_normalization=False, 
    samplewise_std_normalization=False, 
    zca_whitening=False,  
    rotation_range=10, 
    width_shift_range=0.1, 
    height_shift_range=0.1,  
    zoom_range=[0.1,2.0],  # [lower, upper]
    horizontal_flip=False, 
    vertical_flip=False)  
datagen.fit(x_train) 

datagen_test = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=[0.1,2.0],  # [lower, upper]
    horizontal_flip=False,
    vertical_flip=False)
datagen_test.fit(x_test)

# parameter
epochs = 100
iteration_train = 5
iteration_test = 2
batch_size_train = int(x_train.shape[0] / iteration_train)
batch_size_test = int(x_test.shape[0] / iteration_test)

gen_train_flow = datagen.flow(x_train, y_train, batch_size=batch_size_train) 
gen_test_flow  = datagen_test.flow(x_test, y_test, batch_size=batch_size_test)
history = model.fit(gen_train_flow,
                    steps_per_epoch=iteration_train,
                    epochs=epochs,
                    validation_data=gen_test_flow,
                    validation_steps=iteration_test)
                    # callbacks=callbacks

# evaluate on test data
loss,acc=model.evaluate(x_test,y_test,verbose=2)
print('loss:','{:.3f}'.format(loss),'accuracy:','{:.3f}'.format(acc))

# graph for training
acc=history.history['accuracy']
val_acc=history.history['val_accuracy']
epochs=range(1,len(acc)+1)
plt.plot(epochs,acc,'b',label='Training accuracy')
plt.plot(epochs,val_acc,'r',label='Val accuracy')
plt.legend()
plt.show()

Solution

  • Yes: calling model.fit again does not reset the model's weights. The second run continues training from wherever the first run stopped, which is why its accuracy already looks better from the first epoch. In effect, you are simply training for more total epochs (see the first sketch below).

  • The more you train your CNN, the more it starts learning the noise in the training set.

    Because of this, training accuracy keeps improving, but validation accuracy eventually goes down, since the model can no longer generalize to data it has not seen. To train longer without this effect, stop when validation performance stops improving (see the second sketch below).

    Check this out for more info: bias-variance tradeoff
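A minimal sketch of the first point. The helper build_model is hypothetical (it wraps a deliberately simplified stand-in for the question's network); everything else uses standard Keras calls:

from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train[:80].reshape(-1, 28, 28, 1) / 255
x_test = x_test[:20].reshape(-1, 28, 28, 1) / 255
y_train, y_test = y_train[:80], y_test[:20]

def build_model():
    # hypothetical helper: a simplified stand-in for the question's network
    m = Sequential()
    m.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=(28, 28, 1)))
    m.add(MaxPooling2D(pool_size=(2, 2)))
    m.add(Flatten())
    m.add(Dense(10, activation='softmax'))
    m.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
    return m

model = build_model()
model.fit(x_train, y_train, epochs=5, verbose=0)    # first run
_, acc1 = model.evaluate(x_test, y_test, verbose=0)
model.fit(x_train, y_train, epochs=5, verbose=0)    # second run: continues from the first run's weights
_, acc2 = model.evaluate(x_test, y_test, verbose=0)
print('after run 1:', acc1, 'after run 2:', acc2)

model = build_model()                               # rebuilding reinitializes the weights,
model.fit(x_train, y_train, epochs=5, verbose=0)    # so this run is independent of the two above

Rebuilding the model (or restarting the kernel) is what gives an independent run; re-running only the fit cell does not.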
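For the second point: the question already imports EarlyStopping and leaves a callbacks argument commented out, so one way to stop training before overfitting sets in is a callback like the following. This is a sketch that plugs into the generators and parameters defined in the question; the monitor and patience values are assumptions, not from the original post:

from keras.callbacks import EarlyStopping

# stop once validation loss has not improved for 10 epochs (assumed patience),
# and roll the model back to the best weights seen so far
callbacks = [EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)]

history = model.fit(gen_train_flow,
                    steps_per_epoch=iteration_train,
                    epochs=epochs,
                    validation_data=gen_test_flow,
                    validation_steps=iteration_test,
                    callbacks=callbacks)

Note that with restore_best_weights=True the model ends at its best validation checkpoint; a later re-run of the cell would still continue from those weights unless the model is rebuilt first.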