Tags: python, tensorflow, keras, google-colaboratory

Model trained on Google Colab not saving to my Drive


I am building a face mask detection model on Google Colab, but I'm unable to save the trained model to my Google Drive. I ran the following code on Colab:

!echo 'Installing required software'
!apt-get install -y -qq software-properties-common module-init-tools 2>&1 > /dev/null
!echo 'Add apt-repository with Google. Drive Fuse'
!add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
!echo 'Updating packages...'
!apt-get update -y -qq
!echo 'Installing google-drive-ocamlfuse fuse...'
!apt-get install -y -qq google-drive-ocamlfuse fuse
!echo 'Authenticate Fuse in Google.Drive...'
from google.colab import auth
from oauth2client.client import GoogleCredentials
import getpass
auth.authenticate_user()
creds = GoogleCredentials.get_application_default()
!google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL
vcode = getpass.getpass('Enter auth code here: ')
!echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}

#Mount the google drive
import os
from google.colab import drive
drive.mount('/content/drive')
Dataset='/content/drive/MyDrive/data'
Data_Dir=os.listdir(Dataset)
print(Data_Dir)

#Import necessary libraries
import cv2
import numpy as np
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split

img_rows, img_cols = 112, 112

images = []
labels = []

for category in Data_Dir:
  folder_path = os.path.join(Dataset, category)
  for img in os.listdir(folder_path):
    img_path = os.path.join(folder_path, img)
    img=cv2.imread(img_path)
    
    try:
      grayscale_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
      resized_img = cv2.resize(grayscale_img,(img_rows, img_cols))
      images.append(resized_img)
      labels.append(category)
    except Exception as e:
      print('Exception:',e)

images = np.array(images)/255.0
images = np.reshape(images,(images.shape[0],img_rows, img_cols,1))

lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
labels = np.array(labels)

(train_X, test_X, train_y, test_y) = train_test_split(images, labels, test_size=0.25, random_state=0)

# Import Necessary Keras Libraries

from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout
from keras.layers import Conv2D,MaxPooling2D

# Define model parameters

num_classes = 2
batch_size=32

# Build CNN model using Sequential API 
model=Sequential()

#First layer group containing Convolution, Relu and MaxPooling layers 
model.add(Conv2D(64,(3,3), input_shape=(img_rows, img_cols, 1))) 
model.add(Activation ('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

#Second layer group containing Convolution, Relu and MaxPooling layers 
model.add(Conv2D(128, (3,3)))
model.add(Activation ('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

#Flatten and Dropout layers: flatten the convolutional output above and help reduce overfitting

model.add(Flatten()) 
model.add(Dropout(0.5))

model.add(Dense(64,activation = 'relu'))
model.add(Dense(num_classes, activation = 'softmax'))

print(model.summary())

from keras.utils.vis_utils import plot_model
plot_model(model, to_file = 'face_mask_detection_arch.png')

from keras.optimizers import Adam

epochs = 45
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.001),
              metrics=['accuracy'])

fitted_model=model.fit(
    train_X,
    train_y,
    epochs=epochs, 
    validation_split=0.25)

from matplotlib import pyplot as plt

# Plot Training and Validation Loss

plt.plot(fitted_model.history['loss'], 'r',label='training loss')
plt.plot(fitted_model.history['val_loss'], label='validation loss')
plt.xlabel('Number of Epochs') 
plt.ylabel('Loss Value')
plt.legend()
plt.show()

# Plot Training and Validation Accuracy

plt.plot(fitted_model.history['accuracy'], 'r',label='training accuracy') 
plt.plot(fitted_model.history['val_accuracy'], label='validation accuracy') 
plt.xlabel('Number of Epochs')
plt.ylabel('Accuracy Value') 
plt.legend() 
plt.show()

#Save or Serialize the model
model.save('face_mask_detection_alert_system.h5')

There is no error when I run the last cell to save the model; however, the model isn't getting saved to my Drive. Source code: https://theaiuniversity.com/courses/face-mask-detection-alert-system/
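
As a quick sanity check after the save cell, this prints the notebook's working directory and looks for the .h5 file both locally and under the mounted Drive folder (assuming the cells above have already executed and Drive is still mounted at /content/drive):

import os, glob

# Directory the notebook is currently writing to
print('Working directory:', os.getcwd())

# Any .h5 files saved in the working directory?
print('Local .h5 files:', glob.glob('*.h5'))

# Any .h5 files anywhere under the mounted Drive folder?
print('Drive .h5 files:', glob.glob('/content/drive/MyDrive/**/*.h5', recursive=True))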


Solution

  • Change your working directory to Google Drive and then call model.save()

    Add %cd /content/drive/MyDrive/ to change your working directory to your Drive.

    Another option is to pass the full path to model.save(), like this: model.save('/content/drive/MyDrive/face_mask_detection_alert_system.h5'). A short verification sketch follows the full code below.

    Full code

    !echo 'Installing required software'
    !apt-get install -y -qq software-properties-common module-init-tools 2>&1 > /dev/null
    !echo 'Add apt-repository with Google.Drive Fuse'
    !add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
    !echo 'Updating packages...'
    !apt-get update -y -qq
    !echo 'Installing google-drive-ocamlfuse fuse...'
    !apt-get install -y -qq google-drive-ocamlfuse fuse
    !echo 'Authenticate Fuse in Google.Drive...'
    from google.colab import auth
    from oauth2client.client import GoogleCredentials
    import getpass
    auth.authenticate_user()
    creds = GoogleCredentials.get_application_default()
    !google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL
    vcode = getpass.getpass('Enter auth code here: ')
    !echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}
    
    #Mount the google drive
    import os
    from google.colab import drive
    drive.mount('/content/drive')
    Dataset='/content/drive/MyDrive/data'
    %cd /content/drive/MyDrive/
    Data_Dir=os.listdir(Dataset)
    print(Data_Dir)
    
    #Import necessary libraries
    import cv2
    import numpy as np
    from tensorflow.keras.utils import to_categorical
    from sklearn.preprocessing import LabelBinarizer
    from sklearn.model_selection import train_test_split
    
    img_rows, img_cols = 112, 112
    
    images = []
    labels = []
    
    for category in Data_Dir:
      folder_path = os.path.join(Dataset, category)
      for img in os.listdir(folder_path):
        img_path = os.path.join(folder_path, img)
        img=cv2.imread(img_path)
        
        try:
          grayscale_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
          resized_img = cv2.resize(grayscale_img,(img_rows, img_cols))
          images.append(resized_img)
          labels.append(category)
        except Exception as e:
          print('Exception:',e)
    
    images = np.array(images)/255.0
    images = np.reshape(images,(images.shape[0],img_rows, img_cols,1))
    
    lb = LabelBinarizer()
    labels = lb.fit_transform(labels)
    labels = to_categorical(labels)
    labels = np.array(labels)
    
    (train_X, test_X, train_y, test_y) = train_test_split(images, labels, test_size=0.25, random_state=0)
    
    # Import Necessary Keras Libraries
    
    from keras.models import Sequential
    from keras.layers import Dense, Activation, Flatten, Dropout
    from keras.layers import Conv2D,MaxPooling2D
    
    # Define model parameters
    
    num_classes = 2
    batch_size=32
    
    # Build CNN model using Sequential API 
    model=Sequential()
    
    #First layer group containing Convolution, Relu and MaxPooling layers 
    model.add(Conv2D(64,(3,3), input_shape=(img_rows, img_cols, 1))) 
    model.add(Activation ('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    
    #Second layer group containing Convolution, Relu and MaxPooling layers 
    model.add(Conv2D(128, (3,3)))
    model.add(Activation ('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    
    #Flatten and Dropout layers: flatten the convolutional output above and help reduce overfitting
    
    model.add(Flatten()) 
    model.add(Dropout(0.5))
    
    model.add(Dense(64,activation = 'relu'))
    model.add(Dense(num_classes, activation = 'softmax'))
    
    print(model.summary())
    
    from keras.utils.vis_utils import plot_model
    plot_model(model, to_file = 'face_mask_detection_arch.png')
    
    from keras.optimizers import Adam
    
    epochs = 45
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=0.001),
                  metrics=['accuracy'])
    
    fitted_model=model.fit(
        train_X,
        train_y,
        epochs=epochs, 
        validation_split=0.25)
    
    from matplotlib import pyplot as plt
    
    # Plot Training and Validation Loss
    
    plt.plot(fitted_model.history['loss'], 'r',label='training loss')
    plt.plot(fitted_model.history['val_loss'], label='validation loss')
    plt.xlabel('Number of Epochs') 
    plt.ylabel('Loss Value')
    plt.legend()
    plt.show()
    
    # Plot Training and Validation Accuracy
    
    plt.plot(fitted_model.history['accuracy'], 'r',label='training accuracy') 
    plt.plot(fitted_model.history['val_accuracy'], label='validation accuracy') 
    plt.xlabel('Number of Epochs')
    plt.ylabel('Accuracy Value') 
    plt.legend() 
    plt.show()
    
    #Save or Serialize the model
    model.save('face_mask_detection_alert_system.h5')
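
    As a sanity check for the second option, here is a minimal sketch, assuming Drive is already mounted at /content/drive as above and model is the trained model from the earlier cells; it saves with the full path and then confirms the file is visible under MyDrive:

    import os

    # Option 2: save directly into Drive with an absolute path (no %cd needed)
    save_path = '/content/drive/MyDrive/face_mask_detection_alert_system.h5'
    model.save(save_path)

    # Confirm the file actually landed in the mounted Drive folder
    print('Saved:', os.path.exists(save_path), '-', os.path.getsize(save_path), 'bytes')

    If the file does not show up in the Drive web UI right away, calling google.colab.drive.flush_and_unmount() at the end of the session forces any pending writes to be synced to Drive.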