python · machine-learning · pytorch · conv-neural-network · artificial-intelligence

What am I doing wrong with my CNN that it is gaining accuracy so slowly?


I am using this CNN to detect information in EEG scans. It is gaining accuracy really slowly, and I am wondering if I am missing anything in any of the layers or doing something else wrong.

import torch
from torch.nn import (Module, Sequential, Conv1d, BatchNorm1d, LeakyReLU,
                      MaxPool1d, Dropout, Linear, LogSoftmax, Flatten,
                      CrossEntropyLoss)
from torch.optim import SGD

class Net(Module):
    def __init__(self):
        super(Net, self).__init__()
        self.cnn_layers = Sequential(
            Conv1d(1, 14, kernel_size=5, padding=1),
            BatchNorm1d(14),
            LeakyReLU(0.1),
            MaxPool1d(kernel_size=5, stride=1),
        )
        self.cnn_layer2 = Sequential(
            Conv1d(14, 10, kernel_size=5, padding=1),
            BatchNorm1d(10),
            LeakyReLU(0.1),
            MaxPool1d(kernel_size=5, stride=1),
            Dropout(0.2),
        )
        self.cnn_layer3 = Sequential(
            Conv1d(10, 10, kernel_size=5, padding=1),
            BatchNorm1d(10),
            LeakyReLU(0.1),
            MaxPool1d(kernel_size=5, stride=1),
            Dropout(0.2),
        )
        self.linear_layer1 = Sequential(
            Linear(in_features=35660, out_features=3500),
            BatchNorm1d(3500),
            LeakyReLU(0.1),
            Dropout(0.2),
        )
        self.linear_layer2 = Sequential(
            Linear(in_features=3500, out_features=2500),
            BatchNorm1d(2500),
            LeakyReLU(0.1),
            Dropout(0.2)
        )
        self.linear_layer3 = Sequential(
            Linear(in_features=2500, out_features=250),
            BatchNorm1d(250),
            LeakyReLU(0.1),
            Dropout(0.2)
        )
        self.linear_layer4 = Sequential(
            Linear(in_features=250, out_features=10)
        )
        self.logsoft = Sequential(
            LogSoftmax(dim=1)
        )
        self.flatten = Sequential(
            Flatten() # probably has to be changed
        )

    def forward(self, x):
        x = self.cnn_layers(x)
        x = self.cnn_layer2(x)
        x = self.cnn_layer3(x)
        x = self.flatten(x)
        x = self.linear_layer1(x)
        x = self.linear_layer2(x)
        x = self.linear_layer3(x)
        x = self.linear_layer4(x)
        x = self.logsoft(x)
        return x



model = Net()

# Build a dataset by taking 255 columns and grouping them into 14 * 255 channels
class CustomDataSet(torch.utils.data.Dataset):
    def __init__(self, csv_file, label,  transform=None):
        self.df = csv_file
        self.transform = transform
        self.label = label

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, index):
        scan = self.df[index]
        label = self.label[index]
        if self.transform:
            scan = self.transform(scan)

        return scan, label

print(rows.shape)

train_dataset = CustomDataSet(csv_file=rows, label=labels)

optimizer = SGD(model.parameters(), lr=0.001, weight_decay=5.0e-5)
criterion = CrossEntropyLoss()
num_epochs = 500
train_loss_list = []
batch_size = 500
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
for epoch in range(num_epochs):
    print(f'Epoch {epoch + 1}/{num_epochs}:', end=' ')
    train_loss = 0

    # Iterating over the training dataset in batches
    total_correct = 0
    total_samples = 0
    model.train()
    for i, (scan, labels) in enumerate(train_loader):
        # Extracting the scans and target labels for the batch being iterated,
        # then calculating the model output and the cross-entropy loss
        outputs = model(scan)
        print(outputs.shape)

        loss = criterion(outputs, labels)

        # Updating weights according to calculated loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        _, predicted = torch.max(outputs, 1)

        total_correct += (predicted == labels).sum().item()
        total_samples += labels.size(0)
    # Printing accuracy and loss for each epoch
    accuracy = 100 * total_correct / total_samples
    print("Accuracy: ", accuracy)
    train_loss_list.append(train_loss / len(train_loader))
    print(f"Training loss = {train_loss_list[-1]}")

I have tried adding batch normalization to each layer as well as dropout layers. Each epoch is trained on 20,000 scans, but I have access to 51,000, so I might try it with more data. After 100 epochs it only reached 13 percent accuracy. Is this normal, or have I made a mistake?


Solution

  • You're using CrossEntropyLoss incorrectly.

    Read the PyTorch docs: CrossEntropyLoss combines a LogSoftmax and an NLLLoss in a single operation.

    You apply a log softmax at the end of your model and then pass the result to CrossEntropyLoss, which applies log softmax again internally. This means you are softmaxing twice; either drop the final LogSoftmax from forward(), or keep it and switch the criterion to NLLLoss.
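
    A minimal sketch of both fixes, assuming the rest of the model stays as posted (NLLLoss would also need to be added to the torch.nn imports):

    # Option 1: return raw logits and let CrossEntropyLoss apply
    # the log softmax internally.
    def forward(self, x):  # inside Net, replacing the posted forward()
        x = self.cnn_layers(x)
        x = self.cnn_layer2(x)
        x = self.cnn_layer3(x)
        x = self.flatten(x)
        x = self.linear_layer1(x)
        x = self.linear_layer2(x)
        x = self.linear_layer3(x)
        x = self.linear_layer4(x)
        return x  # raw logits; no self.logsoft(x) here

    criterion = CrossEntropyLoss()

    # Option 2: keep the LogSoftmax in forward() and pair it with NLLLoss,
    # which expects log-probabilities rather than raw logits.
    criterion = NLLLoss()

    A quick check of the equivalence the docs describe:

    import torch
    import torch.nn.functional as F

    logits = torch.randn(4, 10)
    target = torch.randint(0, 10, (4,))
    assert torch.allclose(F.cross_entropy(logits, target),
                          F.nll_loss(F.log_softmax(logits, dim=1), target))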