My dataset has grayscale images of size 128 × 128 × 1, and I use a batch size of 10.
I am using a CNN model, but I get this error from BatchNorm2d:
expected 4D input (got 2D input)
Below is how I transform each image (grayscale → tensor → normalize) and divide the data into batches:
import os
import torch
from torchvision import datasets, transforms

data_transforms = {
    'train': transforms.Compose([
        transforms.Grayscale(num_output_channels=1),
        transforms.Resize(128),
        transforms.CenterCrop(128),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])
    ]),
    'val': transforms.Compose([
        transforms.Grayscale(num_output_channels=1),
        transforms.Resize(128),
        transforms.CenterCrop(128),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])
    ]),
}

data_dir = '/content/drive/My Drive/Colab Notebooks/pytorch'
dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
         for x in ['train', 'val']}
dset_loaders = {x: torch.utils.data.DataLoader(dsets[x], batch_size=10,
                                               shuffle=True, num_workers=25)
                for x in ['train', 'val']}
dset_sizes = {x: len(dsets[x]) for x in ['train', 'val']}
dset_classes = dsets['train'].classes
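For reference, each batch coming out of these loaders is already 4D, which is what the convolutional part of the network expects (a quick check you can run; the names xb and yb are just illustrative):

xb, yb = next(iter(dset_loaders['train']))
print(xb.shape)  # torch.Size([10, 1, 128, 128]), i.e. (batch, channels, height, width)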
I used this model:
import torch.nn as nn

class HeartNet(nn.Module):
    def __init__(self, num_classes=7):
        super(HeartNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
            nn.ELU(inplace=True),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.ELU(inplace=True),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ELU(inplace=True),
            nn.BatchNorm2d(128),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
            nn.ELU(inplace=True),
            nn.BatchNorm2d(128),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.ELU(inplace=True),
            nn.BatchNorm2d(256),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
            nn.ELU(inplace=True),
            nn.BatchNorm2d(256),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(16 * 16 * 256, 2048),
            nn.ELU(inplace=True),
            nn.BatchNorm2d(2048),  # this layer raises the error on 2D input
            nn.Linear(2048, num_classes)
        )
        nn.init.xavier_uniform_(self.classifier[1].weight)
        nn.init.xavier_uniform_(self.classifier[4].weight)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), 16 * 16 * 256)  # flatten: (N, C, H, W) -> (N, 65536)
        x = self.classifier(x)
        return x
How can I solve this problem?
You have a problem with the batch-norm layer inside your self.classifier sub-network: while your self.features sub-network is fully convolutional and requires BatchNorm2d, the self.classifier sub-network is a fully-connected multi-layer perceptron (MLP) and is 1D in nature. Note how the forward function removes the spatial dimensions from the feature map x before feeding it to the classifier.
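A minimal sketch of the mismatch (the batch size and feature count follow the model above):

import torch
import torch.nn as nn

x = torch.randn(10, 2048)             # 2D activations: (batch, features)
print(nn.BatchNorm1d(2048)(x).shape)  # works: BatchNorm1d accepts (N, C) input
nn.BatchNorm2d(2048)(x)               # raises: expected 4D input (got 2D input)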
Try replacing the BatchNorm2d in self.classifier with BatchNorm1d.
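For example, a sketch of the corrected classifier; only the batch-norm line changes:

self.classifier = nn.Sequential(
    nn.Dropout(0.5),
    nn.Linear(16 * 16 * 256, 2048),
    nn.ELU(inplace=True),
    nn.BatchNorm1d(2048),  # normalizes (N, C) activations
    nn.Linear(2048, num_classes)
)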