How can I solve this problem?
import torch
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 8, 11, padding=0)  # in_channels, out_channels, kernel_size
        self.pool = nn.MaxPool2d(2, 2)               # kernel_size, stride
        self.conv2 = nn.Conv2d(8, 36, 5, padding=0)
        self.fc1 = nn.Linear(36*291*291, 30)         # in_features, out_features
        self.fc2 = nn.Linear(30, 20)
        self.fc3 = nn.Linear(20, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1)  # flatten all dimensions except batch
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
I wrote my code like this, but I got "RuntimeError: mat1 and mat2 shapes cannot be multiplied". The input shape is torch.Size([3, 600, 600]), i.e. 3 channels of a 600x600 image. Please help me!
The in_features of your first linear layer is wrong: the output of your last convolution layer does not have shape 36x291x291. Trace the shapes for a 3x600x600 input: conv1 (kernel 11, no padding) gives 8x590x590, the first pooling halves that to 8x295x295, conv2 (kernel 5) gives 36x291x291, and the second pooling reduces it to 36x145x145. You took the shape before the second pooling; the flattened size is actually 36*145*145 = 756900. Just change the model definition to:
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 8, 11, padding=0)  # in_channels, out_channels, kernel_size
        self.pool = nn.MaxPool2d(2, 2)               # kernel_size, stride
        self.conv2 = nn.Conv2d(8, 36, 5, padding=0)
        self.fc1 = nn.Linear(36*145*145, 30)         # in_features = 756900, out_features
        self.fc2 = nn.Linear(30, 20)
        self.fc3 = nn.Linear(20, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1)  # flatten all dimensions except batch
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
I tried this with your input size, and it works.
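If you don't want to do this arithmetic by hand, a quick sanity check is to push a dummy batch through the convolutional part and print the intermediate shapes. Here is a minimal sketch of that idea; the batch size of 1 is arbitrary:

import torch

net = Net()
x = torch.randn(1, 3, 600, 600)  # dummy input: N, C, H, W
x = net.pool(F.relu(net.conv1(x)))
print(x.shape)  # torch.Size([1, 8, 295, 295])
x = net.pool(F.relu(net.conv2(x)))
print(x.shape)  # torch.Size([1, 36, 145, 145])
print(torch.flatten(x, 1).shape)  # torch.Size([1, 756900]) -> in_features of fc1

Alternatively, if your PyTorch version is recent enough (1.8+, if I remember correctly), nn.LazyLinear(30) infers in_features automatically on the first forward pass, so you never have to compute 756900 yourself. One more caveat: nn.Conv2d conventionally expects a 4D (N, C, H, W) input, so if your version rejects the unbatched [3, 600, 600] tensor, add a batch dimension with input.unsqueeze(0).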