Question:
I'm using my own dataset of black-and-white TIFF images, and I created model_0 following the videos. I'll paste my code below. This is the error I got:
Given groups=1, weight of size [10, 1, 3, 3], expected input[32, 3, 128, 128] to have 1 channels, but got 3 channels instead
Here is the full code:
# Imports (the usual torchvision setup from the videos)
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# Create a simple transform
simple_transform = transforms.Compose([
    transforms.Resize(size=(128, 128)),
    transforms.ToTensor(),
])
# Load and transform the data
train_data_simple = datasets.ImageFolder(root=train_dir,
                                         transform=simple_transform)
test_data_simple = datasets.ImageFolder(root=test_dir,
                                        transform=simple_transform)
# Turn the datasets into DataLoaders
BATCH_SIZE = 32
NUM_WORKERS = 2
train_dataloader_simple = DataLoader(dataset=train_data_simple,
                                     batch_size=BATCH_SIZE,
                                     num_workers=NUM_WORKERS,
                                     shuffle=True)
test_dataloader_simple = DataLoader(dataset=test_data_simple,
                                    batch_size=BATCH_SIZE,
                                    num_workers=NUM_WORKERS,
                                    shuffle=False)
class TingVGG(nn.Module):
    def __init__(self, input_shape: int, hidden_units: int, output_shape: int) -> None:
        super().__init__()
        self.conv_block1 = nn.Sequential(
            nn.Conv2d(in_channels=input_shape, out_channels=hidden_units,
                      kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=hidden_units, out_channels=hidden_units,
                      kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        self.conv_block2 = nn.Sequential(
            nn.Conv2d(in_channels=hidden_units, out_channels=hidden_units,
                      kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=hidden_units, out_channels=hidden_units,
                      kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Each conv block halves the spatial size (128 -> 64 -> 32),
        # so the flattened feature count is hidden_units * 32 * 32
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_features=hidden_units * 32 * 32, out_features=output_shape)
        )

    def forward(self, x: torch.Tensor):
        x = self.conv_block1(x)
        # print(x.shape)
        x = self.conv_block2(x)
        # print(x.shape)
        x = self.classifier(x)
        # print(x.shape)
        return x
torch.manual_seed(42)
model_0 = TingVGG(input_shape=1,  # number of color channels in the input image (C, H, W)
                  hidden_units=10,
                  output_shape=len(train_data_simple.classes)).to(device)
model_0
image_batch, label_batch = next(iter(train_dataloader_simple))
image_batch.shape, label_batch.shape
The output is:
(torch.Size([32, 3, 128, 128]), torch.Size([32]))
but I think it should be:
(torch.Size([32, 1, 128, 128]), torch.Size([32]))
model_0(image_batch.to(device))  # running this line raises the error above
I don't know where the fault in my code is. I've just begun learning PyTorch, so please bear with me if my question isn't well put.
Answer:
You said your images are black and white, so the model should take a single channel, and you've already set input_shape=1 correctly. The real culprit is the data loading: ImageFolder's default loader opens each file with PIL and converts it to RGB, so every batch comes out with 3 channels no matter what the TIFFs contain. That's why you see torch.Size([32, 3, 128, 128]). Either convert the images back to grayscale in your transform, or change input_shape to 3 and keep the RGB tensors.
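A minimal sketch of the grayscale fix, keeping your variable names (transforms.Grayscale is the stock torchvision transform; the rest is your code unchanged):

simple_transform = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),  # collapse the loader's 3 identical channels to 1
    transforms.Resize(size=(128, 128)),
    transforms.ToTensor(),
])

Rebuild the datasets and dataloaders with this transform, then rerun your shape check:

image_batch, label_batch = next(iter(train_dataloader_simple))
print(image_batch.shape)  # expected: torch.Size([32, 1, 128, 128])

That matches the 1-channel input your model_0 expects, so the forward pass should run without the channel error.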