pyTorch mat1 and mat2 cannot be multiplied

Issue

I am getting the following error:

RuntimeError: mat1 and mat2 shapes cannot be multiplied (32x33856 and 640000x256)

I don’t understand how do I need to change the parameters of my net. I took the net created in this paper and tried to modify the parameters to meet my needs.This is the code, I changed the parameters of the first convolution but still get the error:

class ChordClassificationNetwork(nn.Module):
    def __init__(self, train_model=False):
        super(ChordClassificationNetwork, self).__init__()
        self.train_model = train_model
        self.flatten = nn.Flatten()
        self.firstConv = nn.Conv2d(3, 64, (3, 3))
        self.secondConv = nn.Conv2d(64, 64, (3, 3))
        self.pool = nn.MaxPool2d(2)
        self.drop = nn.Dropout(0.25)
        self.fc1 = nn.Linear(100*100*64, 256)
        self.fc2 = nn.Linear(256, 256)
        self.outLayer = nn.Linear(256, 7)

    def forward(self, x):
        x = self.firstConv(x)
        x = F.relu(x)
        x = self.pool(x)
        x = self.secondConv(x)
        x = F.relu(x)
        x = self.pool(x)
        x = self.drop(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.drop(x)
        x = self.outLayer(x)
        output = F.softmax(x, dim=1)
        return output 

and this is the training file:

device = ("cuda" if torch.cuda.is_available() else "cpu")

transformations = transforms.Compose([
     transforms.Resize((100, 100))
])

num_epochs = 10
learning_rate = 0.001
train_CNN = False
batch_size = 32
shuffle = True
pin_memory = True
num_workers = 1

dataset = GuitarDataset("../chords_data/cropped_images/train", transform=transformations)
train_set, validation_set = torch.utils.data.random_split(dataset, [int(0.8 * len(dataset)), len(dataset) - int(0.8*len(dataset))])
train_loader = DataLoader(dataset=train_set, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers,
                          pin_memory=pin_memory)
validation_loader = DataLoader(dataset=validation_set, shuffle=shuffle, batch_size=batch_size, num_workers=num_workers,
                               pin_memory=pin_memory)

model = ChordClassificationNetwork().to(device)

criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)


def check_accuracy(loader, model):
    if loader == train_loader:
        print("Checking accuracy on training data")
    else:
        print("Checking accuracy on validation data")

    num_correct = 0
    num_samples = 0
    model.eval()

    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device)
            y = y.to(device=device)

            scores = model(x)
            predictions = torch.tensor([1.0 if i >= 0.5 else 0.0 for i in scores]).to(device)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)
            print(
                f"Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}"
            )
    return f"{float(num_correct) / float(num_samples) * 100:.2f}"


def train():
    model.train()
    for epoch in range(num_epochs):
        loop = tqdm(train_loader, total=len(train_loader), leave=True)
        if epoch % 2 == 0:
            loop.set_postfix(val_acc=check_accuracy(validation_loader, model))
        for imgs, labels in loop:
            imgs = imgs.to(device)
            labels = labels.to(device)
            outputs = model(imgs)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loop.set_description(f"Epoch [{epoch}/{num_epochs}]")
            loop.set_postfix(loss=loss.item())


if __name__ == "__main__":
    train()

What am I doing wrong?

Solution

Look at the error message, the issue comes from the fc1 layer which doesn’t have the required number of neurons. It is receiving a tensor of shape (batch_size, 33856) but expects (batch_size, 640000). The reduction in dimensionality is caused by the different layers you have applied to your input tensor before fc1.

You can fix this by defining fc1 with:

        self.fc1 = nn.Linear(33856, 256)

Alternatively, you can use nn.LazyLinear which will initialize its weights with the appropriate number of neurons at runtime depending on the input it receives. But that’s lazy:

        self.fc1 = nn.LazyLinear(256)

Answered By – Ivan

Answer Checked By – Katrina (AngularFixing Volunteer)

Leave a Reply

Your email address will not be published.