PyTorch CNN training: "RuntimeError: expected scalar type Double but found Float"
I have just started learning PyTorch and built my first CNN. The dataset contains 3360 RGB images, which I converted into a [3360, 3, 224, 224] tensor. The data and labels are stored in a dataset (torch.utils.data.TensorDataset). Below is the training code.
def train_net():
    dataset = ld.load()
    data_iter = Data.DataLoader(dataset, batch_size=168, shuffle=True)
    net = model.VGG_19()
    summary(net, (3, 224, 224), device="cpu")
    loss_func = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9, dampening=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)
    for epoch in range(5):
        print("epoch:", epoch + 1)
        train_loss = 0
        for i, data in enumerate(data_iter, 0):
            x, y = data
            print(x.dtype)
            optimizer.zero_grad()
            out = net(x)
            loss = loss_func(out, y)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            if i % 100 == 99:
                print("loss:", train_loss / 100)
                train_loss = 0.0
    print("finish train")
Then I get this error:
Traceback (most recent call last):
  File "D:/python/DeepLearning/VGG/train.py", line 52, in <module>
    train_net()
  File "D:/python/DeepLearning/VGG/train.py", line 29, in train_net
    out = net(x)
  File "D:\python\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "D:\python\DeepLearning\VGG\model.py", line 37, in forward
    out = self.conv3_64(x)
  File "D:\python\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "D:\python\lib\site-packages\torch\nn\modules\container.py", line 117, in forward
    input = module(input)
  File "D:\python\lib\site-packages\torch\nn\modules\module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
  File "D:\python\lib\site-packages\torch\nn\modules\conv.py", line 423, in forward
    return self._conv_forward(input, self.weight)
  File "D:\python\lib\site-packages\torch\nn\modules\conv.py", line 419, in _conv_forward
    return F.conv2d(input, weight, self.bias, self.stride,
RuntimeError: expected scalar type Double but found Float
I think something is wrong with x, so I printed its dtype with print(x.dtype):
torch.float64
which is double rather than float. Do you know what is going on? Thanks for your help!
Answer
The error actually refers to the weights of the conv layers, which are float32 by default when the convolution (a matrix multiplication under the hood) is called. Your input is double (float64 in PyTorch) while the conv weights are float32, hence the dtype mismatch.
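To make the mismatch concrete, here is a minimal, self-contained sketch (not from the original post) that reproduces the error with a single conv layer and shows that casting the input to float32 resolves it; the exact error wording may differ slightly between PyTorch versions:

import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3)                   # conv weights are float32 by default
x64 = torch.randn(1, 3, 32, 32, dtype=torch.float64)    # double input, as in the question

try:
    conv(x64)                                            # input and weight dtypes disagree
except RuntimeError as e:
    print(e)                                             # e.g. "expected scalar type Double but found Float"

out = conv(x64.float())                                  # cast the input to float32 and it works
print(out.dtype)                                         # torch.float32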
So the solution in your case is:
def train_net():
    dataset = ld.load()
    data_iter = Data.DataLoader(dataset, batch_size=168, shuffle=True)
    net = model.VGG_19()
    summary(net, (3, 224, 224), device="cpu")
    loss_func = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9, dampening=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)
    for epoch in range(5):
        print("epoch:", epoch + 1)
        train_loss = 0
        for i, data in enumerate(data_iter, 0):
            x, y = data
            x = x.float()  # HERE IS THE CHANGE
            print(x.dtype)
            optimizer.zero_grad()
            out = net(x)
            loss = loss_func(out, y)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            if i % 100 == 99:
                print("loss:", train_loss / 100)
                train_loss = 0.0
    print("finish train")
This will definitely work.
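As an alternative (not shown in the original answer), the cast can also be done once where the dataset is built, so every batch already arrives as float32. Below is a minimal sketch under the assumption that the tensors come from NumPy arrays; images_np and labels_np are placeholder names, not variables from the question:

import torch
from torch.utils.data import TensorDataset

# images_np / labels_np are hypothetical NumPy arrays; NumPy floats default to
# float64, which is the usual source of this dtype mismatch.
x = torch.as_tensor(images_np).float()   # cast the images once to float32
y = torch.as_tensor(labels_np).long()    # nn.CrossEntropyLoss expects int64 targets
dataset = TensorDataset(x, y)

Converting the whole model to double with net.double() would also silence the error, but training in float32 is the usual choice.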