I'm using this CNN to detect information in EEG scans. It gains accuracy very slowly, and I'd like to know whether I'm missing something in one of the layers or doing something else wrong.
import torch
from torch.nn import (Module, Sequential, Conv1d, BatchNorm1d, LeakyReLU,
                      MaxPool1d, Dropout, Linear, LogSoftmax, Flatten,
                      CrossEntropyLoss)
from torch.optim import SGD

class Net(Module):
    def __init__(self):
        super(Net, self).__init__()
        self.cnn_layers = Sequential(
            Conv1d(1, 14, kernel_size=5, padding=1),
            BatchNorm1d(14),
            LeakyReLU(0.1),
            MaxPool1d(kernel_size=5, stride=1),
        )
        self.cnn_layer2 = Sequential(
            Conv1d(14, 10, kernel_size=5, padding=1),
            BatchNorm1d(10),
            LeakyReLU(0.1),
            MaxPool1d(kernel_size=5, stride=1),
            Dropout(0.2),
        )
        self.cnn_layer3 = Sequential(
            Conv1d(10, 10, kernel_size=5, padding=1),
            BatchNorm1d(10),
            LeakyReLU(0.1),
            MaxPool1d(kernel_size=5, stride=1),
            Dropout(0.2),
        )
        self.linear_layer1 = Sequential(
            Linear(in_features=35660, out_features=3500),
            BatchNorm1d(3500),
            LeakyReLU(0.1),
            Dropout(0.2)
        )
        self.linear_layer2 = Sequential(
            Linear(in_features=3500, out_features=2500),
            BatchNorm1d(2500),
            LeakyReLU(0.1),
            Dropout(0.2)
        )
        self.linear_layer3 = Sequential(
            Linear(in_features=2500, out_features=250),
            BatchNorm1d(250),
            LeakyReLU(0.1),
            Dropout(0.2)
        )
        self.linear_layer4 = Sequential(
            Linear(in_features=250, out_features=10)
        )
        self.logsoft = Sequential(
            LogSoftmax(dim=1)
        )
        self.flatten = Sequential(
            Flatten()  # probably has to be changed
        )

    def forward(self, x):
        x = self.cnn_layers(x)
        x = self.cnn_layer2(x)
        x = self.cnn_layer3(x)
        x = self.flatten(x)
        x = self.linear_layer1(x)
        x = self.linear_layer2(x)
        x = self.linear_layer3(x)
        x = self.linear_layer4(x)
        x = self.logsoft(x)
        return x
model = Net()

# Build a dataset by taking 255 columns and grouping them into 14 * 255 channels
class CustomDataSet():
    def __init__(self, csv_file, label, transform=None):
        self.df = csv_file
        self.transform = transform
        self.label = label

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, index):
        scan = self.df[index]
        label = self.label[index]
        if self.transform:
            scan = self.transform(scan)
        return scan, label

train_dataset = rows
print(train_dataset.shape)
train_dataset = CustomDataSet(csv_file=rows, label=labels)

optimizer = SGD(model.parameters(), lr=0.001, weight_decay=5.0e-5)
criterion = CrossEntropyLoss()
num_epochs = 500
train_loss_list = []
batch_size = 500
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
for epoch in range(num_epochs):
    print(f'Epoch {epoch + 1}/{num_epochs}:', end=' ')
    train_loss = 0
    total_correct = 0
    total_samples = 0
    model.train()
    # Iterating over the training dataset in batches
    for i, (scan, labels) in enumerate(train_loader):
        # Calculating the model output and the cross entropy loss
        outputs = model(scan)
        print(outputs.shape)
        loss = criterion(outputs, labels)
        # Updating weights according to calculated loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        _, predicted = torch.max(outputs, 1)
        total_correct += (predicted == labels).sum().item()
        total_samples += labels.size(0)
    # Printing accuracy and loss for each epoch
    accuracy = 100 * total_correct / total_samples
    print("Accuracy: ", accuracy)
    train_loss_list.append(train_loss / len(train_loader))
    print(f"Training loss = {train_loss_list[-1]}")
I've tried adding batch normalization and dropout to every layer. Each epoch trains on 20,000 scans, although I have access to 51,000, so I may try using more data. After 100 epochs the accuracy only reaches 13%. Is this normal, or am I doing something wrong?
You are using CrossEntropyLoss incorrectly.

Read the PyTorch docs: CrossEntropyLoss combines LogSoftmax and NLLLoss in a single operation.
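You can check that equivalence yourself with a minimal sketch (the shapes below are arbitrary, chosen only for illustration):

import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(4, 10)           # raw scores straight from a final Linear layer
targets = torch.randint(0, 10, (4,))  # integer class labels

ce  = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(ce, nll))  # True: CrossEntropyLoss == LogSoftmax followed by NLLLoss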
You apply a log softmax at the end of your model and then feed the result into CrossEntropyLoss, which means the log-softmax ends up being applied twice.