"element 0 of tensors does not require grad and does not have a grad_fn"

Problem description · Votes: 0 · Answers: 1

I'm running into an issue while training a deep learning model with PyTorch Lightning. During training I get the following error:

RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
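
For reference, the same error can be reproduced in plain PyTorch by calling backward() on a tensor that was created outside of autograd (a minimal repro, unrelated to my actual code):

import torch

x = torch.ones(3)   # requires_grad defaults to False
loss = x.sum()      # loss has no grad_fn, since x does not track gradients
loss.backward()     # RuntimeError: element 0 of tensors does not require grad ...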

Context: I'm working on a text classification task and have implemented a custom dataset class UCC_Dataset as well as a LightningDataModule subclass UCC_Data_Module. My model is defined as a subclass of pl.LightningModule called UCC_Comment_Classifier. The error occurs when I call trainer.fit(model, ucc_data_module).

Here is the relevant code snippet where the error occurs:

import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from transformers import AutoModel, AdamW, get_cosine_schedule_with_warmup


class UCC_Comment_Classifier(pl.LightningModule):

  def __init__(self):
    super().__init__()
    self.config = config  # module-level config dict (see below)
    self.pretrained_model = AutoModel.from_pretrained(config['model_name'], return_dict = True)
    self.hidden = torch.nn.Linear(self.pretrained_model.config.hidden_size, self.pretrained_model.config.hidden_size)
    self.classifier = torch.nn.Linear(self.pretrained_model.config.hidden_size, self.config['n_labels'])
    torch.nn.init.xavier_uniform_(self.classifier.weight)
    self.loss_func = nn.BCEWithLogitsLoss(reduction='mean')
    self.dropout = nn.Dropout()
    
    # Enable gradients for all model parameters
    for param in self.parameters():
        param.requires_grad = True
    
  def forward(self, input_ids, attention_mask, labels=None):
    # roberta layer
    output = self.pretrained_model(input_ids=input_ids, attention_mask=attention_mask)
    pooled_output = torch.mean(output.last_hidden_state, 1)
    # final logits
    pooled_output = self.dropout(pooled_output)
    pooled_output = self.hidden(pooled_output)
    pooled_output = F.relu(pooled_output)
    pooled_output = self.dropout(pooled_output)
    logits = self.classifier(pooled_output)
    # calculate loss
    loss = None
    if labels is not None:
      loss = self.loss_func(logits.view(-1, self.config['n_labels']), labels.view(-1, self.config['n_labels']))
    return loss, logits

  def training_step(self, batch, batch_index):
    loss, outputs = self(**batch)
    self.log("train loss ", loss, prog_bar = True, logger=True)
    return {"loss":loss, "predictions":outputs, "labels": batch["labels"]}

  def validation_step(self, batch, batch_index):
    loss, outputs = self(**batch)
    self.log("validation loss ", loss, prog_bar = True, logger=True)
    return {"val_loss": loss, "predictions":outputs, "labels": batch["labels"]}

  def predict_step(self, batch, batch_index, dataloader_idx=None):
    loss, outputs = self(**batch)
    return outputs



  def configure_optimizers(self):
    optimizer = AdamW(self.parameters(), lr=self.config['lr'], weight_decay=self.config['weight_decay'], no_deprecation_warning=True)
    total_steps = self.config['train_size'] * self.config['n_epochs']
    warmup_steps = math.floor(total_steps * self.config['warmup'])

    scheduler = get_cosine_schedule_with_warmup(optimizer, warmup_steps, total_steps)
    return [optimizer], [scheduler]
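
For completeness, the config referenced by the model is a module-level dict with the keys below (the keys are the ones my code uses; the values here are just placeholders):

config = {
    'model_name': 'roberta-base',   # placeholder
    'n_labels': 5,                  # placeholder
    'lr': 2e-5,
    'weight_decay': 0.01,
    'train_size': 1000,             # used to estimate total scheduler steps
    'n_epochs': 10,
    'warmup': 0.1,
}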

I have verified that all model parameters have requires_grad set to True, but I'm not sure why this error is happening.
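
This is roughly the check I ran (a quick sketch):

model = UCC_Comment_Classifier()
assert all(p.requires_grad for p in model.parameters())  # passes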

machine-learning pytorch nlp kaggle
1 Answer

0 votes

The Trainer disables gradient tracking in the validation/test loops (for speed). If you want to enable it, you can do so in the validation step:

def validation_step(self, batch, batch_idx):
    torch.set_grad_enabled(True)
    ...

The more important step is to disable inference_mode on the Trainer. With the default inference_mode=True, the eval loops run under torch.inference_mode(), and gradients cannot be re-enabled from inside it, so torch.set_grad_enabled(True) alone is not enough:

trainer = Trainer(inference_mode=False)  # True by default

Full example:

import torch
from torch.utils.data import DataLoader, Dataset

from lightning.pytorch import LightningModule, Trainer


class RandomDataset(Dataset):
    def __init__(self, size, length):
        self.len = length
        self.data = torch.randn(length, size)

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return self.len


class BoringModel(LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def forward(self, x):
        return self.layer(x)

    def test_step(self, batch, batch_idx):
        # Re-enable grad tracking; this only works because the Trainer runs with inference_mode=False
        torch.set_grad_enabled(True)
        assert torch.is_grad_enabled()
        assert all(p.requires_grad for p in self.parameters())
        loss = self(batch).sum()
        loss.backward()
        self.log("test_loss", loss)

    def configure_optimizers(self):
        return torch.optim.SGD(self.layer.parameters(), lr=0.1)


def run():
    test_data = DataLoader(RandomDataset(32, 64), batch_size=2)
    model = BoringModel()
    trainer = Trainer(max_epochs=1, accelerator="cpu", inference_mode=False)
    trainer.test(model, dataloaders=test_data)


if __name__ == "__main__":
    run()
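
Applied to your setup, that would look something like this (a sketch; any other Trainer arguments you pass stay the same):

trainer = pl.Trainer(inference_mode=False)
trainer.fit(model, ucc_data_module)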