How do I find out the tensor shapes?


I have been trying to use a CNN for sentiment analysis, but I cannot figure out how the tensors are transformed inside the forward method. When I tried to implement my own model, it failed with the error "Expected 3-dimensional tensor, but got 4-dimensional tensor for argument". Can anyone tell me how to find out the shape of the tensors going into and coming out of each layer?

I found the code below on the internet for reference, but I cannot work out exactly how the tensor shapes change, or how to know what dimension layout the next layer/function expects for its argument. Can anyone explain how the forward method in this code works?

import torch
import torch.nn as nn
import torch.nn.functional as F

class CNN(nn.Module):
    def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim,
                 dropout, pad_idx):

        super().__init__()

        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx)

        self.conv_0 = nn.Conv2d(in_channels = 1,
                                out_channels = n_filters,
                                kernel_size = (filter_sizes[0], embedding_dim))

        self.conv_1 = nn.Conv2d(in_channels = 1,
                                out_channels = n_filters,
                                kernel_size = (filter_sizes[1], embedding_dim))

        self.conv_2 = nn.Conv2d(in_channels = 1,
                                out_channels = n_filters,
                                kernel_size = (filter_sizes[2], embedding_dim))

        self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)

        self.dropout = nn.Dropout(dropout)

    def forward(self, text):

        #text = [batch size, sent len]

        embedded = self.embedding(text)

        #embedded = [batch size, sent len, emb dim]

        embedded = embedded.unsqueeze(1)

        #embedded = [batch size, 1, sent len, emb dim]

        conved_0 = F.relu(self.conv_0(embedded).squeeze(3))
        conved_1 = F.relu(self.conv_1(embedded).squeeze(3))
        conved_2 = F.relu(self.conv_2(embedded).squeeze(3))

        #conved_n = [batch size, n_filters, sent len - filter_sizes[n] + 1]

        pooled_0 = F.max_pool1d(conved_0, conved_0.shape[2]).squeeze(2)
        pooled_1 = F.max_pool1d(conved_1, conved_1.shape[2]).squeeze(2)
        pooled_2 = F.max_pool1d(conved_2, conved_2.shape[2]).squeeze(2)

        #pooled_n = [batch size, n_filters]

        cat = self.dropout(torch.cat((pooled_0, pooled_1, pooled_2), dim = 1))

        #cat = [batch size, n_filters * len(filter_sizes)]

        return self.fc(cat)
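To make the shape flow concrete, here is a minimal sketch that instantiates the model with hypothetical hyperparameters (all values below are illustrative, not from the original post) and pushes a dummy batch of token ids through it:

# Hypothetical hyperparameters, chosen only for illustration
model = CNN(vocab_size=5000, embedding_dim=100, n_filters=64,
            filter_sizes=[3, 4, 5], output_dim=2,
            dropout=0.5, pad_idx=0)

text = torch.randint(0, 5000, (8, 30))  # [batch size = 8, sent len = 30]
out = model(text)
print(out.shape)  # torch.Size([8, 2]) = [batch size, output_dim]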

python deep-learning pytorch sentiment-analysis tensor
1 Answer

The simplest way is to print the tensor's shape after each step of the forward pass:

def forward(self, text):

    print(text.shape)

    embedded = self.embedding(text)

    print(embedded.shape)

    embedded = embedded.unsqueeze(1)

    print(embedded.shape)

    conved_0 = F.relu(self.conv_0(embedded).squeeze(3))
    conved_1 = F.relu(self.conv_1(embedded).squeeze(3))
    conved_2 = F.relu(self.conv_2(embedded).squeeze(3))

    print(conved_2.shape)

    ....
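If you would rather not edit forward itself, an alternative (a sketch using standard PyTorch forward hooks, assuming the model and dummy input from the example above) is to register a hook on every submodule and let it report the shapes. Note that hooks only fire for nn.Module calls, so purely functional steps such as F.relu or F.max_pool1d are not captured:

def shape_hook(name):
    def hook(module, inputs, output):
        print(f"{name}: in {tuple(inputs[0].shape)} -> out {tuple(output.shape)}")
    return hook

for name, module in model.named_modules():
    if name:  # skip the top-level CNN container itself
        module.register_forward_hook(shape_hook(name))

model(text)  # prints the input/output shape of every submodule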

By the way, I think your code has a couple of problems. I suppose you meant to use Conv1d rather than Conv2d, and each pooling should come right after its corresponding convolution, i.e. conv -> pool -> conv -> pool -> ..., not conv -> conv -> conv -> pool -> pool -> pool.

Consider using this code:

class CNN(nn.Module):
    def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim, dropout, pad_idx):

        super().__init__()

        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx)

        # Conv1d slides over the sentence dimension; the embedding
        # dimension becomes the input channels
        self.conv_0 = nn.Conv1d(in_channels = embedding_dim,
                                out_channels = n_filters,
                                kernel_size = filter_sizes[0])

        self.conv_1 = nn.Conv1d(in_channels = embedding_dim,
                                out_channels = n_filters,
                                kernel_size = filter_sizes[1])

        self.conv_2 = nn.Conv1d(in_channels = embedding_dim,
                                out_channels = n_filters,
                                kernel_size = filter_sizes[2])

        self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)

        self.dropout = nn.Dropout(dropout)

    def forward(self, text):

        #text = [batch size, sent len]

        embedded = self.embedding(text)

        #embedded = [batch size, sent len, emb dim]

        # Conv1d expects [batch size, channels, length]
        embedded = embedded.permute(0, 2, 1)

        #embedded = [batch size, emb dim, sent len]

        conved_0 = F.relu(self.conv_0(embedded))
        conved_1 = F.relu(self.conv_1(embedded))
        conved_2 = F.relu(self.conv_2(embedded))

        #conved_n = [batch size, n_filters, sent len - filter_sizes[n] + 1]

        # pool over the whole remaining length so every branch
        # ends up as [batch size, n_filters]
        pooled_0 = F.max_pool1d(conved_0, conved_0.shape[2]).squeeze(2)
        pooled_1 = F.max_pool1d(conved_1, conved_1.shape[2]).squeeze(2)
        pooled_2 = F.max_pool1d(conved_2, conved_2.shape[2]).squeeze(2)

        #pooled_n = [batch size, n_filters]

        cat = self.dropout(torch.cat((pooled_0, pooled_1, pooled_2), dim = 1))

        #cat = [batch size, n_filters * len(filter_sizes)]

        return self.fc(cat)
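As a quick sanity check (same hypothetical hyperparameters and dummy batch as in the sketch earlier), the revised Conv1d model produces the expected output shape:

model = CNN(vocab_size=5000, embedding_dim=100, n_filters=64,
            filter_sizes=[3, 4, 5], output_dim=2,
            dropout=0.5, pad_idx=0)
out = model(torch.randint(0, 5000, (8, 30)))
print(out.shape)  # torch.Size([8, 2]) = [batch size, output_dim]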