ValueError: Using a target size (torch.Size([4, 1, 1024, 1024])) that is different to the input size (torch.Size([4, 2, 1024, 1024])) is deprecated

Problem description

I am working on a segmentation problem. I have a folder of images and a folder of labels. The images are RGB and the labels are black-and-white masks, where white is my only class and black is the background. Think of white as "cat" and black as everything that is not a cat.

My setup is as follows. This is main.py, where I call all the functions and start everything.

import torch
import torch.nn as nn
import torch.optim as optim


from train import train
from validate import validate
from dataset import load_data
from engine import Model


def main():
    # Set your parameters
    num_classes = 2
    batch_size = 4
    num_epochs = 10
    learning_rate = 0.001

    # Set device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Data loaders from dataset.py
    train_loader, val_loader = load_data(batch_size)

    # Create model, criterion, optimizer
    model = Model(num_classes).to(device)
    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Training loop
    best_loss = float('inf')
    best_iou = 0.0

    for epoch in range(num_epochs):
        train_loss, train_iou = train(model, train_loader, criterion, optimizer, device)
        val_loss, val_iou = validate(model, val_loader, criterion, device)

        if val_loss < best_loss:
            best_loss = val_loss
            torch.save(model.state_dict(), "best_loss_model.pth")

        if val_iou > best_iou:
            best_iou = val_iou
            torch.save(model.state_dict(), "best_iou_model.pth")

        torch.save(model.state_dict(), f"last_model_epoch_{epoch + 1}.pth")

        print(f"Epoch [{epoch + 1}/{num_epochs}] - "
              f"Train Loss: {train_loss:.8f}, "
              f"Train IoU: {train_iou:.8f}, "
              f"Validation Loss: {val_loss:.8f}, "
              f"Validation IoU: {val_iou:.8f}")

    print(f"Best Loss: {best_loss:.8f}")
    print(f"Best IoU: {best_iou:.8f}")

if __name__ == "__main__":
    main()

This is train.py:

from tqdm import tqdm
import calculate_metrics
from sklearn.metrics import confusion_matrix
import torch

def train(model, train_loader, criterion, optimizer, device):
    model.train()
    running_loss = 0.0
    all_preds = []
    all_labels = []

    for inputs, labels in tqdm(train_loader, desc="Training"):
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()

        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        preds = torch.argmax(outputs, dim=1)
        all_preds.extend(preds.cpu().numpy())
        all_labels.extend(labels.cpu().numpy())

        running_loss += loss.item()

        conf_matrix = confusion_matrix(all_labels, all_preds)
        precision, recall, iou = calculate_metrics(conf_matrix)

    return running_loss / len(train_loader), iou

engine.py:

import torch.nn as nn
from torchvision.models.segmentation import deeplabv3_resnet101

class Model(nn.Module):
    def __init__(self, num_classes):
        super(Model, self).__init__()
        self.model = deeplabv3_resnet101(pretrained=True)
        self.model.classifier[-1] = nn.Conv2d(256, num_classes, kernel_size=(1, 1), stride=(1, 1))

    def forward(self, x):
        return self.model(x)['out'] 

This is dataset.py, which builds the datasets and dataloaders.

import os
from PIL import Image

from torchvision import transforms
from torch.utils.data import Dataset, DataLoader

class CustomDataset(Dataset):
    def __init__(self, image_folder, label_folder, transform=None):
        self.image_folder = image_folder
        self.label_folder = label_folder
        self.transform = transform

        self.images = sorted(os.listdir(image_folder))
        self.labels = sorted(os.listdir(label_folder))

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img_name = os.path.join(self.image_folder, self.images[idx])
        label_name = os.path.join(self.label_folder, self.labels[idx])

        image = Image.open(img_name).convert("RGB")
        label = Image.open(label_name)

        if self.transform:
            image = self.transform(image)
            label = self.transform(label)

        return image, label


def load_data(batch_size):
    # Set your paths
    train_image_folder = "path/to/train_images"
    train_label_folder = "path/to/train_labels"
    val_image_folder = "path/to/valid_images"
    val_label_folder = "path/to/valid_labels"


    train_transform = transforms.Compose([
            transforms.ToTensor(),
        ])
    val_transform = transforms.Compose([
            transforms.ToTensor(),
        ])



    # Create datasets and dataloaders
    train_dataset = CustomDataset(train_image_folder, train_label_folder, transform=train_transform)
    val_dataset = CustomDataset(val_image_folder, val_label_folder, transform=val_transform)

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    return train_loader, val_loader

I get the following error. I think it happens because my labels are being loaded as RGB when they actually need to be binary (normalized), but I'm not sure.

Traceback (most recent call last):
  File "/architecture/main.py", line 57, in <module>
    main()
  File "/architecture/main.py", line 34, in main
    train_loss, train_iou = train(model, train_loader, criterion, optimizer, device)
  File "/architecture/train.py", line 18, in train
    loss = criterion(outputs, labels)
  File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/loss.py", line 618, in forward
    return F.binary_cross_entropy(input, target, weight=self.weight, reduction=self.reduction)
  File "/usr/local/lib/python3.10/dist-packages/torch/nn/functional.py", line 3113, in binary_cross_entropy
    raise ValueError(
ValueError: Using a target size (torch.Size([4, 1, 1024, 1024])) that is different to the input size (torch.Size([4, 2, 1024, 1024])) is deprecated. Please ensure they have the same size.
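
To make the mismatch concrete, here is a minimal standalone snippet (shapes copied from the traceback, not from the pipeline above) that reproduces the same ValueError:

import torch
import torch.nn as nn

# Standalone repro: nn.BCELoss requires the prediction and target tensors
# to have identical shapes, but the model emits 2 channels while the mask has 1.
criterion = nn.BCELoss()
outputs = torch.rand(4, 2, 1024, 1024)  # model output (num_classes=2)
labels = torch.rand(4, 1, 1024, 1024)   # mask after ToTensor (1 channel)

try:
    criterion(outputs, labels)
except ValueError as e:
    print(e)  # "Using a target size ... different to the input size ..."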
Tags: python, image-processing, pytorch, computer-vision, image-segmentation
1 Answer

Here is how I solved the problem with GPT's help! Updated dataset.py:

import numpy as np

class CustomDataset(Dataset):
    # ...

    def __getitem__(self, idx):
        # ...

        # Convert label to NumPy array and then to binary mask
        label_np = np.array(label)
        label = (label_np > 0).astype(np.float32)

        if self.transform:
            image = self.transform(image)
            label = self.transform(label)

        return image, label
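
For reference, BCELoss also needs the model output to have the same [N, 1, H, W] shape as the binarized mask. One possible way to line them up (a sketch assuming a single foreground class; it is not part of the answer above) is a one-channel head combined with BCEWithLogitsLoss:

import torch
import torch.nn as nn
from torchvision.models.segmentation import deeplabv3_resnet101

# Sketch: a single-channel head so the prediction is [N, 1, H, W], matching
# the binarized mask. BCEWithLogitsLoss applies the sigmoid internally.
class BinaryModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.model = deeplabv3_resnet101(pretrained=True)
        self.model.classifier[-1] = nn.Conv2d(256, 1, kernel_size=1)

    def forward(self, x):
        return self.model(x)['out']  # raw logits, shape [N, 1, H, W]

model = BinaryModel()
criterion = nn.BCEWithLogitsLoss()

images = torch.rand(4, 3, 256, 256)                    # dummy RGB batch
masks = torch.randint(0, 2, (4, 1, 256, 256)).float()  # binarized labels
loss = criterion(model(images), masks)                 # shapes now match

With a single-channel output like this, predictions for the IoU would be thresholded probabilities, e.g. (torch.sigmoid(output) > 0.5), rather than an argmax over channels.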