Implementing Your Own Deep Neural Network in PyTorch (on a Public Dataset)

1. The training script: train.py

  Note: before running this code, you need a working CUDA-enabled (GPU) build of PyTorch; setting up that environment is not covered here.
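
  To sanity-check the environment first, you can run a few standard PyTorch calls (all of them documented torch APIs):

import torch

print(torch.__version__)                  # installed PyTorch version
print(torch.cuda.is_available())          # True if a usable CUDA build and GPU are present
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # name of the first GPU

  The full train.py follows: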

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

# Check whether a GPU is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device:", device)

# Data preprocessing transforms
transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
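# Note: Normalize computes out = (in - mean) / std per channel, so with
# mean = std = 0.5 the [0, 1] range produced by ToTensor is mapped to [-1, 1].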

# Load the CIFAR-10 training set
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                               download=True, transform=transform)

# Create the data loader
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=8,
                                           shuffle=True, num_workers=0)

# Define the neural network model
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
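        # Each 2x2 max-pool halves the spatial size: 256 -> 128 -> 64 -> 32,
        # so the flattened feature size entering fc1 is 128 channels * 32 * 32.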
        self.fc1 = nn.Linear(128 * 32 * 32, 512)
        self.fc2 = nn.Linear(512, 10)

    def forward(self, x):
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        x = self.pool(torch.relu(self.conv3(x)))
        x = x.view(-1, 128 * 32 * 32)
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# Instantiate the model and move it to the available device
model = CNN().to(device)

# Define the loss function
criterion = nn.CrossEntropyLoss()

# Define the optimizer
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
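# (Optional) Adam is a common alternative to SGD that often needs less
# learning-rate tuning on small CNNs:
# optimizer = optim.Adam(model.parameters(), lr=1e-3)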

if __name__ == '__main__':
    # Train the network
    for epoch in range(5):
        running_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            inputs, labels = data[0].to(device), data[1].to(device)

            # Zero the parameter gradients
            optimizer.zero_grad()

            # Forward pass
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # Backward pass + optimizer step
            loss.backward()
            optimizer.step()

            # Accumulate the running loss and print it periodically
            running_loss += loss.item()
            if i % 200 == 199:    # every 200 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 200))
                running_loss = 0.0

    print('Finished Training')

    # Save the trained weights to a file
    torch.save(model.state_dict(), 'cifar10_cnn_model.pth')
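
  If you later want to resume training rather than just run inference, it is common to checkpoint the optimizer state and epoch counter as well. A minimal sketch (the file name checkpoint.pth is my own choice):

torch.save({
    'epoch': epoch,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
}, 'checkpoint.pth')

# To resume later:
checkpoint = torch.load('checkpoint.pth')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
start_epoch = checkpoint['epoch'] + 1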

2. The evaluation script: val.py

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import cv2

# Check whether a GPU is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device:", device)

# Data preprocessing transforms (must match the transforms used for training)
transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# Load the CIFAR-10 test set
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                              download=True, transform=transform)

# Create the test data loader
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=8,
                                             shuffle=False, num_workers=0)

# Model definition (must be identical to the architecture used in train.py)
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(128 * 32 * 32, 512)
        self.fc2 = nn.Linear(512, 10)

    def forward(self, x):
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        x = self.pool(torch.relu(self.conv3(x)))
        x = x.view(-1, 128 * 32 * 32)
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)
        return x
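# In a larger project you would typically import CNN from train.py instead of
# duplicating the class definition here.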
# Helper to display a grid of images with matplotlib
def imshow(img):
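    # undo Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)): x / 2 + 0.5 maps [-1, 1] back to [0, 1]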
    img = img / 2 + 0.5
    npimg = img.numpy()
    # rearrange the tensor from CHW to HWC, the layout matplotlib expects
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()


model = CNN().to(device)
model.load_state_dict(torch.load('cifar10_cnn_model.pth', map_location=device))
model.eval()
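# eval() puts layers such as Dropout and BatchNorm into inference mode; this
# model has neither, but calling it before evaluation is good practice.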


if __name__ == '__main__':
    # Evaluate the model on the test set
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data[0].to(device), data[1].to(device)
            outputs = model(images)
            # torch.max returns (max value, argmax index); the index is the predicted class
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('Accuracy on the test images: %d %%' % (100 * correct / total))

    # Show a few test images together with their predictions

    # Create an iterator over the test data loader
    dataiter = iter(test_loader)
    # Fetch the next batch; dataiter.next() was removed in newer PyTorch
    # versions, so use the built-in next() instead
    images, labels = next(dataiter)
    # Move the batch to the selected device (the GPU, if available)
    images, labels = images.to(device), labels.to(device)

    dip_flag = False
    if dip_flag:
        # -------------------------------------------
        # Optional: display the images with OpenCV instead
        # -------------------------------------------
        np_images = images.cpu().numpy()
        # Loop over the batch and show each image in turn
        for i in range(len(np_images)):
            # Undo the normalization
            np_image = np.transpose(np_images[i], (1, 2, 0))   # CHW -> HWC
            np_image = np_image * 0.5 + 0.5

            # Convert from float in [0, 1] to uint8 in [0, 255]
            np_image = (np_image * 255).astype(np.uint8)
            # OpenCV expects BGR channel order, so convert from RGB first
            np_image = cv2.cvtColor(np_image, cv2.COLOR_RGB2BGR)
            # Show the image and wait for a key press before the next one
            cv2.imshow("Image {}".format(i + 1), np_image)
            cv2.waitKey(0)
        cv2.destroyAllWindows()

    imshow(torchvision.utils.make_grid(images.cpu()))
    print('GroundTruth: ', ' '.join('%5s' % test_dataset.classes[labels[j]] for j in range(8)))
    outputs = model(images)
    _, predicted = torch.max(outputs, 1)
    print('Predicted: ', ' '.join('%5s' % test_dataset.classes[predicted[j]]
                                  for j in range(8)))
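
  Beyond the single overall number, per-class accuracy is often more informative on CIFAR-10. A minimal sketch that reuses model, device, test_loader, and test_dataset from val.py above:

class_correct = [0] * 10
class_total = [0] * 10
with torch.no_grad():
    for data in test_loader:
        images, labels = data[0].to(device), data[1].to(device)
        _, predicted = torch.max(model(images), 1)
        for label, pred in zip(labels, predicted):
            class_total[label] += 1
            class_correct[label] += int(label == pred)

for i, name in enumerate(test_dataset.classes):
    print('%10s : %.1f %%' % (name, 100.0 * class_correct[i] / class_total[i]))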




Run the scripts as they are (train.py first, then val.py); I have tested both myself and they work.