Python Check-in, Day 52


@浙大疏锦行

Assignment:

For the simple CNN from Day 41, see whether accuracy can be pushed higher with the help of the tuning guide. (Note that the "improved" model below is actually a fully connected MLP on flattened pixels, not a CNN; a small convolutional sketch for comparison appears after the model definition.)

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np

# Font configuration (SimHei) for CJK text
plt.rcParams["font.family"] = ["SimHei"]
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with non-ASCII fonts

# 1. Improved data preprocessing - add data augmentation
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # random crop with padding
    transforms.RandomHorizontalFlip(),     # random horizontal flip
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))  # commonly used CIFAR-10 channel statistics
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
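Augmentation can be pushed further if accuracy stalls. A heavier pipeline (a sketch of options, not what produced the results below) might add color jitter and random erasing; RandomErasing operates on tensors, so it must come after ToTensor:

transform_train_heavy = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),  # photometric augmentation
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    transforms.RandomErasing(p=0.25)  # blanks a random rectangle; tensor-only, hence after ToTensor
])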

# 2. Load the CIFAR-10 dataset
train_dataset = datasets.CIFAR10(
    root='./data',
    train=True,
    download=True,
    transform=transform_train
)

test_dataset = datasets.CIFAR10(
    root='./data',
    train=False,
    transform=transform_test
)
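As a sanity check on the Normalize constants: the per-channel mean of the raw training images does come out to roughly (0.4914, 0.4822, 0.4465), while the per-pixel std comes out somewhat higher than the widely copied (0.2023, 0.1994, 0.2010). A small sketch to recompute them (assumes the dataset is already downloaded):

raw_train = datasets.CIFAR10(root='./data', train=True, transform=transforms.ToTensor())
raw_loader = DataLoader(raw_train, batch_size=1000)
channel_sum = torch.zeros(3)
channel_sq_sum = torch.zeros(3)
num_pixels = 0
for imgs, _ in raw_loader:
    channel_sum += imgs.sum(dim=(0, 2, 3))          # per-channel sum over batch and spatial dims
    channel_sq_sum += (imgs ** 2).sum(dim=(0, 2, 3))
    num_pixels += imgs.shape[0] * imgs.shape[2] * imgs.shape[3]
mean = channel_sum / num_pixels
std = (channel_sq_sum / num_pixels - mean ** 2).sqrt()
print(mean, std)

Either set of constants works in practice, as long as training and test use the same normalization.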

# 3. Increase the batch size from 64 to 128
batch_size = 128
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
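Two optional DataLoader tweaks for GPU training (assumptions on my part, not part of the original run): pin_memory speeds up host-to-device copies, and persistent_workers avoids re-spawning worker processes every epoch. On Windows, multi-worker loading also requires the script body to sit under an if __name__ == '__main__': guard.

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                          num_workers=2, pin_memory=True, persistent_workers=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                         num_workers=2, pin_memory=True, persistent_workers=True)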

# 4. Improved model architecture - a deeper, wider network
class ImprovedMLP(nn.Module):
    def __init__(self):
        super(ImprovedMLP, self).__init__()
        self.flatten = nn.Flatten()
        # wider and deeper stack of fully connected layers
        self.layer1 = nn.Linear(3072, 1024)  # 3*32*32 = 3072 input features
        self.bn1 = nn.BatchNorm1d(1024)  # add batch normalization
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(0.3)
        
        self.layer2 = nn.Linear(1024, 1024)
        self.bn2 = nn.BatchNorm1d(1024)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(0.3)
        
        self.layer3 = nn.Linear(1024, 512)
        self.bn3 = nn.BatchNorm1d(512)
        self.relu3 = nn.ReLU()
        self.dropout3 = nn.Dropout(0.3)
        
        self.layer4 = nn.Linear(512, 256)
        self.bn4 = nn.BatchNorm1d(256)
        self.relu4 = nn.ReLU()
        self.dropout4 = nn.Dropout(0.3)
        
        self.layer5 = nn.Linear(256, 10)
        
    def forward(self, x):
        x = self.flatten(x)
        
        x = self.layer1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.dropout1(x)
        
        x = self.layer2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.dropout2(x)
        
        x = self.layer3(x)
        x = self.bn3(x)
        x = self.relu3(x)
        x = self.dropout3(x)
        
        x = self.layer4(x)
        x = self.bn4(x)
        x = self.relu4(x)
        x = self.dropout4(x)
        
        x = self.layer5(x)
        
        return x
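One caveat: ImprovedMLP flattens each image and discards spatial structure, while the assignment refers to the Day 41 CNN. For comparison, a small convolutional baseline for the same 3x32x32 input could look like the sketch below (the actual Day 41 architecture is assumed, not reproduced):

class SimpleCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),   # 32x32 -> 32x32
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2),                              # 32x32 -> 16x16
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2),                              # 16x16 -> 8x8
        )
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(64 * 8 * 8, 256),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(256, 10),
        )

    def forward(self, x):
        return self.classifier(self.features(x))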

# Use the GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Instantiate the model and move it to the device
model = ImprovedMLP()
model = model.to(device)

# 5. Optimizer and learning-rate schedule - cosine annealing
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-4)  # weight decay adds L2 regularization
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)  # cosine annealing over the 200 planned epochs
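Learning-rate warmup is often paired with cosine annealing but is not actually configured above. If wanted, a short linear warmup can be chained in with SequentialLR (a sketch; the 5-epoch warmup length is an arbitrary choice):

warmup = optim.lr_scheduler.LinearLR(optimizer, start_factor=0.1, total_iters=5)  # ramp the LR up over 5 epochs
cosine = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=195)               # anneal over the remaining epochs
scheduler = optim.lr_scheduler.SequentialLR(optimizer, schedulers=[warmup, cosine], milestones=[5])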

# 6. Early stopping
class EarlyStopping:
    def __init__(self, patience=10, delta=0):
        self.patience = patience
        self.delta = delta
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        
    def __call__(self, val_acc):
        score = val_acc
        
        if self.best_score is None:
            self.best_score = score
        elif score < self.best_score + self.delta:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.counter = 0
            
        return self.early_stop
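EarlyStopping only halts training; at that point the weights in memory belong to the last epoch, not the best one. A hypothetical extension that also snapshots the best weights (the call site in train() would become early_stopping(epoch_test_acc, model)):

class EarlyStoppingWithCheckpoint(EarlyStopping):
    def __init__(self, patience=10, delta=0):
        super().__init__(patience, delta)
        self.best_state = None

    def __call__(self, val_acc, model):
        # snapshot before the parent updates best_score
        if self.best_score is None or val_acc >= self.best_score + self.delta:
            self.best_state = {k: v.detach().cpu().clone()
                               for k, v in model.state_dict().items()}
        return super().__call__(val_acc)

# after training: model.load_state_dict(early_stopping.best_state)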

# 7. Train the model (improved version: records training and test accuracy/loss)
def train(model, train_loader, test_loader, criterion, optimizer, scheduler, device, epochs):
    # per-epoch accuracy and loss history
    train_acc_history = []
    train_loss_history = []
    test_acc_history = []
    test_loss_history = []
    
    # early-stopping instance
    early_stopping = EarlyStopping(patience=15)
    
    for epoch in range(epochs):
        # switch back to train mode at the start of every epoch; the evaluation
        # phase below would otherwise leave dropout and batch norm in eval mode
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0
        
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            
            running_loss += loss.item()
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()
            
            # print training progress every 100 batches
            if (batch_idx + 1) % 100 == 0:
                print(f'Epoch: {epoch+1}/{epochs} | Batch: {batch_idx+1}/{len(train_loader)} '
                      f'| Loss: {loss.item():.4f} | Acc: {100.*correct/total:.2f}%')
        
        # average training loss and accuracy for this epoch
        epoch_train_loss = running_loss / len(train_loader)
        epoch_train_acc = 100. * correct / total
        train_loss_history.append(epoch_train_loss)
        train_acc_history.append(epoch_train_acc)
        
        # evaluation phase
        model.eval()
        test_loss = 0
        correct_test = 0
        total_test = 0
        
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                test_loss += criterion(output, target).item()
                _, predicted = output.max(1)
                total_test += target.size(0)
                correct_test += predicted.eq(target).sum().item()
        
        epoch_test_loss = test_loss / len(test_loader)
        epoch_test_acc = 100. * correct_test / total_test
        test_loss_history.append(epoch_test_loss)
        test_acc_history.append(epoch_test_acc)
        
        print(f'Epoch {epoch+1}/{epochs} done | Train acc: {epoch_train_acc:.2f}% | Test acc: {epoch_test_acc:.2f}%')
        
        # step the learning-rate scheduler
        scheduler.step()
        
        # check early stopping
        if early_stopping(epoch_test_acc):
            print(f"Early stopping triggered! Halting training at epoch {epoch+1}")
            break
    
    # plot training and test accuracy curves
    plot_accuracy(train_acc_history, test_acc_history)
    # plot training and test loss curves
    plot_loss(train_loss_history, test_loss_history)
    
    return epoch_test_acc, epoch_test_loss

# 8. Plot accuracy curves
def plot_accuracy(train_acc, test_acc):
    plt.figure(figsize=(10, 5))
    plt.plot(range(1, len(train_acc)+1), train_acc, 'b-', label='Train accuracy')
    plt.plot(range(1, len(test_acc)+1), test_acc, 'r-', label='Test accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Training and Test Accuracy')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()

# 9. Plot loss curves
def plot_loss(train_loss, test_loss):
    plt.figure(figsize=(10, 5))
    plt.plot(range(1, len(train_loss)+1), train_loss, 'b-', label='Train loss')
    plt.plot(range(1, len(test_loss)+1), test_loss, 'r-', label='Test loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training and Test Loss')
    plt.legend()
    plt.grid(True)
    plt.tight_layout()
    plt.show()

# 10. Run training and evaluation
epochs = 200  # more epochs (matches the scheduler's T_max)
print("Starting training...")
final_accuracy, final_loss = train(model, train_loader, test_loader, criterion, optimizer, scheduler, device, epochs)
print(f"Training complete! Final test accuracy: {final_accuracy:.2f}% | Final test loss: {final_loss:.4f}")

# save the model weights
torch.save(model.state_dict(), 'cifar10_improved_mlp_model.pth')
print("Model saved as: cifar10_improved_mlp_model.pth")

Training complete! Final test accuracy: 93.98%
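To reuse the saved weights later, a minimal loading sketch (the file name matches the save call above):

model = ImprovedMLP()
model.load_state_dict(torch.load('cifar10_improved_mlp_model.pth', map_location=device))
model.to(device)
model.eval()  # switch to eval mode before inference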