DAY 49 CBAM Attention

Published: 2025-07-05

By @浙大疏锦行: https://blog.csdn.net/weixin_45655710

Knowledge points recap:
  1. Review of the channel attention module
  2. The spatial attention module
  3. The definition of CBAM

Assignment: check the parameter count of today's model, and use TensorBoard to monitor the training process.

Core changes:

  1. In the main execution flow, call torchsummary.summary right after initializing the model to print its detailed parameter information.

  2. In the main execution flow, initialize a SummaryWriter with a unique log directory.

  3. Pass the writer object into the train_and_evaluate function.

  4. Inside the train_and_evaluate function, add comprehensive TensorBoard logging, including:

    • add_graph: records the model structure.

    • add_image: records sample images.

    • add_scalar: records training/test loss and accuracy, plus the learning rate.

    • add_histogram: periodically records the distributions of weights and gradients.

import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter # TensorBoard's core logging class
from torchsummary import summary # prints a per-layer parameter summary
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from tqdm import tqdm
import torchvision

# --- Step 1: CBAM module definitions (same as in the notes) ---
class ChannelAttention(nn.Module):
    def __init__(self, in_channels, ratio=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(in_channels, in_channels // ratio, bias=False),
            nn.ReLU(),
            nn.Linear(in_channels // ratio, in_channels, bias=False)
        )
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        b, c, h, w = x.shape
        avg_out = self.fc(self.avg_pool(x).view(b, c))
        max_out = self.fc(self.max_pool(x).view(b, c))
        attention = self.sigmoid(avg_out + max_out).view(b, c, 1, 1)
        return x * attention

class SpatialAttention(nn.Module):
    def __init__(self, kernel_size=7):
        super().__init__()
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size//2, bias=False)
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        pool_out = torch.cat([avg_out, max_out], dim=1)
        attention = self.conv(pool_out)
        return x * self.sigmoid(attention)

class CBAM(nn.Module):
    def __init__(self, in_channels, ratio=16, kernel_size=7):
        super().__init__()
        self.channel_attn = ChannelAttention(in_channels, ratio)
        self.spatial_attn = SpatialAttention(kernel_size)
    def forward(self, x):
        x = self.channel_attn(x)
        x = self.spatial_attn(x)
        return x

# --- Step 2: CNN model with CBAM (same as in the notes) ---
class CBAM_CNN(nn.Module):
    def __init__(self):
        super(CBAM_CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(2)
        self.cbam1 = CBAM(32)
        
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(2)
        self.cbam2 = CBAM(64)
        
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ReLU()
        self.pool3 = nn.MaxPool2d(2)
        self.cbam3 = CBAM(128)
        
        self.fc1 = nn.Linear(128 * 4 * 4, 512)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(512, 10)

    def forward(self, x):
        x = self.pool1(self.relu1(self.bn1(self.conv1(x))))
        x = self.cbam1(x)
        x = self.pool2(self.relu2(self.bn2(self.conv2(x))))
        x = self.cbam2(x)
        x = self.pool3(self.relu3(self.bn3(self.conv3(x))))
        x = self.cbam3(x)
        x = x.view(-1, 128 * 4 * 4)
        x = self.relu1(self.fc1(x)) # reuse relu1; ReLU is stateless, so sharing the module is safe
        x = self.dropout(x)
        x = self.fc2(x)
        return x

# --- Step 3: Data loading (same as in the notes) ---
def get_cifar10_loaders(batch_size=128):
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])
    test_transform = transforms.Compose([
        transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])
    train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform)
    test_dataset = datasets.CIFAR10(root='./data', train=False, transform=test_transform)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader

# --- Step 4: Training & evaluation (with TensorBoard integration) ---
def train_and_evaluate(model, device, train_loader, test_loader, epochs, writer):
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, factor=0.5)  # 'verbose' is deprecated in recent PyTorch
    
    # Log the model graph and a grid of sample images
    dataiter = iter(train_loader)
    images, _ = next(dataiter)
    writer.add_graph(model, images.to(device))
    img_grid = torchvision.utils.make_grid(images[:16])
    writer.add_image('CIFAR-10 Samples', img_grid, 0)

    global_step = 0
    for epoch in range(1, epochs + 1):
        model.train()
        loop = tqdm(train_loader, desc=f"Epoch [{epoch}/{epochs}] Training")
        for data, target in loop:
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            
            loop.set_postfix(loss=loss.item())
            writer.add_scalar('Train/Batch_Loss', loss.item(), global_step)
            global_step += 1
        
        model.eval()
        test_loss, correct, total = 0, 0, 0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                test_loss += criterion(output, target).item() * data.size(0)
                pred = output.argmax(dim=1)
                correct += pred.eq(target).sum().item()
                total += data.size(0)
        
        avg_test_loss = test_loss / total
        accuracy = 100. * correct / total
        
        writer.add_scalar('Test/Epoch_Loss', avg_test_loss, epoch)
        writer.add_scalar('Test/Epoch_Accuracy', accuracy, epoch)
        writer.add_scalar('Train/Learning_Rate', optimizer.param_groups[0]['lr'], epoch)
        
        for name, param in model.named_parameters():
            writer.add_histogram(f'Weights/{name}', param, epoch)
            if param.grad is not None:  # gradients left over from the last training batch
                writer.add_histogram(f'Gradients/{name}', param.grad, epoch)
        
        scheduler.step(avg_test_loss)
        print(f"Epoch {epoch} 完成 | 测试集损失: {avg_test_loss:.4f} | 测试集准确率: {accuracy:.2f}%")

# --- Step 5: Main execution flow ---
if __name__ == "__main__":
    # Initialization
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {DEVICE}")
    
    train_loader, test_loader = get_cifar10_loaders()
    model = CBAM_CNN().to(DEVICE)
    
    # --- Assignment 1: check the parameter count ---
    print("\n--- CBAM-CNN model parameter summary ---")
    summary(model, input_size=(3, 32, 32), device=DEVICE.type)  # torchsummary defaults to device="cuda"; pass the actual device
    
    # --- Assignment 2: monitor training with TensorBoard ---
    # Initialize the SummaryWriter with a unique, versioned log directory
    log_dir = "runs/cifar10_cbam_cnn_exp"
    version = 1
    while os.path.exists(f"{log_dir}_v{version}"):
        version += 1
    log_dir = f"{log_dir}_v{version}"
    writer = SummaryWriter(log_dir)
    print(f"\nTensorBoard 日志将保存在: {log_dir}")
    print("训练开始后,请在终端运行 `tensorboard --logdir=runs` 来查看可视化结果。")
    
    # Start training
    EPOCHS = 20 # adjust as needed
    train_and_evaluate(model, DEVICE, train_loader, test_loader, EPOCHS, writer)
    
    # Close the writer
    writer.close()
    print("\n✅ 训练完成,TensorBoard日志已保存。")

Code walkthrough

  1. Checking the parameter count (torchsummary)

    • Implementation: in the main execution flow (if __name__ == "__main__":), we call summary(model, input_size=(3, 32, 32)) right after initializing the CBAM_CNN model.

    • Purpose: before training starts, this prints the model's full structure, each layer's output shape, and the per-layer and total parameter counts to your terminal. The output shows that the CBAM modules increase the total parameter count only slightly compared with a plain CNN, confirming how lightweight they are; the sketch below cross-checks this with a manual count.
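
    As a cross-check, here is a minimal sketch that counts parameters with plain PyTorch instead of torchsummary. It assumes the CBAM_CNN class from the script above is in scope; the arithmetic in the comments follows directly from the module definitions.

model = CBAM_CNN()

# Total and trainable parameter counts, no extra library needed
total = sum(p.numel() for p in model.parameters())
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"Total params: {total:,} | Trainable: {trainable:,}")

# Isolate the CBAM overhead. For CBAM(32) with ratio=16:
#   channel attention: Linear(32->2) + Linear(2->32), no bias = 64 + 64 = 128
#   spatial attention: Conv2d(2, 1, kernel_size=7), no bias   = 2*7*7    = 98
# so a single CBAM(32) block adds only 226 parameters.
cbam_params = sum(p.numel()
                  for block in [model.cbam1, model.cbam2, model.cbam3]
                  for p in block.parameters())
print(f"Params inside the three CBAM blocks: {cbam_params:,}")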

  2. Monitoring the training process with TensorBoard

    • Initialization (SummaryWriter): in the main flow we create a SummaryWriter instance and give it a versioned log directory (e.g. runs/cifar10_cbam_cnn_exp_v1), so that each experiment's logs are stored separately.

    • Encapsulated training function: the whole training and evaluation loop is wrapped in train_and_evaluate, with the writer object passed in as a parameter, which keeps the code structure clean.

    • Comprehensive monitoring: inside train_and_evaluate we integrate TensorBoard's four core logging features:

      • Model graph (add_graph): records the network structure, viewable on the GRAPHS tab.

      • Images (add_image): records a batch of sample images so you can confirm on the IMAGES tab that the data is loaded correctly. (One caveat, with a sketch, right below.)
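
      The tensors fed to make_grid have already been normalized, so they look washed out in TensorBoard. Below is a small sketch, assuming the mean/std values from get_cifar10_loaders above, that maps a batch back to [0, 1] before logging (unnormalize is my own helper name, not part of the original script).

import torch

mean = torch.tensor([0.4914, 0.4822, 0.4465]).view(1, 3, 1, 1)
std = torch.tensor([0.2023, 0.1994, 0.2010]).view(1, 3, 1, 1)

def unnormalize(batch):
    # Map a Normalize()-d batch back to [0, 1] for display
    return (batch * std + mean).clamp(0.0, 1.0)

# Drop-in replacement for the two add_image lines in train_and_evaluate:
# img_grid = torchvision.utils.make_grid(unnormalize(images[:16].cpu()))
# writer.add_image('CIFAR-10 Samples', img_grid, 0)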

      • Scalars (add_scalar):

        • Real-time loss: logs the loss of every training batch (Train/Batch_Loss), so even the finest training fluctuations are visible.

        • Macro trends: logs the end-of-epoch test loss (Test/Epoch_Loss) and accuracy (Test/Epoch_Accuracy), plus the learning rate. These curves are essential for judging whether the model is converging or overfitting; for a fair overfitting check you also want epoch-level training metrics, sketched right after this item.
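
        The loop above logs only the per-batch training loss. To compare train vs. test curves epoch by epoch, a little extra accounting in the batch loop is enough. A hedged sketch of how the loop in train_and_evaluate could be extended (the epoch_* accumulator names are my own):

epoch_loss, epoch_correct, epoch_total = 0.0, 0, 0
for data, target in loop:
    data, target = data.to(device), target.to(device)
    optimizer.zero_grad()
    output = model(data)
    loss = criterion(output, target)
    loss.backward()
    optimizer.step()
    # extra accounting for epoch-level training metrics
    epoch_loss += loss.item() * data.size(0)
    epoch_correct += output.argmax(dim=1).eq(target).sum().item()
    epoch_total += data.size(0)

# After the loop, log these next to the test metrics:
writer.add_scalar('Train/Epoch_Loss', epoch_loss / epoch_total, epoch)
writer.add_scalar('Train/Epoch_Accuracy', 100. * epoch_correct / epoch_total, epoch)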

      • Histograms (add_histogram): at the end of each epoch, records the distribution of every layer's weights (and, in the version above, gradients). This helps diagnose training problems such as vanishing or exploding gradients; a complementary scalar-based check is sketched below.
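
      As a complement to the histograms, one can log the global L2 norm of all gradients as a single scalar, which makes exploding or dying gradients visible at a glance. A minimal sketch, assuming it is called right after loss.backward() (log_grad_norm is my own helper name):

def log_grad_norm(model, writer, step, tag='Train/Grad_Norm'):
    # Global L2 norm over all parameter gradients, logged as one scalar.
    # Call right after loss.backward(), before optimizer.step().
    total_sq = 0.0
    for p in model.parameters():
        if p.grad is not None:
            total_sq += p.grad.detach().pow(2).sum().item()
    writer.add_scalar(tag, total_sq ** 0.5, step)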

