Neural Networks: Nonlinear Activation Layers, Regularization Layers, Linear Layers

  1. Neural Networks: Nonlinear Activation Layers

    Purpose: enhance the model's ability to fit nonlinear relationships.

    A network with nonlinear activation layers:

    class activateNet(nn.Module):
        def __init__(self):
            super(activateNet,self).__init__()
            self.relu = nn.ReLU()
            self.sigmoid = nn.Sigmoid()
    
        def forward(self,input):
            #output = self.relu(input)
            output = self.sigmoid(input)
            return output
    
    activatenet = activateNet()
    

    ReLU:

    (figure: ReLU activation output)

    Sigmoid:

    (figure: Sigmoid activation output)
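
    Both activations have simple closed forms: ReLU(x) = max(0, x) and Sigmoid(x) = 1 / (1 + e^(-x)). As a quick sanity check (a minimal sketch, not part of the original post), applying them to a small tensor shows the effect on negative and positive values:

    import torch
    from torch import nn

    x = torch.tensor([[-1.0, 0.0], [2.0, -3.0]])

    relu = nn.ReLU()
    sigmoid = nn.Sigmoid()

    print(relu(x))     # negatives clamped to 0: tensor([[0., 0.], [2., 0.]])
    print(sigmoid(x))  # every value squashed into (0, 1)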

    Full code:

    Here the output of the pooling layer from the previous step is fed into the nonlinear activation layer.

    code:

    import torch
    import torchvision
    from torch import nn
    from torch.nn import Conv2d, MaxPool2d
    from torch.utils.data import DataLoader
    from torch.utils.tensorboard import SummaryWriter
    from torchvision.datasets import ImageFolder
    from torchvision import transforms
    
    # Data preprocessing
    transform = transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize(
            mean = [0.5,0.5,0.5],
            std = [0.5,0.5,0.5]
        )
    ])
    
    # Load the dataset (ImageFolder infers labels from subfolder names)
    folder_path = '../images'
    dataset = ImageFolder(folder_path,transform=transform)
    dataloader = DataLoader(dataset,batch_size=1)
    
    # Convolution
    class convNet(nn.Module):
        def __init__(self):
            # Call the parent class nn.Module constructor
            super(convNet,self).__init__()
            self.conv1 = Conv2d(in_channels=3,out_channels=6,kernel_size=3,stride=1,padding=0)
    
        def forward(self,x):
            x = self.conv1(x)
            return x
    
    convnet = convNet()
    
    # Pooling
    class poolNet(nn.Module):
        def __init__(self):
            super(poolNet,self).__init__()
            # ceil_mode=True: border regions smaller than 3x3 are still pooled
            # kernel_size=3 sets the pooling window; stride defaults to kernel_size
            self.maxpool1 = MaxPool2d(kernel_size=3,ceil_mode=True)
            self.maxpool2 = MaxPool2d(kernel_size=3,ceil_mode=False)
    
        def forward(self,input):
            output = self.maxpool1(input)
            #output = self.maxpool2(input)
            return output
    
    poolnet = poolNet()
    
    # Nonlinear activation layer (using ReLU / Sigmoid here)
    class activateNet(nn.Module):
        def __init__(self):
            super(activateNet,self).__init__()
            self.relu = nn.ReLU()
            self.sigmoid = nn.Sigmoid()
    
        def forward(self,input):
            #output = self.relu(input)
            output = self.sigmoid(input)
            return output
    
    activatenet = activateNet()
    
    
    writer = SummaryWriter('../logs')
    
    cnt = 0
    for data in dataloader:
        img,label = data
        print(img.shape)
        conv_output = convnet(img)
        print(conv_output.shape)
        writer.add_images('input',img,cnt)
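        # Reshape: add_images expects 3-channel images, so fold the 6 conv channels into two 3-channel images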
        conv_output = torch.reshape(conv_output,(-1,3,222,222))
        writer.add_images('conv_output',conv_output,cnt)
        pool_output = poolnet(conv_output)
        writer.add_images('pool_output',pool_output,cnt)
        activate_output = activatenet(pool_output)
        writer.add_images('activate_output',activate_output,cnt)
        cnt = cnt + 1
    
    writer.close()
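
    For reference, the tensor shapes at each stage work out as follows (a worked trace, assuming the 224x224 resize above):

    # input:                 (1, 3, 224, 224)
    # conv (k=3, s=1, p=0):  224 - 3 + 1 = 222         -> (1, 6, 222, 222)
    # reshape to 3 channels: batch dim absorbs extras  -> (2, 3, 222, 222)
    # maxpool (k=3, ceil):   ceil(222 / 3) = 74        -> (2, 3, 74, 74)
    # sigmoid:               shape unchanged           -> (2, 3, 74, 74)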
    
  2. Neural Networks: Regularization Layers

    Purpose: prevent overfitting.

    This part is fairly simple, so I won't write it out in full.
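
    For completeness, here is a minimal sketch of one such layer, assuming nn.Dropout as a representative regularization layer (an aside, not from the original post):

    import torch
    from torch import nn

    # Dropout randomly zeroes elements during training, which helps prevent overfitting
    dropout = nn.Dropout(p=0.5)

    x = torch.ones(2, 4)
    dropout.train()
    print(dropout(x))  # roughly half the elements zeroed, the rest scaled by 1/(1-p) = 2
    dropout.eval()
    print(dropout(x))  # identity at evaluation time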

    Here we distinguish a few related concepts (a short code sketch contrasting them follows the list):

    - Normalization: mapping data into a specific range (such as [0,1] or [-1,1]); a common method is min-max normalization, which subtracts the minimum and divides by the range. The goal is to remove the effect of differing feature scales so the data sits on a common scale and the model converges faster, e.g. converting image pixel values from 0-255 to 0-1.

    - Standardization: transforming data to have mean 0 and standard deviation 1 by subtracting the mean and dividing by the standard deviation (z-score standardization). It makes the data closer to a normal distribution and reduces the influence of outliers; it is common for algorithms sensitive to the data distribution, such as linear regression and SVM.

    - Regularization: adding extra constraints (such as L1 or L2 norms) during training to prevent overfitting. L1 regularization penalizes the absolute values of the weights and can produce sparse weights; L2 regularization (weight decay) penalizes the squared weights, keeping them smoother. The core idea is to balance model complexity against fitting ability to improve generalization.
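
    A minimal sketch of the three ideas in PyTorch (illustrative values; attaching weight_decay to the optimizer is the usual way to apply L2 regularization):

    import torch
    from torch import nn

    x = torch.tensor([0., 50., 100., 255.])

    # Normalization: min-max mapping into [0, 1]
    x_norm = (x - x.min()) / (x.max() - x.min())

    # Standardization: z-score, giving mean 0 and std 1
    x_std = (x - x.mean()) / x.std()

    # Regularization: an L2 penalty (weight decay) attached to the optimizer
    model = nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)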

  3. Neural Networks: Linear Layers (Fully Connected Layers)

    The input is the output of the nonlinear activation layer from the previous step.

    The linear layer:

    code:

    class linearNet(nn.Module):
        def __init__(self):
            super(linearNet,self).__init__()
            # in_features (taken from the flattened shape printed below) is the input feature dimension; out_features=10 is the output feature dimension
            self.linear = nn.Linear(32856,10)
    
        def forward(self,input):
            output = self.linear(input)
            return output
    
    linearnet = linearNet()
    

    in_features:

        # To find in_features, print the flattened shape
        print(torch.reshape(activate_output,(1,1,1,-1)).shape)
    

    (screenshot: the print shows torch.Size([1, 1, 1, 32856]))
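
    The 32856 is consistent with the shape trace from section 1 (a quick arithmetic check):

        # activate_output has shape (2, 3, 74, 74), so flattening gives
        # 2 * 3 * 74 * 74 = 32856 values, the in_features of nn.Linear
        print(2 * 3 * 74 * 74)  # 32856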

    Full code:

    code:

    import torch
    import torchvision
    from torch import nn
    from torch.nn import Conv2d, MaxPool2d
    from torch.utils.data import DataLoader
    from torch.utils.tensorboard import SummaryWriter
    from torchvision.datasets import ImageFolder
    from torchvision import transforms
    
    # Data preprocessing
    transform = transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize(
            mean = [0.5,0.5,0.5],
            std = [0.5,0.5,0.5]
        )
    ])
    
    # Load the dataset (ImageFolder infers labels from subfolder names)
    folder_path = '../images'
    dataset = ImageFolder(folder_path,transform=transform)
    # drop_last=True would discard a final batch smaller than batch_size
    #dataloader = DataLoader(dataset,batch_size=1,drop_last=True)
    dataloader = DataLoader(dataset,batch_size=1)
    
    # Convolution
    class convNet(nn.Module):
        def __init__(self):
            # Call the parent class nn.Module constructor
            super(convNet,self).__init__()
            self.conv1 = Conv2d(in_channels=3,out_channels=6,kernel_size=3,stride=1,padding=0)
    
        def forward(self,x):
            x = self.conv1(x)
            return x
    
    convnet = convNet()
    
    # Pooling
    class poolNet(nn.Module):
        def __init__(self):
            super(poolNet,self).__init__()
            # ceil_mode=True: border regions smaller than 3x3 are still pooled
            # kernel_size=3 sets the pooling window; stride defaults to kernel_size
            self.maxpool1 = MaxPool2d(kernel_size=3,ceil_mode=True)
            self.maxpool2 = MaxPool2d(kernel_size=3,ceil_mode=False)
    
        def forward(self,input):
            output = self.maxpool1(input)
            #output = self.maxpool2(input)
            return output
    
    poolnet = poolNet()
    
    # Nonlinear activation layer (using ReLU / Sigmoid here)
    class activateNet(nn.Module):
        def __init__(self):
            super(activateNet,self).__init__()
            self.relu = nn.ReLU()
            self.sigmoid = nn.Sigmoid()
    
        def forward(self,input):
            #output = self.relu(input)
            output = self.sigmoid(input)
            return output
    
    activatenet = activateNet()
    
    # Linear layer (fully connected layer)
    class linearNet(nn.Module):
        def __init__(self):
            super(linearNet,self).__init__()
            # in_features (taken from the flattened shape printed below) is the input feature dimension; out_features=10 is the output feature dimension
            self.linear = nn.Linear(32856,10)
    
        def forward(self,input):
            output = self.linear(input)
            return output
    
    linearnet = linearNet()
    
    writer = SummaryWriter('../logs')
    
    cnt = 0
    for data in dataloader:
        img,label = data
        print(img.shape)
        conv_output = convnet(img)
        print(conv_output.shape)
        writer.add_images('input',img,cnt)
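        # Reshape: add_images expects 3-channel images, so fold the 6 conv channels into two 3-channel images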
        conv_output = torch.reshape(conv_output,(-1,3,222,222))
        writer.add_images('conv_output',conv_output,cnt)
        pool_output = poolnet(conv_output)
        writer.add_images('pool_output',pool_output,cnt)
        activate_output = activatenet(pool_output)
        writer.add_images('activate_output',activate_output,cnt)
        # To find in_features, print the flattened shape
        print(torch.reshape(activate_output,(1,1,1,-1)).shape)
        # flatten: unroll everything, including the batch dim, into a 1-D tensor
        linear_output = torch.flatten(activate_output)
        linear_output = linearnet(linear_output)
        print(linear_output.shape)
        cnt = cnt + 1
    
    writer.close()
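
    One design note (an aside, not from the original post): torch.flatten with no start_dim also flattens the batch dimension, which is why the linear layer sees all 32856 values from both reshaped images at once. To get one 10-dimensional output per image, the batch dimension would be kept instead:

    # Keep the batch dimension: (2, 3, 74, 74) -> (2, 3*74*74) = (2, 16428)
    per_sample = torch.flatten(activate_output, start_dim=1)
    # The matching layer would then be nn.Linear(16428, 10),
    # producing output of shape (2, 10): one prediction per image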
    

    (screenshot: console output; the final print shows torch.Size([10]))

