1. Official description
Official site
With the torch.nn module we can build custom neural network models and then train and optimize them using the optimizers PyTorch provides (e.g. torch.optim) together with its loss functions.
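A minimal sketch of that workflow (the TinyNet model, the toy data, and the SGD learning rate below are made-up placeholders for illustration, not part of this tutorial):

import torch
from torch import nn, optim

# a tiny custom model: a single linear layer (hypothetical example)
class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

    def forward(self, x):
        return self.fc(x)

model = TinyNet()
loss_fn = nn.CrossEntropyLoss()                     # loss function from torch.nn
optimizer = optim.SGD(model.parameters(), lr=0.01)  # optimizer from torch.optim

x = torch.randn(8, 4)              # toy batch of 8 samples
y = torch.randint(0, 2, (8,))      # toy class labels

output = model(x)
loss = loss_fn(output, y)
optimizer.zero_grad()              # clear old gradients
loss.backward()                    # backpropagate
optimizer.step()                   # update the parameters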
2. Common building blocks
1. Convolution
Definition:
2D convolution
1.1 The functional API: nn.functional.conv2d
torch.nn.functional.conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1)
Example:
Code:
import torch
import torch.nn.functional as F

input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]])
kernel = torch.tensor([[1, 2, 1],
                       [0, 1, 0],
                       [2, 1, 0]])
print(input.shape, kernel.shape)  # torch.Size([5, 5]) torch.Size([3, 3])

# conv2d expects (N, C, H, W), so add batch and channel dimensions
input = torch.reshape(input, (1, 1, 5, 5))
kernel = torch.reshape(kernel, (1, 1, 3, 3))

output = F.conv2d(input, kernel, stride=1)
print(output)

output2 = F.conv2d(input, kernel, stride=2)
print(output2)

# padding=1 adds one ring of zeros on all four sides
output3 = F.conv2d(input, kernel, stride=1, padding=1)
print(output3)
Output:
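As a hand check of the first result: the kernel is laid over the top-left 3×3 window of the input and the element-wise products are summed.

# top-left 3x3 window        kernel
# 1 2 0                      1 2 1
# 0 1 2                      0 1 0
# 1 2 1                      2 1 0
#
#   1*1 + 2*2 + 0*1
# + 0*0 + 1*1 + 2*0
# + 1*2 + 2*1 + 1*0  = 10    -> top-left element of the stride-1 output

Sliding the window one column to the right gives the next element, and so on across the input.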
1.2 The module API: torch.nn.Conv2d
torch.nn.Conv2d(
in_channels, number of input channels
out_channels, number of output channels
kernel_size, kernel size, int or tuple, e.g. 3 or (1, 2)
stride=1, step the kernel moves each time
padding=0, border padding (how many rings of zeros are added)
dilation=1,
groups=1,
bias=True,
padding_mode='zeros',
device=None, dtype=None)
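The output spatial size follows the formula from the Conv2d documentation; a small sketch (the helper name conv2d_out_size is just for illustration):

def conv2d_out_size(h_in, kernel_size, stride=1, padding=0, dilation=1):
    # H_out = floor((H_in + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1
    return (h_in + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1

print(conv2d_out_size(32, 3))             # 30 -> matches the CIFAR10 example below
print(conv2d_out_size(5, 3, stride=2))    # 2  -> matches output2 above
print(conv2d_out_size(5, 3, padding=1))   # 5  -> matches output3 above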
Code:
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10('data', train=False,
                                       transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=64)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 3, stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x

tudui = Tudui()
writer = SummaryWriter('logs')
step = 0
for data in dataloader:
    imgs, targets = data
    output = tudui(imgs)
    print(output.shape)
    # input size: (64, 3, 32, 32)
    writer.add_images('input', imgs, step)
    # output size: (64, 6, 30, 30); add_images expects 3 channels,
    # so fold the extra channels into the batch dimension
    output = torch.reshape(output, (-1, 3, 30, 30))
    writer.add_images('output', output, step)
    step += 1
writer.close()
Output:
2. Pooling
Purpose: keep the important features of the input while reducing the amount of data.
Max pooling, MaxPool2d: downsampling
torch.nn.MaxPool2d(
kernel_size, int/tuple, size of the pooling window
stride=None, step size (defaults to kernel_size)
padding=0, zero padding added around the input
dilation=1, spacing between window elements (dilated/"atrous" pooling)
return_indices=False,
ceil_mode=False, ceil mode: round the output size up instead of down (see the sketch below)
)
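To make ceil_mode concrete, a small sketch comparing both modes on a 5×5 input with a 3×3 window (stride defaults to the kernel size):

import torch
from torch import nn

x = torch.arange(25, dtype=torch.float32).reshape(1, 1, 5, 5)

# floor mode (default): the incomplete window at the border is dropped -> 1x1 output
print(nn.MaxPool2d(kernel_size=3, ceil_mode=False)(x).shape)  # torch.Size([1, 1, 1, 1])

# ceil mode: the incomplete window is kept -> 2x2 output
print(nn.MaxPool2d(kernel_size=3, ceil_mode=True)(x).shape)   # torch.Size([1, 1, 2, 2])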
Example:
import torch
from torch import nn

# use float32: max pooling is not implemented for integer (Long) tensors
input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]], dtype=torch.float32)
# MaxPool2d expects (N, C, H, W) or (C, H, W); -1 lets reshape work out the batch size
input = torch.reshape(input, (-1, 1, 5, 5))
print(input.shape)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        output = self.maxpool1(input)
        return output

tudui = Tudui()
output = tudui(input)
print(output)
Output:
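As a hand check: with a 3×3 window, stride 3 and ceil_mode=True, the 5×5 input is covered by four (partly incomplete) windows, and taking the maximum of each window gives a 2×2 result of [[2, 3], [5, 1]].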
Example 2:
Code:
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        output = self.maxpool1(input)
        return output

tudui = Tudui()

dataset = torchvision.datasets.CIFAR10('data', train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)

writer = SummaryWriter('logs_maxpool')
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images('input', imgs, step)
    output = tudui(imgs)   # pooling keeps the channel count, so no reshape is needed
    writer.add_images('maxpool', output, step)
    step += 1
writer.close()
Output:
3. Non-linear activations
torch.nn.ReLU(inplace=False)
torch.nn.Sigmoid(*args, **kwargs)
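The inplace flag controls whether ReLU overwrites its input tensor; a small sketch:

import torch
from torch import nn

x = torch.tensor([1.0, -0.5, -1.0, 3.0])

out = nn.ReLU(inplace=False)(x)   # default: x is left unchanged, result returned separately
print(x)    # tensor([ 1.0000, -0.5000, -1.0000,  3.0000])
print(out)  # tensor([1., 0., 0., 3.])

nn.ReLU(inplace=True)(x)          # inplace: x itself is overwritten
print(x)    # tensor([1., 0., 0., 3.])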
Code:
import torch
from torch import nn

input = torch.tensor([[1, -0.5],
                      [-1, 3]])
input = torch.reshape(input, (-1, 1, 2, 2))

class Tudui(nn.Module):
    def __init__(self):
        super().__init__()
        self.relu1 = nn.ReLU()

    def forward(self, input):
        output = self.relu1(input)
        return output

tudui = Tudui()
output = tudui(input)
print(output)   # negative values are clamped to 0
Output:
Example 2:
Code:
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Sigmoid applied to CIFAR10 images
dataset = torchvision.datasets.CIFAR10('data', train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)

class Tudui(nn.Module):
    def __init__(self):
        super().__init__()
        self.sigmoid1 = nn.Sigmoid()

    def forward(self, input):
        output = self.sigmoid1(input)
        return output

tudui = Tudui()
writer = SummaryWriter('logs_sigmoid')
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images('input', imgs, step)
    output = tudui(imgs)
    writer.add_images('output', output, step)
    print(output.shape)
    step += 1
writer.close()
Output:
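For reference, Sigmoid squashes every element into (0, 1) via σ(x) = 1 / (1 + e^(-x)). Since ToTensor puts the pixel values in [0, 1], the outputs land in roughly [0.5, 0.73]; a quick check:

import torch
print(torch.sigmoid(torch.tensor([0.0, 1.0])))  # tensor([0.5000, 0.7311])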
4. Linear layer
torch.nn.Linear(
in_features, size of each input sample
out_features, size of each output sample
bias=True,
device=None, dtype=None)
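Linear computes y = xAᵀ + b, where the weight A has shape (out_features, in_features); a quick shape check (random values, for illustration only):

import torch
from torch import nn

linear = nn.Linear(in_features=196608, out_features=10)
print(linear.weight.shape)   # torch.Size([10, 196608])
print(linear.bias.shape)     # torch.Size([10])

# one flattened batch of 64 CIFAR10 images: 64 * 3 * 32 * 32 = 196608 features
x = torch.randn(1, 196608)
print(linear(x).shape)       # torch.Size([1, 10])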
Code:
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10('data', train=False,
                                       transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, 64, drop_last=True)

class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        # 64 images * 3 channels * 32 * 32 pixels = 196608 input features
        self.linear1 = nn.Linear(in_features=196608, out_features=10)

    def forward(self, input):
        return self.linear1(input)

tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    # output = torch.flatten(imgs)               # torch.Size([196608])
    # reshape instead of flatten keeps four dimensions
    output = torch.reshape(imgs, (1, 1, 1, -1))  # torch.Size([1, 1, 1, 196608])
    print(output.shape)
    output = tudui(output)   # torch.Size([1, 1, 1, 10]) (with flatten: torch.Size([10]))
    print(output.shape)
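A common alternative to the reshape above is to flatten only the feature dimensions and keep the batch dimension; a sketch of that variant (it would need a Linear(3072, 10) layer instead, since each single image has 3 * 32 * 32 = 3072 features):

import torch
from torch import nn

imgs = torch.randn(64, 3, 32, 32)            # a CIFAR10-sized batch, random values for illustration
flat = torch.flatten(imgs, start_dim=1)      # keep the batch dim: (64, 3, 32, 32) -> (64, 3072)
print(flat.shape)                            # torch.Size([64, 3072])

linear = nn.Linear(in_features=3 * 32 * 32, out_features=10)
print(linear(flat).shape)                    # torch.Size([64, 10])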
5. Sequential container (nn.Sequential)
Example:
Code:
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter

class Tudui(nn.Module):
    def __init__(self):
        super().__init__()
        # stride/padding worked out by hand so each conv keeps the 32x32 spatial size
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2)
        self.maxpool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2)
        self.maxpool2 = nn.MaxPool2d(2)
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2)
        self.maxpool3 = nn.MaxPool2d(2)
        self.flatten = nn.Flatten()
        # 64 is the number of hidden units
        self.linear1 = nn.Linear(in_features=1024, out_features=64)
        # 10 is the number of output classes
        self.linear2 = nn.Linear(in_features=64, out_features=10)

        # the same network written with nn.Sequential
        self.model1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(in_features=1024, out_features=64),
            nn.Linear(in_features=64, out_features=10)
        )

    def forward(self, x):
        # layer-by-layer version:
        # x = self.conv1(x)
        # x = self.maxpool1(x)
        # x = self.conv2(x)
        # x = self.maxpool2(x)
        # x = self.conv3(x)
        # x = self.maxpool3(x)
        # x = self.flatten(x)
        # x = self.linear1(x)
        # x = self.linear2(x)
        # Sequential version:
        x = self.model1(x)
        return x

tudui = Tudui()
print(tudui)

# dummy input to sanity-check the layer shapes; 64 is the batch size (64 images)
input = torch.ones((64, 3, 32, 32))
output = tudui(input)

writer = SummaryWriter('logs_s')
writer.add_graph(tudui, input)
writer.close()
Output:
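For reference, the in_features=1024 of the first Linear layer comes from tracing shapes through the network: each 5×5 convolution with padding=2 keeps the 32×32 spatial size, each MaxPool2d(2) halves it, so the last feature map is 64 channels × 4 × 4 = 1024 values per image. A quick check of that trace:

import torch
from torch import nn

model = nn.Sequential(
    nn.Conv2d(3, 32, 5, padding=2), nn.MaxPool2d(2),   # -> (64, 32, 16, 16)
    nn.Conv2d(32, 32, 5, padding=2), nn.MaxPool2d(2),  # -> (64, 32, 8, 8)
    nn.Conv2d(32, 64, 5, padding=2), nn.MaxPool2d(2),  # -> (64, 64, 4, 4)
    nn.Flatten(),                                      # -> (64, 1024)
)
x = torch.ones((64, 3, 32, 32))
print(model(x).shape)   # torch.Size([64, 1024]) -> in_features of the first Linear layer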