Model Construction
In [1]:
import torch
import torch.nn as nn
from collections import OrderedDict
Constructing a Model by Subclassing the Module Class
In [2]:
class MLP(nn.Module):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)  # call the parent class's constructor
        self.hidden = nn.Linear(28 * 28, 256)  # define our own layers
        self.act = nn.ReLU()
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        h = self.act(self.hidden(x))
        return self.output(h)

X = torch.rand(1, 28 * 28)
net = MLP()
print(net)
print('---')
print(net(X))
MLP(
(hidden): Linear(in_features=784, out_features=256, bias=True)
(act): ReLU()
(output): Linear(in_features=256, out_features=10, bias=True)
)
---
tensor([[ 0.1956, -0.2109, 0.0177, 0.1671, 0.0754, 0.3425, -0.0693, 0.0261,
0.1196, 0.2053]], grad_fn=<AddmmBackward0>)
In [3]:
# A re-implementation modeled on nn.Sequential, to make its mechanics clear
class MySequential(nn.Module):
    def __init__(self, *args):
        super().__init__()
        if len(args) == 1 and isinstance(args[0], OrderedDict):  # an OrderedDict was passed in
            for key, module in args[0].items():
                self.add_module(key, module)  # add_module registers the module in self._modules (an OrderedDict)
        else:  # individual Modules were passed in
            for idx, module in enumerate(args):
                self.add_module(str(idx), module)

    def forward(self, input):
        # self._modules is an OrderedDict, so members are guaranteed to be
        # traversed in the order they were added
        for module in self._modules.values():
            input = module(input)
        return input
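MySequential is defined above but never exercised in the original cells; as a quick sanity check (a minimal sketch added here), it can be used exactly like nn.Sequential, reusing the (1, 784) input X from earlier:

net = MySequential(
    nn.Linear(28 * 28, 256),
    nn.ReLU(),
    nn.Linear(256, 10),
)
print(net)
net(X)  # X has shape (1, 784), so this yields a (1, 10) output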
In [4]:
net = nn.Sequential(  # define the model with nn.Sequential
    nn.Linear(28 * 28, 256),
    nn.ReLU(),
    nn.Linear(256, 10),
)
print(net)
net(X)
Sequential(
  (0): Linear(in_features=784, out_features=256, bias=True)
  (1): ReLU()
  (2): Linear(in_features=256, out_features=10, bias=True)
)
Out[4]:
tensor([[-0.0999, -0.3157, 0.1503, -0.2465, 0.0070, 0.0309, 0.3908, 0.0503,
-0.0100, 0.0931]], grad_fn=<AddmmBackward0>)
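nn.Sequential also accepts an OrderedDict, which names each submodule instead of numbering it; this is exactly the first branch that MySequential mimics. A short sketch:

net = nn.Sequential(OrderedDict([
    ('hidden', nn.Linear(28 * 28, 256)),
    ('act', nn.ReLU()),
    ('output', nn.Linear(256, 10)),
]))
print(net)  # submodules now appear under the names 'hidden', 'act', 'output'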
The ModuleList Class
In [5]:
# nn.ModuleList stores submodules
net = nn.ModuleList([nn.Linear(784, 256), nn.ReLU()])
net.append(nn.Linear(256, 10))  # append, as with a Python list
print(net[-1])  # index access, as with a Python list
print(net)
# net(X)  # raises NotImplementedError: nn.ModuleList is merely a list that stores
#         # modules; it defines no forward, so it cannot be called like a model.
Linear(in_features=256, out_features=10, bias=True)
ModuleList(
  (0): Linear(in_features=784, out_features=256, bias=True)
  (1): ReLU()
  (2): Linear(in_features=256, out_features=10, bias=True)
)
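The reason to prefer nn.ModuleList over a plain Python list is parameter registration: only modules held by an nn.ModuleList (or another Module container) show up in parameters(). A minimal illustration, with the made-up class name WithList:

class WithList(nn.Module):
    def __init__(self):
        super().__init__()
        self.plain = [nn.Linear(10, 10)]                       # plain Python list: NOT registered
        self.registered = nn.ModuleList([nn.Linear(10, 10)])  # ModuleList: registered

print(len(list(WithList().parameters())))  # 2: only the registered layer's weight and bias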
In [6]:
# nn.ModuleList makes it easy to add modules programmatically
class MyModule(nn.Module):
    def __init__(self):
        super(MyModule, self).__init__()
        self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])  # build the layer list

    def forward(self, x):
        # ModuleList can act as an iterable, or be indexed using ints
        for i, l in enumerate(self.linears):
            x = self.linears[i // 2](x) + l(x)
        return x
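A quick run, added here as a sketch: since every layer is Linear(10, 10), each loop iteration sums two (batch, 10) outputs, and the input's shape is preserved end to end.

net = MyModule()
print(net(torch.rand(2, 10)).shape)  # torch.Size([2, 10])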
The ModuleDict Class
In [7]:
# nn.ModuleDict stores submodules by name
net = nn.ModuleDict({
    'linear': nn.Linear(784, 256),
    'act': nn.ReLU(),
})
net['output'] = nn.Linear(256, 10)  # add a module
print(net['linear'])  # dict-style access
print(net.output)  # attribute-style access
print(net)
# net(torch.zeros(1, 784))  # raises NotImplementedError: no forward is defined
Linear(in_features=784, out_features=256, bias=True)
Linear(in_features=256, out_features=10, bias=True)
ModuleDict(
  (linear): Linear(in_features=784, out_features=256, bias=True)
  (act): ReLU()
  (output): Linear(in_features=256, out_features=10, bias=True)
)
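Like nn.ModuleList, nn.ModuleDict defines no forward, so to actually compute with one you wrap it in a Module that spells out the data flow. A minimal sketch, with the hypothetical class name DictMLP:

class DictMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleDict({
            'linear': nn.Linear(784, 256),
            'act': nn.ReLU(),
            'output': nn.Linear(256, 10),
        })

    def forward(self, x):
        # route the input through the named submodules explicitly
        return self.layers['output'](self.layers['act'](self.layers['linear'](x)))

print(DictMLP()(torch.zeros(1, 784)).shape)  # torch.Size([1, 10])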
Constructing Complex Models
In [8]:
class FancyMLP(nn.Module):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.rand_weight = torch.rand((20, 20), requires_grad=False)  # constant, non-trainable parameter
        self.linear = nn.Linear(20, 20)

    def forward(self, x):
        x = self.linear(x)
        x = nn.functional.relu(torch.mm(x, self.rand_weight.data) + 1)
        x = self.linear(x)  # reuse the same layer: both applications share parameters
        while x.norm().item() > 1:  # control flow driven by the tensor's value
            x /= 2
        if x.norm().item() < 0.8:
            x *= 10
        return x.sum()

X = torch.rand(2, 20)
net = FancyMLP()
print(net)
net(X)
FancyMLP(
  (linear): Linear(in_features=20, out_features=20, bias=True)
)
Out[8]:
tensor(8.6183, grad_fn=<SumBackward0>)
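Two details worth verifying: forward applies self.linear twice, so both applications share one set of weights; and rand_weight is a plain tensor rather than an nn.Parameter, so it never appears in parameters(). A quick check, added here:

print(sum(p.numel() for p in net.parameters()))  # 420 = 20*20 weight entries + 20 biases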
In [9]:
class NestMLP(nn.Module):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.net = nn.Sequential(nn.Linear(40, 30), nn.ReLU())

    def forward(self, x):
        return self.net(x)

# Modules defined in different ways can be nested freely
net = nn.Sequential(NestMLP(), nn.Linear(30, 20), FancyMLP())
X = torch.rand(2, 40)
print(net)
net(X)
Sequential(
(0): NestMLP(
(net): Sequential(
(0): Linear(in_features=40, out_features=30, bias=True)
(1): ReLU()
)
)
(1): Linear(in_features=30, out_features=20, bias=True)
(2): FancyMLP(
(linear): Linear(in_features=20, out_features=20, bias=True)
)
)
Out[9]:
tensor(-0.2468, grad_fn=<SumBackward0>)