In [1]:
Copied!
import torch
from torch import nn
import torch
from torch import nn
In [5]:
Copied!
# Custom layer that subtracts the input's mean, shifting the mean to zero.
class CenteredLayer(nn.Module):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # CenteredLayer holds no trainable model parameters.

    def forward(self, x):
        # Center the input: the result has (approximately) zero mean.
        return x - x.mean()

layer = CenteredLayer()
output = layer(torch.tensor([1, 2, 3, 4, 5], dtype=torch.float))
print(output)
# Custom layer that subtracts the input's mean, shifting the mean to zero.
class CenteredLayer(nn.Module):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # CenteredLayer holds no trainable model parameters.

    def forward(self, x):
        # Center the input: the result has (approximately) zero mean.
        return x - x.mean()

layer = CenteredLayer()
output = layer(torch.tensor([1, 2, 3, 4, 5], dtype=torch.float))
print(output)
tensor([-2., -1., 0., 1., 2.])
In [16]:
Copied!
# Custom layers compose with built-in ones: chain a Linear layer with CenteredLayer.
net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())
y = net(torch.rand(4, 8))
# The centering step drives the output mean to (approximately) 0.
print(y.mean().item())
# Custom layers compose with built-in ones: chain a Linear layer with CenteredLayer.
net = nn.Sequential(nn.Linear(8, 128), CenteredLayer())
y = net(torch.rand(4, 8))
# The centering step drives the output mean to (approximately) 0.
print(y.mean().item())
-5.587935447692871e-09
含模型参数的自定义层
In [22]:
Copied!
# nn.ParameterList can hold a list of parameters.
class MyListDense(nn.Module):
    def __init__(self):
        super().__init__()
        # Three 4x4 weight matrices, plus a final 4x1 projection appended afterwards.
        self.params = nn.ParameterList([nn.Parameter(torch.randn(4, 4)) for i in range(3)])
        self.params.append(nn.Parameter(torch.randn(4, 1)))

    def forward(self, x):
        # Multiply by each stored weight in turn: every layer here is a pure
        # linear transformation.
        for i in range(len(self.params)):
            x = torch.mm(x, self.params[i])
        return x

net = MyListDense()
print(net)
# nn.ParameterList can hold a list of parameters.
class MyListDense(nn.Module):
    def __init__(self):
        super().__init__()
        # Three 4x4 weight matrices, plus a final 4x1 projection appended afterwards.
        self.params = nn.ParameterList([nn.Parameter(torch.randn(4, 4)) for i in range(3)])
        self.params.append(nn.Parameter(torch.randn(4, 1)))

    def forward(self, x):
        # Multiply by each stored weight in turn: every layer here is a pure
        # linear transformation.
        for i in range(len(self.params)):
            x = torch.mm(x, self.params[i])
        return x

net = MyListDense()
print(net)
MyListDense(
(params): ParameterList(
(0): Parameter containing: [torch.float32 of size 4x4]
(1): Parameter containing: [torch.float32 of size 4x4]
(2): Parameter containing: [torch.float32 of size 4x4]
(3): Parameter containing: [torch.float32 of size 4x1]
)
)
In [21]:
Copied!
# nn.ParameterDict holds parameters keyed by name.
# It follows dict conventions: update() adds parameters, keys() lists the
# names, items() yields (name, parameter) pairs, and so on.
# Selecting a different key selects a different forward pass.
class MyDictDense(nn.Module):
    def __init__(self):
        super().__init__()
        self.params = nn.ParameterDict({
            'linear1': nn.Parameter(torch.randn(4, 4)),
            'linear2': nn.Parameter(torch.randn(4, 1))
        })
        # Parameters can be added after construction, dict-style.
        self.params.update({'linear3': nn.Parameter(torch.randn(4, 2))})

    def forward(self, x, choice='linear1'):
        # `choice` picks which weight matrix the input is multiplied by.
        return torch.mm(x, self.params[choice])

net = MyDictDense()
x = torch.ones(1, 4)
print(net(x, 'linear1'))
print(net(x, 'linear2'))
print(net(x, 'linear3'))
# nn.ParameterDict holds parameters keyed by name.
# It follows dict conventions: update() adds parameters, keys() lists the
# names, items() yields (name, parameter) pairs, and so on.
# Selecting a different key selects a different forward pass.
class MyDictDense(nn.Module):
    def __init__(self):
        super().__init__()
        self.params = nn.ParameterDict({
            'linear1': nn.Parameter(torch.randn(4, 4)),
            'linear2': nn.Parameter(torch.randn(4, 1))
        })
        # Parameters can be added after construction, dict-style.
        self.params.update({'linear3': nn.Parameter(torch.randn(4, 2))})

    def forward(self, x, choice='linear1'):
        # `choice` picks which weight matrix the input is multiplied by.
        return torch.mm(x, self.params[choice])

net = MyDictDense()
x = torch.ones(1, 4)
print(net(x, 'linear1'))
print(net(x, 'linear2'))
print(net(x, 'linear3'))
tensor([[ 4.0689, 2.0882, 1.5975, -0.4973]], grad_fn=<MmBackward0>) tensor([[0.8011]], grad_fn=<MmBackward0>) tensor([[-2.2830, -0.5524]], grad_fn=<MmBackward0>)
In [23]:
Copied!
# Nesting custom modules: Sequential chains them like any built-in layer.
net = nn.Sequential(
    MyDictDense(),
    MyListDense(),
)
print(net)
print(net(x))
# Nesting custom modules: Sequential chains them like any built-in layer.
net = nn.Sequential(
    MyDictDense(),
    MyListDense(),
)
print(net)
print(net(x))
Sequential(
(0): MyDictDense(
(params): ParameterDict(
(linear1): Parameter containing: [torch.FloatTensor of size 4x4]
(linear2): Parameter containing: [torch.FloatTensor of size 4x1]
(linear3): Parameter containing: [torch.FloatTensor of size 4x2]
)
)
(1): MyListDense(
(params): ParameterList(
(0): Parameter containing: [torch.float32 of size 4x4]
(1): Parameter containing: [torch.float32 of size 4x4]
(2): Parameter containing: [torch.float32 of size 4x4]
(3): Parameter containing: [torch.float32 of size 4x1]
)
)
)
tensor([[57.7282]], grad_fn=<MmBackward0>)