模型参数的访问、初始化和共享¶
In [1]:
Copied!
import torch
from torch import nn
from torch.nn import init

# A small MLP; PyTorch applies its default initialization automatically
# when each layer is constructed.
net = nn.Sequential(
    nn.Linear(4, 3),
    nn.ReLU(),
    nn.Linear(3, 1),
)
print(net)

X = torch.rand(2, 4)   # batch of 2 samples, 4 features each
Y = net(X).sum()       # scalar value so later .backward() demos work
Sequential( (0): Linear(in_features=4, out_features=3, bias=True) (1): ReLU() (2): Linear(in_features=3, out_features=1, bias=True) )
访问模型参数¶
In [2]:
Copied!
# net.named_parameters() returns a generator; iterating it yields a
# (name, Parameter) pair for every trainable parameter of every layer.
print(type(net.named_parameters()))
for name, param in net.named_parameters():
    print(name, param.size())
<class 'generator'> 0.weight torch.Size([3, 4]) 0.bias torch.Size([3]) 2.weight torch.Size([1, 3]) 2.bias torch.Size([1])
In [4]:
Copied!
# An nn.Sequential supports indexing: net[0] is its first layer.
# Calling .named_parameters() on a single layer yields only that layer's
# parameters (note: names lack the layer-index prefix seen above).
for name, param in net[0].named_parameters():
    print(name, param.size(), type(param))
weight torch.Size([3, 4]) <class 'torch.nn.parameter.Parameter'> bias torch.Size([3]) <class 'torch.nn.parameter.Parameter'>
In [5]:
Copied!
# Parameter is a subclass of Tensor: attributes of type nn.Parameter are
# automatically registered in the module's parameter list, while plain
# Tensor attributes are not.
class MyModel(nn.Module):
    """Toy module showing nn.Parameter vs. plain Tensor registration."""

    def __init__(self, **kwargs):
        super(MyModel, self).__init__(**kwargs)
        self.weight1 = nn.Parameter(torch.rand(20, 20))  # registered
        self.weight2 = torch.rand(20, 20)                # NOT registered

    def forward(self, x):
        pass

n = MyModel()
for name, param in n.named_parameters():
    print(name)  # prints only "weight1"
weight1
In [11]:
Copied!
# .parameters() yields the raw Parameter objects without names;
# index 0 is the weight matrix of the first Linear layer.
weight_0 = list(net[0].parameters())[0]
print(weight_0.data)
print(weight_0.grad)  # None: no backward pass has run yet
Y.backward()
print(weight_0.grad)  # populated after backpropagation
tensor([[ 0.4203, -0.3237, -0.2945, 0.3421],
[ 0.3183, -0.3916, -0.4305, -0.2748],
[-0.2611, -0.4299, 0.3824, -0.4395]])
None
tensor([[0.3651, 0.2748, 0.2382, 0.2263],
[0.2062, 0.0813, 0.0769, 0.1600],
[0.0000, 0.0000, 0.0000, 0.0000]])
初始化模型参数¶
In [19]:
Copied!
# Re-initialize every parameter in place: small Gaussian noise for
# weights, zeros for biases (torch.nn.init functions ending in "_"
# modify their argument in place).
for name, param in net.named_parameters():
    if 'weight' in name:  # select weight parameters by name
        init.normal_(param, mean=0, std=0.01)
    if 'bias' in name:
        init.constant_(param, val=0)
    print(name, param.data)
0.weight tensor([[ 0.0032, -0.0049, 0.0114, 0.0028],
[-0.0212, 0.0144, -0.0073, 0.0033],
[ 0.0005, -0.0112, -0.0108, -0.0269]])
0.bias tensor([0., 0., 0.])
2.weight tensor([[ 0.0025, -0.0049, 0.0011]])
2.bias tensor([0.])
自定义初始化方法¶
In [20]:
Copied!
# Hand-written equivalent of torch.nn.init.normal_, shown to clarify how
# the library's in-place initializers work.
def normal_(tensor, mean=0, std=1):
    """Fill *tensor* in place with samples from N(mean, std**2).

    The write happens under torch.no_grad() so autograd does not track
    it. Returns the same tensor object, mirroring torch.nn.init.
    """
    with torch.no_grad():
        return tensor.normal_(mean, std)
In [21]:
Copied!
# Custom initializer: draw uniformly from [-10, 10], then zero out every
# entry whose absolute value is below 5, keeping only "large" weights.
def init_weight_(tensor):
    """Sparse uniform init: U(-10, 10) masked to |value| >= 5, in place."""
    with torch.no_grad():
        tensor.uniform_(-10, 10)
        tensor *= (tensor.abs() >= 5).float()

for name, param in net.named_parameters():
    if 'weight' in name:
        init_weight_(param)
        print(name, param.data)
0.weight tensor([[-0.0000, -0.0000, 5.4666, -0.0000],
[ 7.5083, 5.7375, 8.9135, -7.0757],
[ 0.0000, 7.3806, 9.3287, -0.0000]])
2.weight tensor([[ 7.7203, -7.8855, -0.0000]])
共享模型参数¶
In [22]:
Copied!
# Parameter sharing: the SAME Linear instance is placed twice in the
# Sequential, so net[0] and net[1] are one layer with one weight tensor;
# updating it changes both positions at once.
linear = nn.Linear(1, 1, bias=False)
net = nn.Sequential(linear, linear)
print(net)
# named_parameters() de-duplicates shared parameters, so only one
# (name, param) pair is yielded here.
for name, param in net.named_parameters():
    init.constant_(param, val=3)
    print(name, param.data)
Sequential( (0): Linear(in_features=1, out_features=1, bias=False) (1): Linear(in_features=1, out_features=1, bias=False) ) 0.weight tensor([[3.]])