In [2]:
Copied!
import torch
import torch.nn as nn
import sys
sys.path.append('..')
import utils
import torch
import torch.nn as nn
import sys
sys.path.append('..')
import utils
In [3]:
Copied!
def corr2d_multi_in(X, K):
    """Cross-correlate a multi-input-channel input with a matching kernel.

    Args:
        X: input tensor of shape (c_i, h, w) — one 2-D plane per channel.
        K: kernel tensor of shape (c_i, kh, kw) — one 2-D kernel per channel.

    Returns:
        A single 2-D tensor: the per-channel 2-D cross-correlations
        (delegated to utils.corr2d) summed over the channel axis.
    """
    # Start with channel 0, then accumulate the remaining channels in place.
    res = utils.corr2d(X[0, :, :], K[0, :, :])
    for i in range(1, X.shape[0]):
        res += utils.corr2d(X[i, :, :], K[i, :, :])
    return res
In [4]:
Copied!
# Sanity check with a 2-input-channel example (from the d2l textbook).
X = torch.tensor([[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
                  [[1, 2, 3], [4, 5, 6], [7, 8, 9]]])
K = torch.tensor([[[0, 1], [2, 3]], [[1, 2], [3, 4]]])
corr2d_multi_in(X, K)  # expected: tensor([[ 56,  72], [104, 120]])
Out[4]:
tensor([[ 56, 72],
[104, 120]])
多输出通道 (Multiple Output Channels)
In [5]:
Copied!
# Kernel shape here is: (output channels, input channels, h, w).
def corr2d_multi_in_out(X, K):
    """Cross-correlation with multiple input AND output channels.

    Args:
        X: input tensor of shape (c_i, h, w).
        K: kernel tensor of shape (c_o, c_i, kh, kw).

    Returns:
        Output tensor of shape (c_o, h_out, w_out): iterate over K's
        leading (output-channel) axis, cross-correlate each (c_i, kh, kw)
        slice with X, and stack the 2-D results along a new axis 0.
    """
    return torch.stack([corr2d_multi_in(X, k) for k in K])
In [6]:
Copied!
# Build a 3-output-channel kernel from the 2-input-channel kernel above.
# NOTE: this statement is self-referential (K = f(K)) and therefore NOT
# idempotent — it must run exactly once after the cell defining K.
K = torch.stack([K, K + 1, K + 2])
K.shape  # torch.Size([3, 2, 2, 2])
Out[6]:
torch.Size([3, 2, 2, 2])
In [7]:
Copied!
# Each of the 3 output channels is corr2d_multi_in(X, ·) applied to one
# (2, 2, 2) slice of K; see Out[7] below for the expected values.
corr2d_multi_in_out(X, K)
Out[7]:
tensor([[[ 56, 72],
[104, 120]],
[[ 76, 100],
[148, 172]],
[[ 96, 128],
[192, 224]]])
1x1卷积层 (1×1 Convolutional Layer)
In [ ]:
Copied!
# A 1x1 convolution layer transforms the number of channels.
def corr2d_multi_in_out_1x1(X, K):
    """1x1 convolution, implemented as a per-pixel fully-connected layer.

    A 1x1 kernel never mixes neighboring pixels — it only mixes channels —
    so the whole operation reduces to one matrix multiplication over the
    channel axis.

    Args:
        X: input tensor of shape (c_i, h, w).
        K: kernel tensor of shape (c_o, c_i, 1, 1).

    Returns:
        Output tensor of shape (c_o, h, w).
    """
    c_i, h, w = X.shape
    c_o = K.shape[0]
    X = X.view(c_i, h * w)  # flatten each channel into a row of h*w pixels
    K = K.view(c_o, c_i)    # drop the trailing 1x1 spatial dimensions
    Y = torch.mm(K, X)      # matrix multiply == fully-connected layer per pixel
    return Y.view(c_o, h, w)