guanjihuan 2024-04-03 16:22:28 +08:00
parent 5e3e22e25d
commit 19974d761a
2 changed files with 107 additions and 0 deletions


@@ -0,0 +1,77 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/39320
"""
import torch
input_data = torch.randn(1, 1, 28, 28)  # random input: batch size 1, 1 channel, 28x28
print('[With padding (padding=1): effect of different kernel sizes on the output shape]')
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=1, stride=1, padding=1, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=1):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=2, stride=1, padding=1, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=2):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=1, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=3):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=4, stride=1, padding=1, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=4):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=5, stride=1, padding=1, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=5):", output_data.shape)
print()
print('[Without padding (padding=0): effect of different kernel sizes on the output shape]')
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=1, stride=1, padding=0, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=1):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=2, stride=1, padding=0, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=2):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=0, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=3):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=4, stride=1, padding=0, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=4):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=5, stride=1, padding=0, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=5):", output_data.shape)
print()
print('[Effect of different strides on the output shape (kernel_size=3, with padding)]')
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=1, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (stride=1):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, stride=2, padding=1, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (stride=2):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, stride=3, padding=1, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (stride=3):", output_data.shape)
print()
print('[Effect of different dilation rates on the output shape (stride=1, with padding)]')
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=1, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=3, dilation=1):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=1, dilation=2)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=3, dilation=2):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=1, dilation=3)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=3, dilation=3):", output_data.shape)
print()
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=4, stride=1, padding=1, dilation=1)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=4, dilation=1):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=4, stride=1, padding=1, dilation=2)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=4, dilation=2):", output_data.shape)
conv_layer = torch.nn.Conv2d(in_channels=1, out_channels=1, kernel_size=4, stride=1, padding=1, dilation=3)
output_data = conv_layer(input_data)
print("Output shape (kernel_size=4, dilation=3):", output_data.shape)


@@ -0,0 +1,30 @@
"""
This code is supported by the website: https://www.guanjihuan.com
The newest version of this code is on the web page: https://www.guanjihuan.com/archives/39320
"""
import torch
input_data = torch.randn(1, 1, 28, 28)  # random input: batch size 1, 1 channel, 28x28
print('[Effect of different kernel sizes on the output shape]')
max_pool = torch.nn.MaxPool2d(kernel_size=1, stride=1)
output_data = max_pool(input_data)
print("Output shape (kernel_size=1):", output_data.shape)
max_pool = torch.nn.MaxPool2d(kernel_size=2, stride=1)
output_data = max_pool(input_data)
print("Output shape (kernel_size=2):", output_data.shape)
max_pool = torch.nn.MaxPool2d(kernel_size=3, stride=1)
output_data = max_pool(input_data)
print("Output shape (kernel_size=3):", output_data.shape)
print()
print('[Effect of different strides on the output shape (kernel_size=2)]')
max_pool = torch.nn.MaxPool2d(kernel_size=2, stride=1)
output_data = max_pool(input_data)
print("Output shape (stride=1):", output_data.shape)
max_pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
output_data = max_pool(input_data)
print("Output shape (stride=2):", output_data.shape)
max_pool = torch.nn.MaxPool2d(kernel_size=2, stride=3)
output_data = max_pool(input_data)
print("Output shape (stride=3):", output_data.shape)