nn.Sequential - DonghoonPark12/ssd.pytorch GitHub Wiki
nn.Sequential is a container of modules. When you have modules that must be applied one after another, you can use it as shown below.
import torch.nn as nn

# Example dimensions for the toy model below.
D_in, H, D_out = 64, 100, 10

# `import torch.nn as nn` binds only `nn`, so the layers must be
# referenced as nn.Linear / nn.ReLU (the original `torch.nn.Linear`
# would raise NameError: name 'torch' is not defined).
model = nn.Sequential(
    nn.Linear(D_in, H),
    nn.ReLU(),
    nn.Linear(H, D_out),
)
Modules can also be grouped into named blocks (block1, block2), as shown below.
class MyCNNClassifier(nn.Module):
    """Two conv-block CNN classifier.

    Assumes 28x28 spatial input (e.g. MNIST-like) — TODO confirm against
    callers. kernel_size=3 with stride=1, padding=1 preserves spatial size,
    so the flattened feature size is channels * 28 * 28.
    """
    def __init__(self, in_c, n_classes):
        super().__init__()
        self.conv_block1 = nn.Sequential(
            nn.Conv2d(in_c, 32, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU()
        )
        self.conv_block2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )
        self.decoder = nn.Sequential(
            # conv_block2 outputs 64 channels, not 32 — the original
            # nn.Linear(32 * 28 * 28, ...) raised a shape-mismatch error
            # at runtime for any forward pass.
            nn.Linear(64 * 28 * 28, 1024),
            nn.Sigmoid(),
            nn.Linear(1024, n_classes)
        )

    def forward(self, x):
        x = self.conv_block1(x)
        x = self.conv_block2(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 64*28*28)
        x = self.decoder(x)
        return x
Note that conv_block1 and conv_block2 are very similar. If we define the following helper function,
def conv_block(in_f, out_f, *args, **kwargs):
    """Build a Conv2d -> BatchNorm2d -> ReLU block as an nn.Sequential.

    Extra positional/keyword args (kernel_size, padding, stride, ...)
    are forwarded to nn.Conv2d.
    """
    layers = [
        nn.Conv2d(in_f, out_f, *args, **kwargs),
        nn.BatchNorm2d(out_f),
        nn.ReLU(),
    ]
    return nn.Sequential(*layers)
the class can be rewritten as below.
class MyCNNClassifier(nn.Module):
    """Two conv-block CNN classifier built from the conv_block helper.

    Assumes 28x28 spatial input (e.g. MNIST-like) — TODO confirm against
    callers. conv_block uses kernel_size=3 with padding=1 (stride defaults
    to 1), so spatial size is preserved through both blocks.
    """
    def __init__(self, in_c, n_classes):
        super().__init__()
        self.conv_block1 = conv_block(in_c, 32, kernel_size=3, padding=1)
        self.conv_block2 = conv_block(32, 64, kernel_size=3, padding=1)
        self.decoder = nn.Sequential(
            # conv_block2 outputs 64 channels, not 32 — the original
            # nn.Linear(32 * 28 * 28, ...) raised a shape-mismatch error
            # at runtime for any forward pass.
            nn.Linear(64 * 28 * 28, 1024),
            nn.Sigmoid(),
            nn.Linear(1024, n_classes)
        )

    def forward(self, x):
        x = self.conv_block1(x)
        x = self.conv_block2(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 64*28*28)
        x = self.decoder(x)
        return x