CNN workflow

  • Load libraries and data (a rough data-loading sketch follows this list)
  • Split into training and testing data
  • Define the hyperparameters
  • Build the network architecture
  • Choose an optimizer and a loss function
  • Train the network (a training/evaluation sketch follows the model definition below)
  • Evaluation and Visualization
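
The classifier defined further down expects a single input channel and 10 output classes, which matches the MNIST digit dataset, so the minimal sketch below assumes MNIST loaded via torchvision; the batch size, learning rate, and epoch count are illustrative values, not tuned ones.

import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# Hyperparameters (illustrative values, not tuned)
batch_size = 64
learning_rate = 1e-3
num_epochs = 5

# MNIST already ships with a train/test split; ToTensor() scales pixels to [0, 1]
transform = transforms.ToTensor()
train_set = datasets.MNIST(root="./data", train=True, download=True, transform=transform)
test_set = datasets.MNIST(root="./data", train=False, download=True, transform=transform)

train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)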

Building the CNN network architecture

import torch.nn as nn

class Classifier(nn.Module):
    """Convnet Classifier"""
    def __init__(self):
        super(Classifier, self).__init__()
        self.conv = nn.Sequential(
            # Layer 1
            nn.Conv2d(in_channels=1, out_channels=16,
                      kernel_size=(3, 3), padding=1),
            # in_channels: depth of the input (1 for grayscale, 3 for RGB)
            # out_channels: number of output feature maps; must match the next layer's in_channels
            # kernel_size: size of the convolution kernel
            # stride: how far the kernel moves at each step (default 1)
            # padding: zero-pad the border so the output does not keep shrinking

            nn.Dropout(p=0.5),    # zero each input element with probability p (Bernoulli)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2),  # 2x2 max pool with stride 2 halves height and width

            # Layer 2
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3), padding=1),
            nn.Dropout(p=0.5),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2),

            # Layer 3
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), padding=1),
            nn.Dropout(p=0.5),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2),

            # Layer 4
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), padding=1),
            nn.Dropout(p=0.5),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=2)
        )
        # Linear classifier head (logistic regression): 128 features -> 10 classes
        self.clf = nn.Linear(128, 10)

    def forward(self, x):
        # For 28x28 inputs the four pooling stages leave a 1x1 spatial map,
        # so flattening gives exactly 128 features per example.
        return self.clf(self.conv(x).view(x.size(0), -1))
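
With the model defined, the remaining steps of the workflow are choosing an optimizer and loss function, training, and evaluating. The sketch below is one plausible way to wire them together; it reuses the DataLoaders and hyperparameters from the earlier sketch, and Adam plus cross-entropy loss are assumed choices for illustration, not something fixed by these notes.

model = Classifier()
criterion = nn.CrossEntropyLoss()                    # standard loss for 10-way classification
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    # Training
    model.train()                                    # enable dropout
    for images, labels in train_loader:
        optimizer.zero_grad()                        # clear gradients from the previous step
        loss = criterion(model(images), labels)
        loss.backward()                              # backpropagate
        optimizer.step()                             # update the weights

    # Evaluation
    model.eval()                                     # disable dropout
    correct = 0
    with torch.no_grad():
        for images, labels in test_loader:
            preds = model(images).argmax(dim=1)
            correct += (preds == labels).sum().item()
    print(f"epoch {epoch + 1}: test accuracy {correct / len(test_set):.4f}")

Calling model.train() before the training pass and model.eval() before evaluation matters here because the network uses Dropout, which should be active while training and switched off when measuring accuracy.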
