import torch
from torch import nn


class TinyCNN(nn.Module):
    """A small VGG-style CNN: four conv blocks followed by a fully connected head.

    Args:
        input_shape: number of input channels (e.g. 3 for RGB images).
        hidden_units: number of filters in the first conv block.
        output_shape: number of output classes.
    """

    def __init__(self, input_shape: int, hidden_units: int, output_shape: int) -> None:
        super().__init__()
        self.conv_block_1 = nn.Sequential(
            nn.Conv2d(in_channels=input_shape,
                      out_channels=hidden_units,
                      kernel_size=3,  # size of the square filter that slides over the image
                      stride=1,       # default
                      padding=1),     # "valid" (no padding), "same" (output shape matches input), or an int
            nn.BatchNorm2d(hidden_units),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2,
                         stride=2),   # stride defaults to kernel_size
            nn.Dropout(p=0.25),
        )
        self.conv_block_2 = nn.Sequential(
            nn.Conv2d(hidden_units, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout(p=0.25),
        )

        self.conv_block_3 = nn.Sequential(
            nn.Conv2d(128, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout(p=0.25),
        )

        self.conv_block_4 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Dropout(p=0.25),
        )

        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            # Where does this in_features value come from? Each of the four max-pool
            # layers halves the spatial dimensions, so with 224x224 inputs the final
            # conv block emits 512 channels of 14x14 feature maps. 512 * 14 * 14 is
            # the same number as the 256 * 392 originally hard-coded here.
            nn.Linear(in_features=512 * 14 * 14, out_features=256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Dropout(p=0.25),
        )

        self.fc_2 = nn.Sequential(
            # in_features=256 because fc_1 above projects the flattened
            # conv features down to 256 units.
            nn.Linear(in_features=256,
                      out_features=512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(p=0.25),
        )

        self.classifier = nn.Sequential(
            nn.Linear(in_features=512,
                      out_features=output_shape)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.conv_block_1(x)  # -> [B, hidden_units, H/2, W/2]
        x = self.conv_block_2(x)  # -> [B, 128, H/4, W/4]
        x = self.conv_block_3(x)  # -> [B, 512, H/8, W/8]
        x = self.conv_block_4(x)  # -> [B, 512, H/16, W/16]
        x = self.fc_1(x)          # -> [B, 256]
        x = self.fc_2(x)          # -> [B, 512]
        x = self.classifier(x)    # -> [B, output_shape] raw logits
        return x
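
# A minimal usage sketch (not part of the original file). It assumes 3-channel
# 224x224 inputs, the only square resolution consistent with the 512 * 14 * 14
# flatten size above, and arbitrary example values for hidden_units (64) and
# output_shape (10).
if __name__ == "__main__":
    model = TinyCNN(input_shape=3, hidden_units=64, output_shape=10)
    model.eval()  # eval mode so the BatchNorm layers accept a batch of one
    dummy_batch = torch.randn(1, 3, 224, 224)  # [batch, channels, height, width]
    with torch.inference_mode():
        logits = model(dummy_batch)
    print(logits.shape)  # expected: torch.Size([1, 10])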