eyupipler committed on
Commit
ec036cb
·
verified ·
1 Parent(s): 493e3f3

Update model.py

Browse files
Files changed (1) hide show
  1. model.py +1 -142
model.py CHANGED
@@ -1,144 +1,3 @@
1
  import torch
2
  import torch.nn as nn
3
- import torch.nn.functional as F
4
-
5
- class Enhanced3DCNN(nn.Module):
6
- def __init__(self, num_classes=3, dropout_rate=0.5):
7
- super().__init__()
8
- self.conv1 = nn.Conv3d(1, 32, kernel_size=3, padding=1)
9
- self.bn1 = nn.BatchNorm3d(32)
10
- self.pool1 = nn.MaxPool3d(2)
11
-
12
- self.conv2 = nn.Conv3d(32, 64, kernel_size=3, padding=1)
13
- self.bn2 = nn.BatchNorm3d(64)
14
- self.pool2 = nn.MaxPool3d(2)
15
-
16
- self.conv3 = nn.Conv3d(64, 128, kernel_size=3, padding=1)
17
- self.bn3 = nn.BatchNorm3d(128)
18
- self.pool3 = nn.MaxPool3d(2)
19
-
20
- self.conv4 = nn.Conv3d(128, 256, kernel_size=3, padding=1)
21
- self.bn4 = nn.BatchNorm3d(256)
22
- self.pool4 = nn.AdaptiveAvgPool3d(1)
23
-
24
- self.dropout = nn.Dropout(dropout_rate)
25
- self.fc1 = nn.Linear(256, 128)
26
- self.fc2 = nn.Linear(128, num_classes)
27
-
28
- self._initialize_weights()
29
-
30
- def _initialize_weights(self):
31
- for m in self.modules():
32
- if isinstance(m, nn.Conv3d):
33
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
34
- if m.bias is not None:
35
- nn.init.constant_(m.bias, 0)
36
- elif isinstance(m, nn.BatchNorm3d):
37
- nn.init.constant_(m.weight, 1)
38
- nn.init.constant_(m.bias, 0)
39
- elif isinstance(m, nn.Linear):
40
- nn.init.normal_(m.weight, 0, 0.01)
41
- nn.init.constant_(m.bias, 0)
42
-
43
- def forward(self, x):
44
- x = self.pool1(F.relu(self.bn1(self.conv1(x))))
45
- x = self.pool2(F.relu(self.bn2(self.conv2(x))))
46
- x = self.pool3(F.relu(self.bn3(self.conv3(x))))
47
- x = self.pool4(F.relu(self.bn4(self.conv4(x))))
48
- x = x.view(x.size(0), -1)
49
- x = F.relu(self.fc1(self.dropout(x)))
50
- return self.fc2(x)
51
-
52
- class Enhanced3DCNNv7(nn.Module):
53
- def __init__(self, num_classes=3, dropout_rate=0.2):
54
- super().__init__()
55
- self.conv1 = nn.Conv3d(1, 32, kernel_size=2, padding=1)
56
- self.bn1 = nn.BatchNorm3d(32)
57
- self.pool1 = nn.MaxPool3d(2)
58
-
59
- self.conv2 = nn.Conv3d(32, 64, kernel_size=2, padding=1)
60
- self.bn2 = nn.BatchNorm3d(64)
61
- self.pool2 = nn.MaxPool3d(2)
62
-
63
- self.conv3 = nn.Conv3d(64, 128, kernel_size=2, padding=1)
64
- self.bn3 = nn.BatchNorm3d(128)
65
- self.pool3 = nn.MaxPool3d(2)
66
-
67
- self.conv4 = nn.Conv3d(128, 256, kernel_size=2, padding=1)
68
- self.bn4 = nn.BatchNorm3d(256)
69
- self.pool4 = nn.AdaptiveAvgPool3d(1)
70
-
71
- self.dropout = nn.Dropout(dropout_rate)
72
- self.fc1 = nn.Linear(256, 128)
73
- self.fc2 = nn.Linear(128, num_classes)
74
-
75
- self._initialize_weights()
76
-
77
- def _initialize_weights(self):
78
- for m in self.modules():
79
- if isinstance(m, nn.Conv3d):
80
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
81
- if m.bias is not None:
82
- nn.init.constant_(m.bias, 0)
83
- elif isinstance(m, nn.BatchNorm3d):
84
- nn.init.constant_(m.weight, 1)
85
- nn.init.constant_(m.bias, 0)
86
- elif isinstance(m, nn.Linear):
87
- nn.init.normal_(m.weight, 0, 0.01)
88
- nn.init.constant_(m.bias, 0)
89
-
90
- def forward(self, x):
91
- x = self.pool1(F.relu(self.bn1(self.conv1(x))))
92
- x = self.pool2(F.relu(self.bn2(self.conv2(x))))
93
- x = self.pool3(F.relu(self.bn3(self.conv3(x))))
94
- x = self.pool4(F.relu(self.bn4(self.conv4(x))))
95
- x = x.view(x.size(0), -1)
96
- x = F.relu(self.fc1(self.dropout(x)))
97
- return self.fc2(x)
98
-
99
- class Enhanced3DCNNv8(nn.Module):
100
- def __init__(self, num_classes=3, dropout_rate=0.25):
101
- super().__init__()
102
- self.conv1 = nn.Conv3d(1, 32, kernel_size=1, padding=1)
103
- self.bn1 = nn.BatchNorm3d(32)
104
- self.pool1 = nn.MaxPool3d(2)
105
-
106
- self.conv2 = nn.Conv3d(32, 64, kernel_size=1, padding=1)
107
- self.bn2 = nn.BatchNorm3d(64)
108
- self.pool2 = nn.MaxPool3d(2)
109
-
110
- self.conv3 = nn.Conv3d(64, 128, kernel_size=1, padding=1)
111
- self.bn3 = nn.BatchNorm3d(128)
112
- self.pool3 = nn.MaxPool3d(2)
113
-
114
- self.conv4 = nn.Conv3d(128, 256, kernel_size=1, padding=1)
115
- self.bn4 = nn.BatchNorm3d(256)
116
- self.pool4 = nn.AdaptiveAvgPool3d(1)
117
-
118
- self.dropout = nn.Dropout(dropout_rate)
119
- self.fc1 = nn.Linear(256, 128)
120
- self.fc2 = nn.Linear(128, num_classes)
121
-
122
- self._initialize_weights()
123
-
124
- def _initialize_weights(self):
125
- for m in self.modules():
126
- if isinstance(m, nn.Conv3d):
127
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
128
- if m.bias is not None:
129
- nn.init.constant_(m.bias, 0)
130
- elif isinstance(m, nn.BatchNorm3d):
131
- nn.init.constant_(m.weight, 1)
132
- nn.init.constant_(m.bias, 0)
133
- elif isinstance(m, nn.Linear):
134
- nn.init.normal_(m.weight, 0, 0.01)
135
- nn.init.constant_(m.bias, 0)
136
-
137
- def forward(self, x):
138
- x = self.pool1(F.relu(self.bn1(self.conv1(x))))
139
- x = self.pool2(F.relu(self.bn2(self.conv2(x))))
140
- x = self.pool3(F.relu(self.bn3(self.conv3(x))))
141
- x = self.pool4(F.relu(self.bn4(self.conv4(x))))
142
- x = x.view(x.size(0), -1)
143
- x = F.relu(self.fc1(self.dropout(x)))
144
- return self.fc2(x)
 
1
  import torch
2
  import torch.nn as nn
3
+ import torch.nn.functional as F