Commit f7fe8d6

fix bug (match channel)

1 parent d8bf72b

2 files changed (+13, -13 lines)

models/modules.py (11 additions, 11 deletions)

@@ -83,14 +83,14 @@ def forward(self, x):

         return out

-# Softmax Attention
-model = SoftmaxAttention(3).cuda()
-x = torch.randn(2,3,32,32)
-out = model(Variable(x.cuda()))
-print(out.size())
-
-# Sigmoid Attention
-model = SigmoidAttention(3).cuda()
-x = torch.randn(2,3,32,32)
-out = model(Variable(x.cuda()))
-print(out.size())
+# # Softmax Attention
+# model = SoftmaxAttention(3).cuda()
+# x = torch.randn(2,3,32,32)
+# out = model(Variable(x.cuda()))
+# print(out.size())
+
+# # Sigmoid Attention
+# model = SigmoidAttention(3).cuda()
+# x = torch.randn(2,3,32,32)
+# out = model(Variable(x.cuda()))
+# print(out.size())

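The removed lines above were a module-level smoke test that ran on import (and required a GPU). If the quick check is still wanted, one option not taken by this commit is to keep it behind a __main__ guard instead of commenting it out; the sketch below only illustrates that option, and it assumes the SoftmaxAttention and SigmoidAttention classes defined earlier in models/modules.py plus a CUDA-capable device.

# Hypothetical alternative (not part of this commit): run the smoke test
# only when models/modules.py is executed directly, so plain imports no
# longer construct CUDA modules.
if __name__ == "__main__":
    import torch

    x = torch.randn(2, 3, 32, 32)      # dummy NCHW batch

    # Softmax Attention -- assumes the class defined above in this file
    model = SoftmaxAttention(3).cuda()
    print(model(x.cuda()).size())       # Variable() is unnecessary in modern PyTorch

    # Sigmoid Attention
    model = SigmoidAttention(3).cuda()
    print(model(x.cuda()).size())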
models/ressoftattnet.py (2 additions, 2 deletions)

@@ -26,8 +26,8 @@ def __init__(self, block, layers, num_classes=1000):
         self.layer1 = self._make_layer(block, 16, layers[0])
         self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
         self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
-        self.att = SoftmaxAttention(64)
-        self.bn = nn.BatchNorm2d(64)
+        self.att = SoftmaxAttention(64 * block.expansion)
+        self.bn = nn.BatchNorm2d(64 * block.expansion)
         self.avgpool = nn.AvgPool2d(8)
         self.fc = nn.Linear(64 * block.expansion, num_classes)

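The "match channel" fix: layer3 outputs 64 * block.expansion feature maps, so when the block type has expansion > 1 (e.g. a Bottleneck-style block with expansion = 4) the old SoftmaxAttention(64) and nn.BatchNorm2d(64) were built for the wrong channel count, while self.fc already used 64 * block.expansion. A minimal sketch of the mismatch for the BatchNorm2d case, assuming expansion = 4 and the 8x8 spatial size implied by AvgPool2d(8):

import torch
import torch.nn as nn

expansion = 4                                  # block.expansion for a Bottleneck-style block
feat = torch.randn(2, 64 * expansion, 8, 8)    # shape layer3 would produce

bn_old = nn.BatchNorm2d(64)                    # before the fix: expects 64 channels
bn_new = nn.BatchNorm2d(64 * expansion)        # after the fix: matches layer3's output

try:
    bn_old(feat)
except RuntimeError as err:
    print("old code fails:", err)              # running stats sized for 64 channels, not 256

print(bn_new(feat).shape)                      # torch.Size([2, 256, 8, 8])

The same reasoning presumably applies to SoftmaxAttention, since it is constructed with the channel count of the tensor it receives.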