Commit 49a459e

Merge remote-tracking branch 'upstream/main' into vit_siglip_and_reg
2 parents a58f916 + 68b2824 commit 49a459e

17 files changed (+48, -50 lines)

.github/workflows/tests.yml

Lines changed: 8 additions & 6 deletions
@@ -16,10 +16,12 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest]
-        python: ['3.10']
-        torch: ['1.13.0']
-        torchvision: ['0.14.0']
+        python: ['3.10', '3.11']
+        torch: [{base: '1.13.0', vision: '0.14.0'}, {base: '2.1.0', vision: '0.16.0'}]
         testmarker: ['-k "not test_models"', '-m base', '-m cfg', '-m torchscript', '-m features', '-m fxforward', '-m fxbackward']
+        exclude:
+          - python: '3.11'
+            torch: {base: '1.13.0', vision: '0.14.0'}
     runs-on: ${{ matrix.os }}

     steps:
@@ -34,17 +36,17 @@ jobs:
         pip install -r requirements-dev.txt
     - name: Install torch on mac
       if: startsWith(matrix.os, 'macOS')
-      run: pip install --no-cache-dir torch==${{ matrix.torch }} torchvision==${{ matrix.torchvision }}
+      run: pip install --no-cache-dir torch==${{ matrix.torch.base }} torchvision==${{ matrix.torch.vision }}
     - name: Install torch on Windows
       if: startsWith(matrix.os, 'windows')
-      run: pip install --no-cache-dir torch==${{ matrix.torch }} torchvision==${{ matrix.torchvision }}
+      run: pip install --no-cache-dir torch==${{ matrix.torch.base }} torchvision==${{ matrix.torch.vision }}
     - name: Install torch on ubuntu
       if: startsWith(matrix.os, 'ubuntu')
       run: |
         sudo sed -i 's/azure\.//' /etc/apt/sources.list
         sudo apt update
         sudo apt install -y google-perftools
-        pip install --no-cache-dir torch==${{ matrix.torch }}+cpu torchvision==${{ matrix.torchvision }}+cpu -f https://download.pytorch.org/whl/torch_stable.html
+        pip install --no-cache-dir torch==${{ matrix.torch.base }}+cpu torchvision==${{ matrix.torch.vision }}+cpu -f https://download.pytorch.org/whl/torch_stable.html
     - name: Install requirements
       run: |
         pip install -r requirements.txt
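The new matrix pairs each torch release with its matching torchvision release, and the exclude entry removes the one combination that cannot be installed, since PyTorch 1.13.0 does not publish Python 3.11 wheels. As a rough illustration of how GitHub Actions expands this matrix (illustrative Python, not code from the repository):

from itertools import product

# Matrix axes as declared above in tests.yml.
pythons = ['3.10', '3.11']
torches = [
    {'base': '1.13.0', 'vision': '0.14.0'},
    {'base': '2.1.0', 'vision': '0.16.0'},
]
excludes = [{'python': '3.11', 'torch': {'base': '1.13.0', 'vision': '0.14.0'}}]

# Cross product of the axes, minus excluded combinations.
jobs = [
    {'python': py, 'torch': t}
    for py, t in product(pythons, torches)
    if {'python': py, 'torch': t} not in excludes
]
for job in jobs:
    print(job['python'], job['torch']['base'], job['torch']['vision'])

Three torch/python pairings survive (3.10 with 1.13.0, 3.10 with 2.1.0, 3.11 with 2.1.0); each is further crossed with the os and testmarker axes.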

tests/test_optim.py

Lines changed: 18 additions & 20 deletions
@@ -10,7 +10,7 @@

 import torch
 from torch.testing._internal.common_utils import TestCase
-from torch.autograd import Variable
+from torch.nn import Parameter
 from timm.scheduler import PlateauLRScheduler

 from timm.optim import create_optimizer_v2
@@ -21,9 +21,9 @@


 def _test_basic_cases_template(weight, bias, input, constructor, scheduler_constructors):
-    weight = Variable(weight, requires_grad=True)
-    bias = Variable(bias, requires_grad=True)
-    input = Variable(input)
+    weight = Parameter(weight)
+    bias = Parameter(bias)
+    input = Parameter(input)
     optimizer = constructor(weight, bias)
     schedulers = []
     for scheduler_constructor in scheduler_constructors:
@@ -55,9 +55,9 @@ def fn():


 def _test_state_dict(weight, bias, input, constructor):
-    weight = Variable(weight, requires_grad=True)
-    bias = Variable(bias, requires_grad=True)
-    input = Variable(input)
+    weight = Parameter(weight)
+    bias = Parameter(bias)
+    input = Parameter(input)

     def fn_base(optimizer, weight, bias):
         optimizer.zero_grad()
@@ -73,8 +73,9 @@ def fn_base(optimizer, weight, bias):
     for _i in range(20):
         optimizer.step(fn)
     # Clone the weights and construct new optimizer for them
-    weight_c = Variable(weight.data.clone(), requires_grad=True)
-    bias_c = Variable(bias.data.clone(), requires_grad=True)
+    with torch.no_grad():
+        weight_c = Parameter(weight.clone().detach())
+        bias_c = Parameter(bias.clone().detach())
     optimizer_c = constructor(weight_c, bias_c)
     fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c)
     # Load state dict
@@ -86,12 +87,8 @@ def fn_base(optimizer, weight, bias):
     for _i in range(20):
         optimizer.step(fn)
         optimizer_c.step(fn_c)
-        #assert torch.equal(weight, weight_c)
-        #assert torch.equal(bias, bias_c)
         torch_tc.assertEqual(weight, weight_c)
         torch_tc.assertEqual(bias, bias_c)
-    # Make sure state dict wasn't modified
-    torch_tc.assertEqual(state_dict, state_dict_c)
     # Make sure state dict is deterministic with equal but not identical parameters
     torch_tc.assertEqual(optimizer.state_dict(), optimizer_c.state_dict())
     # Make sure repeated parameters have identical representation in state dict
@@ -103,9 +100,10 @@ def fn_base(optimizer, weight, bias):
     if not torch.cuda.is_available():
         return

-    input_cuda = Variable(input.data.float().cuda())
-    weight_cuda = Variable(weight.data.float().cuda(), requires_grad=True)
-    bias_cuda = Variable(bias.data.float().cuda(), requires_grad=True)
+    with torch.no_grad():
+        input_cuda = Parameter(input.clone().detach().float().cuda())
+        weight_cuda = Parameter(weight.clone().detach().cuda())
+        bias_cuda = Parameter(bias.clone().detach().cuda())
     optimizer_cuda = constructor(weight_cuda, bias_cuda)
     fn_cuda = functools.partial(fn_base, optimizer_cuda, weight_cuda, bias_cuda)

@@ -216,21 +214,21 @@ def _test_rosenbrock(constructor, scheduler_constructors=None):
         scheduler_constructors = []
     params_t = torch.tensor([1.5, 1.5])

-    params = Variable(params_t, requires_grad=True)
+    params = Parameter(params_t)
     optimizer = constructor([params])
     schedulers = []
     for scheduler_constructor in scheduler_constructors:
         schedulers.append(scheduler_constructor(optimizer))

     solution = torch.tensor([1, 1])
-    initial_dist = params.data.dist(solution)
+    initial_dist = params.clone().detach().dist(solution)

     def eval(params, w):
         # Depending on w, provide only the x or y gradient
         optimizer.zero_grad()
         loss = rosenbrock(params)
         loss.backward()
-        grad = drosenbrock(params.data)
+        grad = drosenbrock(params.clone().detach())
         # NB: We torture test the optimizer by returning an
         # uncoalesced sparse tensor
         if w:
@@ -256,7 +254,7 @@ def eval(params, w):
         else:
             scheduler.step()

-    torch_tc.assertLessEqual(params.data.dist(solution), initial_dist)
+    torch_tc.assertLessEqual(params.clone().detach().dist(solution), initial_dist)


 def _build_params_dict(weight, bias, **kwargs):
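The test changes above replace the long-deprecated torch.autograd.Variable wrapper and .data accesses with torch.nn.Parameter and explicit clone().detach() copies. A minimal sketch of the same pattern outside the test harness (plain SGD is used here only for illustration; the tests build their optimizers via create_optimizer_v2):

import torch
from torch.nn import Parameter

weight = Parameter(torch.randn(10, 5))  # Parameter implies requires_grad=True
bias = Parameter(torch.randn(10))

# Detached copies for a second optimizer, replacing the old
# Variable(weight.data.clone(), requires_grad=True) idiom.
with torch.no_grad():
    weight_c = Parameter(weight.clone().detach())
    bias_c = Parameter(bias.clone().detach())

opt = torch.optim.SGD([weight, bias], lr=0.1)
opt_c = torch.optim.SGD([weight_c, bias_c], lr=0.1)
opt_c.load_state_dict(opt.state_dict())  # state carries over to the equal-but-distinct copies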

timm/layers/mlp.py

Lines changed: 0 additions & 2 deletions
@@ -130,8 +130,6 @@ def __init__(
         self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1])
         self.drop2 = nn.Dropout(drop_probs[1])

-        self.drop = nn.Dropout(drop)
-
     def init_weights(self):
         # override init of fc1 w/ gate portion set to weight near zero, bias=1
         nn.init.ones_(self.fc1_g.bias)

timm/models/beit.py

Lines changed: 1 addition & 1 deletion
@@ -155,7 +155,7 @@ def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None):
             x = F.scaled_dot_product_attention(
                 q, k, v,
                 attn_mask=rel_pos_bias,
-                dropout_p=self.attn_drop.p,
+                dropout_p=self.attn_drop.p if self.training else 0.,
             )
         else:
             q = q * self.scale
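The dropout_p=self.attn_drop.p if self.training else 0. guard introduced here is the same one-line fix applied in cait, eva, fastvit, maxxvit, metaformer and nest below: F.scaled_dot_product_attention is a functional call, so unlike the nn.Dropout module it applies dropout whenever dropout_p > 0, even after model.eval(). A minimal sketch of the pattern (the Attn module below is a stripped-down illustration, not the timm attention class):

import torch
import torch.nn as nn
import torch.nn.functional as F


class Attn(nn.Module):
    """Minimal fused-attention module showing only the dropout gating."""

    def __init__(self, attn_drop: float = 0.1):
        super().__init__()
        self.attn_drop = nn.Dropout(attn_drop)

    def forward(self, q, k, v):
        return F.scaled_dot_product_attention(
            q, k, v,
            # Gate on self.training: the functional API ignores eval() mode.
            dropout_p=self.attn_drop.p if self.training else 0.,
        )


m = Attn().eval()
q = k = v = torch.randn(1, 4, 8, 16)  # (batch, heads, seq, head_dim)
assert torch.allclose(m(q, k, v), m(q, k, v))  # deterministic at inference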

timm/models/cait.py

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ def forward(self, x):
         if self.fused_attn:
             x_cls = torch.nn.functional.scaled_dot_product_attention(
                 q, k, v,
-                dropout_p=self.attn_drop.p,
+                dropout_p=self.attn_drop.p if self.training else 0.,
             )
         else:
             q = q * self.scale

timm/models/eva.py

Lines changed: 1 addition & 1 deletion
@@ -126,7 +126,7 @@ def forward(
             x = F.scaled_dot_product_attention(
                 q, k, v,
                 attn_mask=attn_mask,
-                dropout_p=self.attn_drop.p,
+                dropout_p=self.attn_drop.p if self.training else 0.,
             )
         else:
             q = q * self.scale

timm/models/fastvit.py

Lines changed: 1 addition & 1 deletion
@@ -514,7 +514,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         if self.fused_attn:
             x = torch.nn.functional.scaled_dot_product_attention(
                 q, k, v,
-                dropout_p=self.attn_drop.p,
+                dropout_p=self.attn_drop.p if self.training else 0.,
             )
         else:
             q = q * self.scale

timm/models/maxxvit.py

Lines changed: 2 additions & 2 deletions
@@ -190,7 +190,7 @@ def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
                 k.transpose(-1, -2).contiguous(),
                 v.transpose(-1, -2).contiguous(),
                 attn_mask=attn_bias,
-                dropout_p=self.attn_drop.p,
+                dropout_p=self.attn_drop.p if self.training else 0.,
             ).transpose(-1, -2).reshape(B, -1, H, W)
         else:
             q = q * self.scale
@@ -259,7 +259,7 @@ def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
             x = torch.nn.functional.scaled_dot_product_attention(
                 q, k, v,
                 attn_mask=attn_bias,
-                dropout_p=self.attn_drop.p,
+                dropout_p=self.attn_drop.p if self.training else 0.,
             )
         else:
             q = q * self.scale

timm/models/metaformer.py

Lines changed: 1 addition & 1 deletion
@@ -198,7 +198,7 @@ def forward(self, x):
         if self.fused_attn:
             x = F.scaled_dot_product_attention(
                 q, k, v,
-                dropout_p=self.attn_drop.p,
+                dropout_p=self.attn_drop.p if self.training else 0.,
             )
         else:
             attn = (q @ k.transpose(-2, -1)) * self.scale

timm/models/nest.py

Lines changed: 3 additions & 3 deletions
@@ -59,14 +59,14 @@ def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.)
     def forward(self, x):
         """
         x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim)
-        """
+        """
         B, T, N, C = x.shape
         # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head)
         qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5)
         q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

         if self.fused_attn:
-            x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p)
+            x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p if self.training else 0.)
         else:
             q = q * self.scale
             attn = q @ k.transpose(-2, -1)  # (B, H, T, N, N)
@@ -330,7 +330,7 @@ def __init__(
         # Hint: (img_size // patch_size) gives number of patches along edge of image. sqrt(self.num_blocks[0]) is the
         # number of blocks along edge of image
         self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0]))
-
+
         # Patch embedding
         self.patch_embed = PatchEmbed(
             img_size=img_size,
