Skip to content

Commit bb6eb6e

Browse files
author
rraminen
committed
Removing skip conditions of test_conv_backend_cudnn tests on ROCm
1 parent e8af168 commit bb6eb6e

File tree

1 file changed

+11
-71
lines changed

1 file changed

+11
-71
lines changed

test/nn/test_convolution.py

Lines changed: 11 additions & 71 deletions
Original file line number | Diff line number | Diff line change
@@ -2440,17 +2440,17 @@ def test_conv_transpose_with_output_size_and_no_batch_dim(self, device, N):
24402440
decorators=[onlyCUDA, disablecuDNN],
24412441
name="cuda_depthwise3d",
24422442
),
2443-
# === cudnn ===
2443+
# === cudnn or miopen ===
24442444
subtest(
24452445
(
24462446
(2, 6, 7),
24472447
False,
24482448
False,
24492449
3,
24502450
torch.strided,
2451-
torch._C._ConvBackend.Cudnn,
2451+
torch._C._ConvBackend.Miopen if torch.version.hip else torch._C._ConvBackend.Cudnn,
24522452
),
2453-
decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen],
2453+
decorators=[onlyCUDA, skipCUDAIfNoCudnn],
24542454
name="cudnn1d",
24552455
),
24562456
subtest(
@@ -2460,9 +2460,9 @@ def test_conv_transpose_with_output_size_and_no_batch_dim(self, device, N):
24602460
False,
24612461
3,
24622462
torch.strided,
2463-
torch._C._ConvBackend.Cudnn,
2463+
torch._C._ConvBackend.Miopen if torch.version.hip else torch._C._ConvBackend.Cudnn,
24642464
),
2465-
decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen],
2465+
decorators=[onlyCUDA, skipCUDAIfNoCudnn],
24662466
name="cudnn2d",
24672467
),
24682468
subtest(
@@ -2472,9 +2472,9 @@ def test_conv_transpose_with_output_size_and_no_batch_dim(self, device, N):
24722472
False,
24732473
3,
24742474
torch.strided,
2475-
torch._C._ConvBackend.Cudnn,
2475+
torch._C._ConvBackend.Miopen if torch.version.hip else torch._C._ConvBackend.Cudnn,
24762476
),
2477-
decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen],
2477+
decorators=[onlyCUDA, skipCUDAIfNoCudnn],
24782478
name="cudnn3d",
24792479
),
24802480
subtest(
@@ -2484,9 +2484,9 @@ def test_conv_transpose_with_output_size_and_no_batch_dim(self, device, N):
24842484
False,
24852485
3,
24862486
torch.strided,
2487-
torch._C._ConvBackend.CudnnTranspose,
2487+
torch._C._ConvBackend.MiopenTranspose if torch.version.hip else torch._C._ConvBackend.CudnnTranspose,
24882488
),
2489-
decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen],
2489+
decorators=[onlyCUDA, skipCUDAIfNoCudnn],
24902490
name="cudnn1d_transposed",
24912491
),
24922492
subtest(
@@ -2496,75 +2496,15 @@ def test_conv_transpose_with_output_size_and_no_batch_dim(self, device, N):
24962496
False,
24972497
3,
24982498
torch.strided,
2499-
torch._C._ConvBackend.CudnnTranspose,
2499+
torch._C._ConvBackend.MiopenTranspose if torch.version.hip else torch._C._ConvBackend.CudnnTranspose,
25002500
),
2501-
decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen],
2501+
decorators=[onlyCUDA, skipCUDAIfNoCudnn],
25022502
name="cudnn2d_transposed",
25032503
),
25042504
# FIXME: RuntimeError: CUDA out of memory.
25052505
# subtest(((2, 6, 7, 8, 9), True, False, 3, torch.strided, torch._C._ConvBackend.CudnnTranspose),
25062506
# decorators=[onlyCUDA, skipCUDAIfNoCudnn, skipCUDAIfMiopen], name='cudnn3d_transposed'),
25072507
# === miopen ===
2508-
subtest(
2509-
(
2510-
(2, 6, 7),
2511-
False,
2512-
False,
2513-
3,
2514-
torch.strided,
2515-
torch._C._ConvBackend.Miopen,
2516-
),
2517-
decorators=[onlyCUDA, skipCUDAIfNoMiopen],
2518-
name="miopen1d",
2519-
),
2520-
subtest(
2521-
(
2522-
(2, 6, 7, 8),
2523-
False,
2524-
False,
2525-
3,
2526-
torch.strided,
2527-
torch._C._ConvBackend.Miopen,
2528-
),
2529-
decorators=[onlyCUDA, skipCUDAIfNoMiopen],
2530-
name="miopen2d",
2531-
),
2532-
subtest(
2533-
(
2534-
(2, 6, 7, 8, 9),
2535-
False,
2536-
False,
2537-
3,
2538-
torch.strided,
2539-
torch._C._ConvBackend.Miopen,
2540-
),
2541-
decorators=[onlyCUDA, skipCUDAIfNoMiopen],
2542-
name="miopen3d",
2543-
),
2544-
subtest(
2545-
(
2546-
(2, 6, 7),
2547-
True,
2548-
False,
2549-
3,
2550-
torch.strided,
2551-
torch._C._ConvBackend.MiopenTranspose,
2552-
),
2553-
decorators=[onlyCUDA, skipCUDAIfNoMiopen],
2554-
name="miopen1d_transposed",
2555-
),
2556-
subtest(
2557-
(
2558-
(2, 6, 7, 8),
2559-
True,
2560-
False,
2561-
3,
2562-
torch.strided,
2563-
torch._C._ConvBackend.MiopenTranspose,
2564-
),
2565-
decorators=[onlyCUDA, skipCUDAIfNoMiopen],
2566-
name="miopen2d_transposed",
2567-
),
25682508
subtest(
25692509
(
25702510
(2, 6, 7, 8, 9),

0 commit comments

Comments (0)