Commit b602ea9

Revert "[inductor] turn on windows inductor UTs (#160161)"
This reverts commit 4416433. Reverted #160161 on behalf of https://github.com/xuhancn because it was auto-merged with two related issues ([comment](#160161 (comment)))
1 parent 4416433, commit b602ea9

5 files changed: +10, -18 lines

.github/workflows/trunk.yml

Lines changed: 3 additions & 5 deletions
@@ -123,11 +123,9 @@ jobs:
       runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
       test-matrix: |
         { include: [
-          { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
-          { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
-          { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
-          { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
-          { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+          { config: "default", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+          { config: "default", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+          { config: "default", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
         ]}
     secrets: inherit
133131

test/dynamo/test_decorators.py

Lines changed: 0 additions & 4 deletions
@@ -10,7 +10,6 @@
 import torch._dynamo.testing
 from torch._dynamo.exc import IncorrectUsage, Unsupported
 from torch._dynamo.utils import counters
-from torch.testing._internal.common_utils import skipIfWindows


 def my_custom_function(x):
@@ -893,9 +892,6 @@ def gn(x):
         self.assertEqual(gn(inp), inp + 3)
         self.assertEqual(cnts.frame_count, 1)

-    @skipIfWindows(
-        msg="TODO: (xuhancn), confirm if torch.compiler.disable work on Windows."
-    )
     def test_disable_recursive_false(self):
         def fn2(x):
             return x + 1
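For context, the @skipIfWindows(msg=...) decorator removed in the hunk above conditionally skips a single test when the suite runs on Windows. A minimal sketch of that pattern using only the standard library (this is not PyTorch's actual implementation; _skip_if_windows and ExampleTests are illustrative names):

import sys
import unittest


def _skip_if_windows(msg: str = "skipped on Windows"):
    # Return a decorator that skips the test when running on Windows.
    return unittest.skipIf(sys.platform == "win32", msg)


class ExampleTests(unittest.TestCase):
    @_skip_if_windows(msg="TODO: confirm behavior on Windows.")
    def test_addition(self):
        self.assertEqual(1 + 1, 2)


if __name__ == "__main__":
    unittest.main()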

test/dynamo/test_logging.py

Lines changed: 1 addition & 4 deletions
@@ -21,10 +21,8 @@
 from torch.testing._internal.common_cuda import SM90OrLater
 from torch.testing._internal.common_utils import (
     find_free_port,
-    IS_WINDOWS,
     munge_exc,
     skipIfTorchDynamo,
-    skipIfWindows,
     TEST_XPU,
     xfailIf,
 )
@@ -530,7 +528,7 @@ def test_invalid_artifact_flag_error_msg(self):
             "import torch",
             env=env,
         )
-        lines = stderr.decode().split("\r\n" if IS_WINDOWS else "\n")
+        lines = stderr.decode().split("\n")
         # This is a sanity assert that our error is not spammy.
         # As of this test creation this was 18.
         # See this issue for the purpose o this test:
@@ -546,7 +544,6 @@ def test_invalid_artifact_flag_error_msg(self):
         self.assertEqual(lines[-4], "Valid settings:")

     @requires_distributed()
-    @skipIfWindows(msg="TODO: (xuhancn), Can't reproduce locally")
     def test_distributed_rank_logging(self):
         env = dict(os.environ)
         env["TORCH_LOGS"] = "dynamo"

test/inductor/test_cpu_select_algorithm.py

Lines changed: 1 addition & 2 deletions
@@ -26,7 +26,6 @@
 )
 from torch.testing._internal.common_utils import (
     IS_MACOS,
-    IS_WINDOWS,
     parametrize,
     skipIfWindows,
     TEST_MKL,
@@ -3095,5 +3094,5 @@ def forward(self, x, weight):
 if __name__ == "__main__":
     from torch.testing._internal.inductor_utils import HAS_CPU

-    if HAS_CPU and not (IS_MACOS or IS_WINDOWS):
+    if HAS_CPU and not IS_MACOS:
         run_tests()

torch/_dynamo/test_case.py

Lines changed: 5 additions & 3 deletions
@@ -41,9 +41,11 @@ def run_tests(needs: Union[str, tuple[str, ...]] = ()) -> None:
     if TEST_WITH_TORCHDYNAMO or TEST_WITH_CROSSREF:
         return  # skip testing

-    # Enable Inductor UTs on Windows for CPU.
-    # CUDA on Windows is not verified, NVDA developer can continue to enable CUDA based on CPU path.
-    if torch.cuda.is_available() and IS_WINDOWS:
+    if (
+        not torch.xpu.is_available()
+        and IS_WINDOWS
+        and os.environ.get("TORCHINDUCTOR_WINDOWS_TESTS", "0") == "0"
+    ):
         return

     if isinstance(needs, str):
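The restored gate above is the behavioral core of this revert: on Windows, run_tests() returns early and the whole test file is skipped unless an XPU device is available or the TORCHINDUCTOR_WINDOWS_TESTS environment variable is set to something other than "0". A minimal standalone sketch of that gating logic (should_skip_on_windows is an illustrative helper name, not part of the PyTorch source):

import os
import sys


def should_skip_on_windows(xpu_available: bool) -> bool:
    # Mirror the restored condition: skip on Windows unless an XPU device
    # is present or TORCHINDUCTOR_WINDOWS_TESTS is explicitly enabled.
    is_windows = sys.platform == "win32"
    opted_in = os.environ.get("TORCHINDUCTOR_WINDOWS_TESTS", "0") != "0"
    return is_windows and not xpu_available and not opted_in


# Example: with no XPU and no opt-in, a Windows run skips the file;
# setting TORCHINDUCTOR_WINDOWS_TESTS=1 re-enables it.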
