
Commit 7a92b51

XuehaiPan authored and pytorchmergebot committed
[BE][2/16] fix typos in torch/ (torch/_*/) (#156312)
Pull Request resolved: #156312
Approved by: https://github.com/albanD
1 parent 8b97e4d commit 7a92b51


68 files changed (+121 / -122 lines)

.lintrunner.toml

Lines changed: 0 additions & 1 deletion
@@ -1169,7 +1169,6 @@ exclude_patterns = [
     'aten/src/ATen/[a-mA-M]*/**',
     'test/**',
     'test/[a-hA-h]*/**',
-    'torch/_*/**',
     'torch/distributed/tensor/**',
 ]
 init_command = [

test/cpp/jit/test_custom_class_registrations.cpp

Lines changed: 2 additions & 2 deletions
@@ -371,12 +371,12 @@ struct ElementwiseInterpreter : torch::CustomClassHolder {
     return environment.at(*output_name_);
   }
 
-  // Ser/De infrastructure. See
+  // SerDe infrastructure. See
   // https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html#defining-serialization-deserialization-methods-for-custom-c-classes
   // for more info.
 
   // This is the type we will use to marshall information on disk during
-  // ser/de. It is a simple tuple composed of primitive types and simple
+  // SerDe. It is a simple tuple composed of primitive types and simple
   // collection types like vector, optional, and dict.
   using SerializationType = std::tuple<
       std::vector<std::string> /*input_names_*/,

test/export/test_export.py

Lines changed: 2 additions & 2 deletions
@@ -487,7 +487,7 @@ def _is_tensor_leaf(x):
         eps = [ep]
         if test_serdes:
             # test dynamic shapes serialization
-            # test that behavior remains the same when exporting with ser/des specs:
+            # test that behavior remains the same when exporting with SerDes specs:
             # serialize + deserialize original specs, and export.
             ep_serdes = export(
                 model,
@@ -5011,7 +5011,7 @@ def forward(self, x):
         # There should be nonzero view nodes in the graph
         self.assertTrue(view_count > 0)
 
-    @testing.expectedFailureCppSerDes  # cpp ser/der not handling complicated symbols
+    @testing.expectedFailureCppSerDes  # cpp SerDer not handling complicated symbols
     def test_solver_unsupported_sympy_function(self):
         # repro of https://github.com/pytorch/pytorch/issues/131897
 

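For orientation, the "SerDes" round trip this test exercises amounts to serializing an exported program (including its dynamic-shape specs) and deserializing it before re-checking behavior. A minimal sketch with the public torch.export API; the toy module, file name, and shapes below are illustrative and not taken from the test:

import torch
from torch.export import Dim, export, load, save

class MulModel(torch.nn.Module):  # hypothetical toy module for illustration
    def forward(self, x):
        return x * 2

x = torch.randn(4, 8)
# Export with a dynamic batch dimension, then round-trip through save/load.
ep = export(MulModel(), (x,), dynamic_shapes={"x": {0: Dim("batch")}})
save(ep, "mul.pt2")
ep_serdes = load("mul.pt2")
# Behavior should be unchanged after deserialization.
torch.testing.assert_close(ep_serdes.module()(x), ep.module()(x))
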
test/test_fx.py

Lines changed: 1 addition & 1 deletion
@@ -954,7 +954,7 @@ def __init__(self, interpreter):
         script_out = scripted_lowered(x)
         torch.testing.assert_close(script_out, ref_out)
 
-        # Test TorchScript ser/de
+        # Test TorchScript SerDe
         import_copy = self.getExportImportCopy(scripted_lowered)
         imported_out = import_copy(x)
         torch.testing.assert_close(imported_out, ref_out)

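The "TorchScript SerDe" step above saves the scripted module and loads it back through the test suite's getExportImportCopy helper. A self-contained sketch of the same round trip using only public torch.jit APIs (the module and input are made up for illustration):

import io
import torch

class AddOne(torch.nn.Module):  # hypothetical toy module
    def forward(self, x):
        return x + 1

scripted = torch.jit.script(AddOne())
x = torch.randn(3)
ref_out = scripted(x)

buf = io.BytesIO()
torch.jit.save(scripted, buf)      # serialize
buf.seek(0)
import_copy = torch.jit.load(buf)  # deserialize
torch.testing.assert_close(import_copy(x), ref_out)
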
test/test_unary_ufuncs.py

Lines changed: 1 addition & 1 deletion
@@ -1104,7 +1104,7 @@ def test_silu_complex(self, device, dtype):
             self.assertEqual(res.real, out.real, atol=atol, rtol=rtol)
             self.assertEqual(res.imag, out.imag, atol=atol, rtol=rtol)
 
-    # It is not obvious how to merge this into OpInfo becuase these inputs
+    # It is not obvious how to merge this into OpInfo because these inputs
     # succeed for gradcheck but are expected to fail for gradgradcheck
     @dtypes(torch.double)
     def test_sinc(self, device, dtype):

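As background for the gradcheck/gradgradcheck distinction in that comment: gradcheck numerically verifies first-order gradients, while gradgradcheck additionally verifies gradients of gradients. A small illustrative sketch (the random input here is arbitrary and is not one of the special inputs the test is concerned with):

import torch
from torch.autograd import gradcheck, gradgradcheck

# Double precision and requires_grad=True are needed for the numerical checks.
x = torch.randn(8, dtype=torch.double, requires_grad=True)

gradcheck(torch.sinc, (x,))      # first-order check
gradgradcheck(torch.sinc, (x,))  # second-order check; per the comment above,
                                 # the test's special inputs are expected to fail here
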
torch/_decomp/decompositions.py

Lines changed: 3 additions & 3 deletions
@@ -52,7 +52,7 @@ class Reduction(Enum):
 
 
 # This wraps a decomposition and performs various type promotion logic within it, depending on the strategy provided
-# We're currently re-using ELEMENTWISE_TYPE_PROMOTION_KIND, although some of the usages are on non-elementwise ops
+# We're currently reusing ELEMENTWISE_TYPE_PROMOTION_KIND, although some of the usages are on non-elementwise ops
 # Will need to validate the non-elementwise uses
 def type_casts(
     f: Callable,
@@ -947,7 +947,7 @@ def check_positive(param, param_name, strict=True):
     )
     torch._check(
         all(c > 0 for c in output_size),
-        lambda: f"Given an input with spacial size {tuple(shape[-2:])}, "
+        lambda: f"Given an input with spatial size {tuple(shape[-2:])}, "
         f"kernel_size={kernel_size}, dilation={dilation}, "
         f"padding={padding}, stride={stride}, "
         "the calculated shape of the array of sliding blocks "
@@ -4046,7 +4046,7 @@ def nll_loss2d_forward(
     return _nll_loss_forward(self, target, weight, reduction, ignore_index)
 
 
-# These are adapted from aten/src/ATen/native/UpSample.h, wich is based on
+# These are adapted from aten/src/ATen/native/UpSample.h, which is based on
 # https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm
 def _upsample_cubic_convolution1(x: Tensor, A: float) -> Tensor:
     return ((A + 2) * x - (A + 3)) * x * x + 1

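For reference, the _upsample_cubic_convolution1 helper in that last hunk corresponds to the |x| <= 1 branch of the bicubic convolution kernel from the linked article (with kernel parameter A; PyTorch's upsampling code conventionally uses A = -0.75, though that value is not part of this hunk):

W(x) = (A + 2)\,\lvert x\rvert^{3} - (A + 3)\,\lvert x\rvert^{2} + 1, \qquad \lvert x\rvert \le 1

Factoring gives ((A + 2) * x - (A + 3)) * x * x + 1, which is the returned expression; the absolute values drop out assuming the helper is only applied to non-negative sample distances, as in the upsampling code it was adapted from.
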
torch/_dynamo/convert_frame.py

Lines changed: 1 addition & 1 deletion
@@ -1063,7 +1063,7 @@ def format_func_info(code: CodeType) -> str:
         return f"'{code.co_name}' ({code.co_filename}:{code.co_firstlineno})"
 
     # NS: Don't add period at the end of string, as it'll be added to URL
-    # renderring it incorrect
+    # rendering it incorrect
     log.warning(
         "torch._dynamo hit config.%s (%s)\n"
         " function: %s\n"

torch/_dynamo/output_graph.py

Lines changed: 1 addition & 1 deletion
@@ -347,7 +347,7 @@ class StackLocalsMetadata:
 
 
 def get_builtins_dict(global_scope):
     # f_globals["__builtins__"] can be a dict or a module. This is an
-    # implemenation detail -
+    # implementation detail -
     # https://docs.python.org/3/library/builtins.html.
 
     # This makes guarding on any builtin messy because the guard check_fn

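The dict-or-module behavior referenced in that comment is documented CPython behavior: in __main__, __builtins__ is the builtins module itself, while in other modules it is that module's __dict__. A minimal sketch of the normalization such a helper has to perform (illustrative only, not the actual body of get_builtins_dict):

import builtins

def get_builtins_dict_sketch(global_scope):
    # f_globals["__builtins__"] may be the `builtins` module (in __main__)
    # or its __dict__ (in imported modules); normalize to a dict either way.
    b = global_scope.get("__builtins__", builtins)
    return b if isinstance(b, dict) else vars(b)

assert get_builtins_dict_sketch(globals())["len"] is len
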
torch/_dynamo/variables/builder.py

Lines changed: 2 additions & 2 deletions
@@ -1662,13 +1662,13 @@ def wrap_listlike(self, value: Union[tuple, list, odict_values, NamedTuple]):
             # <==> variable tracker" 1-to-1 mapping, which is mainly handled via
             # `side_effects`. Note that constructing `tensor_variable` above
             # already adds it to graph arg, but we never registered it with
-            # `side_effects`. The pre-emptive `realize` calls here basically
+            # `side_effects`. The preemptive `realize` calls here basically
             # does that registration (at the end of `self.__call__`).
             #
             # A slightly cleaner alternative is to register the
             # `tensor_variable`s above with `side_effects` directly, and just
             # return the `list_variable`, but that breaks some tensor-subclass
-            # releated tests like `test_inputs_aliasing_bytecode_stack_restore`,
+            # related tests like `test_inputs_aliasing_bytecode_stack_restore`,
             # because `tensor_variable` is constructed via
             # `handle_traced_output`, which doesn't really expect/handle tensor
             # subclass.

torch/_export/converter.py

Lines changed: 6 additions & 6 deletions
@@ -134,7 +134,7 @@ def execute_subgraph_from_prim_loop(
 ):
     """
     subgraph: GraphModule from sub-block.
-    iter_idx: The index of interation.
+    iter_idx: The index of interaction.
     len_loop_local_arguments: The number of loop local arguments in args.
     """
 
@@ -810,7 +810,7 @@ def convert_call_function_op(self, node: torch._C.Node):
 
         fx_node = self.fx_graph.call_function(target, args, kwargs)
 
-        # TODO: covnert sourceRange() into stack_trace
+        # TODO: convert sourceRange() into stack_trace
         # fx_node.meta["stack_trace"] = node.sourceRange()
 
         if node.outputsSize() == 1:
@@ -883,7 +883,7 @@ def convert_aten_Int(self, node: torch._C.Node):
             torch.ops.aten._local_scalar_dense.default, (to_copy_node,)
         )
 
-        # TODO: covnert sourceRange() into stack_trace
+        # TODO: convert sourceRange() into stack_trace
         # fx_node.meta["stack_trace"] = node.sourceRange()
 
         output_name = node.output().debugName()
@@ -942,7 +942,7 @@ def convert_aten_div(self, node: torch._C.Node):
             kwargs,
         )
 
-        # TODO: covnert sourceRange() into stack_trace
+        # TODO: convert sourceRange() into stack_trace
         # fx_node.meta["stack_trace"] = node.sourceRange()
 
         output_name = node.output().debugName()
@@ -1006,7 +1006,7 @@ def convert_aten_add(self, node: torch._C.Node):
             ):
                 target = torch.ops.aten.add.t
             else:
-                raise RuntimeError(f"unable to determind the target for {node}")
+                raise RuntimeError(f"unable to determined the target for {node}")
         else:
             target = get_op_overload(node)
 
@@ -1565,7 +1565,7 @@ def lift_get_attr(self):
         #
         # This function should happen in TS2EPConverter instead of
         # TS2FXGraphConverter since it gets attributes from self.ts_model
-        # which is not accessable in TS2FXGraphConverter. It is similar to where
+        # which is not accessible in TS2FXGraphConverter. It is similar to where
         # we collect self.name_to_param and self.name_to_buffer.
         name_to_attribute_fqn: dict[str, str] = {}
 
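For context on the convert_call_function_op hunk above: the converter emits nodes onto a torch.fx graph via call_function, much like the self.fx_graph.call_function(target, args, kwargs) line in the diff. A minimal, self-contained sketch of that API (the relu target and placeholder name are illustrative, not necessarily what the converter produces):

import torch
import torch.fx as fx

g = fx.Graph()
x = g.placeholder("x")
node = g.call_function(torch.ops.aten.relu.default, (x,))
g.output(node)

gm = fx.GraphModule(torch.nn.Module(), g)
print(gm.code)                        # generated forward(self, x)
print(gm(torch.tensor([-1.0, 2.0])))  # tensor([0., 2.])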