
Commit 70d7638

cyyever authored and Skylion007 committed
Fix clang-tidy suppression in torch/csrc/jit (#152271)
Remove some clang-tidy suppressions in torch/csrc/jit by applying fixes or refactoring.

Pull Request resolved: #152271
Approved by: https://github.com/Skylion007, https://github.com/malfet
Co-authored-by: Aaron Gokaslan <aaronGokaslan@gmail.com>
1 parent c02edba commit 70d7638

19 files changed: +55 −52 lines changed

torch/csrc/jit/backends/backend.h

Lines changed: 0 additions & 3 deletions
@@ -7,7 +7,6 @@
 
 namespace torch::jit {
 namespace {
-// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
 inline c10::FunctionSchema getIsAvailableSchema() {
   c10::Argument self("self", c10::AnyType::get());
   c10::Argument available("available", c10::BoolType::get());
@@ -21,7 +20,6 @@ inline c10::FunctionSchema getIsAvailableSchema() {
 
 constexpr static auto kBackendsNamespace = "__backends__";
 
-// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
 inline c10::FunctionSchema getCompileSchema() {
   c10::Argument self("self", c10::AnyType::get());
   c10::Argument mod("processed", c10::AnyType::get());
@@ -38,7 +36,6 @@ inline c10::FunctionSchema getCompileSchema() {
   return compile_schema;
 }
 
-// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
 inline c10::FunctionSchema getExecuteSchema() {
   auto any_list_ty = c10::ListType::create(c10::AnyType::get());
   c10::Argument self("self", c10::AnyType::get());

torch/csrc/jit/backends/xnnpack/xnnpack_backend_lib.cpp

Lines changed: 10 additions & 16 deletions
@@ -7,15 +7,12 @@
 #include <caffe2/torch/csrc/jit/backends/xnnpack/compiler/xnn_compiler.h>
 #include <torch/csrc/jit/backends/xnnpack/serialization/schema_generated.h>
 
-namespace torch {
-namespace jit {
-namespace xnnpack {
-namespace delegate {
+namespace torch::jit::xnnpack::delegate {
 
 class XNNModelWrapper : public CustomClassHolder {
  public:
   XNNExecutor executor_;
-  XNNModelWrapper(XNNExecutor executor) : executor_(std::move(executor)){};
+  XNNModelWrapper(XNNExecutor executor) : executor_(std::move(executor)) {}
 
   XNNModelWrapper() = delete;
 
@@ -25,9 +22,8 @@ class XNNModelWrapper : public CustomClassHolder {
 class XNNPackBackend : public PyTorchBackendInterface {
  public:
   // Constructor.
-  // NOLINTNEXTLINE(modernize-use-equals-default)
-  explicit XNNPackBackend() {}
-  virtual ~XNNPackBackend() override = default;
+  explicit XNNPackBackend() = default;
+  ~XNNPackBackend() override = default;
 
   bool is_available() override {
     return xnn_status_success == xnn_initialize(/*allocator=*/nullptr);
@@ -81,17 +77,18 @@ class XNNPackBackend : public PyTorchBackendInterface {
     XNNExecutor& executor = model_wrapper->executor_;
 
     std::vector<float*> input_pointers;
-    for (int i = 0; i < inputs.size(); ++i) {
-      at::IValue val = inputs.get(i);
+    input_pointers.reserve(inputs.size());
+    for (const at::IValue& val : inputs) {
       TORCH_CHECK(val.isTensor(), "Non-tensor inputs not supported");
       input_pointers.push_back(val.toTensor().data_ptr<float>());
     }
 
     std::vector<at::Tensor> output_tensors;
     std::vector<float*> output_pointers;
     output_tensors.reserve(output_shapes.size());
-    for (int i = 0; i < output_shapes.size(); i++) {
-      auto o_shape = output_shapes.get(i).toIntVector();
+    output_pointers.reserve(output_shapes.size());
+    for (const at::IValue& val : output_shapes) {
+      auto o_shape = val.toIntVector();
       auto output = at::empty(o_shape, c10::ScalarType::Float);
       output_tensors.push_back(output);
       output_pointers.push_back(output.data_ptr<float>());
@@ -112,7 +109,4 @@ constexpr auto backend_name = "xnnpack";
 static auto cls = torch::jit::backend<XNNPackBackend>(backend_name);
 } // namespace
 
-} // namespace delegate
-} // namespace xnnpack
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::xnnpack::delegate
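
For readers less familiar with the two refactors above, here is a minimal standalone sketch (not from the commit; the namespace and function names are made up) showing the C++17 nested-namespace definition together with the reserve-then-range-for pattern that replaces the index-based loops:

    #include <cstddef>
    #include <string>
    #include <vector>

    // C++17 nested namespace definition: one declaration replaces the four
    // nested namespace blocks that previously had to be closed one by one.
    namespace example::jit::xnnpack::delegate {

    // Reserving before a push_back loop avoids repeated reallocations, and a
    // range-based for removes the signed/unsigned index comparison that an
    // index-based `for (int i = 0; i < v.size(); ++i)` loop triggers.
    inline std::vector<std::size_t> collectLengths(
        const std::vector<std::string>& inputs) {
      std::vector<std::size_t> out;
      out.reserve(inputs.size());
      for (const std::string& s : inputs) {
        out.push_back(s.size());
      }
      return out;
    }

    } // namespace example::jit::xnnpack::delegate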

torch/csrc/jit/frontend/edit_distance.cpp

Lines changed: 3 additions & 3 deletions
@@ -12,10 +12,10 @@ size_t ComputeEditDistance(
     const char* word1,
     const char* word2,
     size_t maxEditDistance) {
-  size_t m = strlen(word1);
-  size_t n = strlen(word2);
+  size_t m = std::strlen(word1);
+  size_t n = std::strlen(word2);
 
-  const unsigned small_buffer_size = 64;
+  constexpr unsigned small_buffer_size = 64;
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
   unsigned small_buffer[small_buffer_size];
   // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)

torch/csrc/jit/frontend/lexer.cpp

Lines changed: 5 additions & 3 deletions
@@ -2,6 +2,7 @@
 
 #include <c10/util/Exception.h>
 
+#include <cstring>
 #include <string>
 #include <unordered_map>
 
@@ -65,9 +66,10 @@ bool SharedParserData::isBinary(int kind, int* prec) {
 C10_EXPORT int stringToKind(const std::string& str) {
   static std::unordered_map<std::string, int> str_to_kind = []() {
     std::unordered_map<std::string, int> ret_str_to_kind;
-    for (char tok : std::string(valid_single_char_tokens))
-      // NOLINTNEXTLINE(bugprone-signed-char-misuse)
-      ret_str_to_kind[std::string(1, tok)] = tok;
+    ret_str_to_kind.reserve(std::strlen(valid_single_char_tokens));
+    for (const char* tok = valid_single_char_tokens; *tok; tok++) {
+      ret_str_to_kind[std::string(1, *tok)] = static_cast<unsigned char>(*tok);
+    }
 #define DEFINE_CASE(tok, _, str) \
   if (std::string(str) != "")    \
     ret_str_to_kind[str] = tok;
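
The bugprone-signed-char-misuse suppression above becomes unnecessary once the character is widened through unsigned char. A small self-contained sketch of the same pattern (function and parameter names are illustrative, not from the commit):

    #include <cstring>
    #include <string>
    #include <unordered_map>

    // Widening a plain char directly to int can sign-extend on platforms where
    // char is signed, which is what bugprone-signed-char-misuse warns about;
    // going through unsigned char first keeps the stored value non-negative.
    inline std::unordered_map<std::string, int> buildTokenMap(const char* tokens) {
      std::unordered_map<std::string, int> map;
      map.reserve(std::strlen(tokens));
      for (const char* tok = tokens; *tok; ++tok) {
        map[std::string(1, *tok)] = static_cast<unsigned char>(*tok);
      }
      return map;
    }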
torch/csrc/jit/frontend/parser_constants.h

Lines changed: 2 additions & 1 deletion
@@ -1,5 +1,6 @@
 #pragma once
 
 namespace torch::jit {
-static const char* valid_single_char_tokens = "+-*/%@()[]:,={}><.?!&^|~";
+static constexpr const char* valid_single_char_tokens =
+    "+-*/%@()[]:,={}><.?!&^|~";
 } // namespace torch::jit

torch/csrc/jit/mobile/type_parser.cpp

Lines changed: 1 addition & 5 deletions
@@ -19,11 +19,7 @@ static constexpr const char* kTypeTorchbindCustomClass =
 static constexpr const char* kTypeNamedTuple = "NamedTuple";
 
 bool isSpecialChar(char a) {
-  for (const char* c = valid_single_char_tokens; *c; c++) {
-    if (a == *c)
-      return true;
-  }
-  return false;
+  return std::strchr(valid_single_char_tokens, a);
 }
 } // namespace
 
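
The std::strchr form works because strchr returns a pointer to the first matching character (or nullptr), and that pointer converts to bool. A minimal sketch of the same membership test, reusing the token string shown above (the wrapper itself is illustrative, not the commit's code):

    #include <cstring>

    // Membership test over a NUL-terminated token set; strchr's returned
    // pointer is null exactly when the character is absent. Note that strchr
    // also matches the terminating NUL, so '\0' counts as a hit.
    inline bool isSpecialChar(char a) {
      static constexpr const char* kTokens = "+-*/%@()[]:,={}><.?!&^|~";
      return std::strchr(kTokens, a) != nullptr;
    }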

torch/csrc/jit/passes/freeze_module.cpp

Lines changed: 1 addition & 0 deletions
@@ -980,6 +980,7 @@ class AttributePropagator {
   std::unordered_map<ClassTypePtr, IValue::HashAliasedIValues>
       SharedTypeSubModules_;
 
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   Module& module_;
 
   // Allow to freeze modules containing interfaces.

torch/csrc/jit/python/init.cpp

Lines changed: 4 additions & 4 deletions
@@ -1638,7 +1638,7 @@ void initJITBindings(PyObject* module) {
       "get_record_offset_no_read",
       [](PyTorchStreamReader& self,
          size_t zipfile_header_offset,
-         const std::string filename,
+         const std::string& filename,
          size_t size,
          uint64_t storage_alignment) {
         return self.getRecordOffsetNoRead(
@@ -1748,7 +1748,7 @@ void initJITBindings(PyObject* module) {
 
   m.def(
       "_jit_resolve_packet",
-      [](const char* op_name, py::args args, const py::kwargs& kwargs) {
+      [](const char* op_name, const py::args& args, const py::kwargs& kwargs) {
         try {
           auto symbol = Symbol::fromQualString(op_name);
           bool allow_numbers_as_tensors = opAllowsNumbersAsTensors(symbol);
@@ -2140,7 +2140,7 @@ void initJITBindings(PyObject* module) {
             return py::make_tuple();
           },
           /* __setstate__ */
-          [](const py::tuple& /* unused */) { // NOLINT
+          [](const py::tuple& /* unused */) {
            TORCH_CHECK(false, "Can not unpickle torch.futures.Future");
            // Note that this return has no meaning since we always
            // throw, it's only here to satisfy PyBind's API
@@ -2177,7 +2177,7 @@ void initJITBindings(PyObject* module) {
            return py::make_tuple();
          },
          /* __setstate__ */
-         [](const py::tuple& /* unused */) { // NOLINT
+         [](const py::tuple& /* unused */) {
            TORCH_CHECK(false, "Can not unpickle torch.jit._Await");
            // Note that this return has no meaning since we always
            // throw, it's only here to satisfy PyBind's API
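
The init.cpp changes all address clang-tidy's performance-unnecessary-value-param check by taking std::string and py::args by const reference instead of by value. A minimal, hypothetical pybind11 binding (module and function names are invented for illustration) showing the preferred signatures:

    #include <pybind11/pybind11.h>

    #include <string>

    namespace py = pybind11;

    PYBIND11_MODULE(example_bindings, m) {
      // Passing std::string and py::args by const reference avoids copying the
      // string and the argument tuple on every call.
      m.def(
          "resolve",
          [](const std::string& name,
             const py::args& args,
             const py::kwargs& kwargs) {
            return py::make_tuple(name, args.size(), kwargs.size());
          });
    }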

torch/csrc/jit/runtime/argument_spec.h

Lines changed: 1 addition & 1 deletion
@@ -241,7 +241,7 @@ struct CompleteArgumentInfo;
 struct CompleteArgumentSpec {
   CompleteArgumentSpec(bool with_grad, at::ArrayRef<IValue> inputs)
       : ninputs(inputs.size()) {
-    int32_t all_dims = 0;
+    int64_t all_dims = 0;
     const auto num_inputs = inputs.size();
     for (const auto i : c10::irange(num_inputs)) {
       if (!inputs[i].isTensor())
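
For context on the argument_spec.h change: all_dims appears to accumulate the dimension counts of the input tensors, and those counts are 64-bit values, so a 32-bit accumulator forces a narrowing conversion. A rough standalone sketch of the idea (simplified; not the actual CompleteArgumentSpec code):

    #include <cstdint>
    #include <vector>

    // Summing per-tensor dimension counts into a 64-bit total avoids the
    // implicit narrowing that clang-tidy flags when the accumulator is int32_t.
    inline int64_t totalDims(const std::vector<std::vector<int64_t>>& shapes) {
      int64_t all_dims = 0;
      for (const auto& shape : shapes) {
        all_dims += static_cast<int64_t>(shape.size());
      }
      return all_dims;
    }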

torch/csrc/jit/runtime/instruction.cpp

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ static_assert(
     "Instructions should be 8 bytes");
 std::ostream& operator<<(std::ostream& out, Instruction inst) {
   // TODO: use op info to print out the op in a more user-friendly way
-  int nargs = std::strlen(OpInfo(inst.op));
+  auto nargs = std::strlen(OpInfo(inst.op));
   out << inst.op;
   if (nargs > 0) {
     out << " " << inst.X;
