@@ -191,7 +191,7 @@ void UpdateTorchValueByOnnxValueInfo(
   }
 }

-bool IsValidONNXControlflowNode(const Node* n) {
+static bool IsValidONNXControlflowNode(const Node* n) {
   // Skip when block size is zero. This is when the node is being created,
   // and doesn't have subblocks attached yet. Run shape inference for these
   // nodes later, when the subgraph has already completed shape inferencing.
@@ -205,7 +205,7 @@ bool IsValidONNXControlflowNode(const Node* n) {
   return true;
 }

-bool IsValidONNXNode(const Node* n) {
+static bool IsValidONNXNode(const Node* n) {
   auto node_kind = n->kind();

   if (!node_kind.is_onnx()) {
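Note on the two `static` hunks above (and the identical change to `RemoveProcessedInputs` further down): marking a file-local helper `static` gives it internal linkage, so the symbol stays private to its translation unit, missing-declaration warnings go away, and the compiler may inline the function and drop the out-of-line copy. A minimal standalone sketch of the effect; the names are illustrative, not from this file:

    #include <string>

    // Internal linkage: visible only inside this .cpp file. No other
    // translation unit can declare and call it, so the linker need not
    // keep an externally visible symbol, and the compiler may inline it
    // at its only call site and discard the definition entirely.
    static bool is_onnx_domain(const std::string& domain) {
      return domain.empty() || domain == "ai.onnx";
    }

    // External linkage as before: part of the file's interface.
    bool IsDefaultDomain(const std::string& domain) {
      return is_onnx_domain(domain);
    }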
@@ -282,7 +282,7 @@ Value* CloneValueFromListConstruct(
   auto input = n_graph->addInput();
   if (scalar_type) {
     auto v_type = TensorType::create(
-        scalar_type.value(),
+        scalar_type,
         at::kCPU,
         c10::SymbolicShape(),
         c10::VaryingShape<c10::Stride>{},
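This hunk drops a redundant unwrap: `scalar_type` is already an optional, and (assuming the first parameter of `TensorType::create` is a `std::optional<at::ScalarType>`, as in current PyTorch headers) the engaged optional can be passed straight through instead of calling `.value()` only for the callee to re-wrap it. The pattern in miniature, with illustrative types:

    #include <optional>

    void create(std::optional<int> scalar_type) {
      // ... consumes the optional directly ...
      (void)scalar_type;
    }

    void caller(std::optional<int> scalar_type) {
      if (scalar_type) {
        create(scalar_type.value()); // unwraps, then the callee re-wraps
        create(scalar_type);         // same result, no round trip
      }
    }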
@@ -1244,7 +1244,7 @@ void ProcessUnsqueezeNode(Node* n) {
 void ComputeConstant(Node* n, int opset_version) {
   if (n->kind() == ::c10::onnx::Constant) {
     if (n->kindOf(attr::value) == AttributeKind::t) {
-      at::Tensor const_val = n->t(attr::value);
+      const at::Tensor& const_val = n->t(attr::value);
       at::Tensor const_val_copy =
           at::empty(const_val.sizes(), const_val.options());
       const_val_copy.copy_(const_val);
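The `const at::Tensor&` binding above relies on the attribute accessor returning the stored tensor by const reference; since `at::Tensor` is a reference-counted handle, copy-initializing one costs an atomic refcount increment (plus a decrement at scope end), while the reference binds to the stored tensor for free. A toy stand-in using `std::shared_ptr` to show the shape of the pattern:

    #include <memory>
    #include <vector>

    // Like at::Tensor, Handle is a cheap-to-copy refcounted wrapper,
    // but "cheap" still means one atomic increment per copy.
    using Handle = std::shared_ptr<std::vector<float>>;

    struct Attr {
      Handle stored;
      const Handle& get() const { return stored; } // returns a reference
    };

    void process(const Attr& a) {
      Handle by_value = a.get();      // refcount ++ now, -- at scope exit
      const Handle& by_ref = a.get(); // no refcount traffic at all
      (void)by_value;
      (void)by_ref;
    }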
@@ -1381,7 +1381,7 @@ void ComputeConstant(Node* n, int opset_version) {
                 .value()
                 .sizes();
         if (input0_shape_size.has_value()) {
-          auto input0_shape_value = input0_shape_size.value();
+          const auto& input0_shape_value = input0_shape_size.value();
           if (ConstantValueMap::HasValue(n->input(1)->debugName())) {
             // When value of `shape` is statically known,
             // output shape can be computed.
@@ -1474,7 +1474,7 @@ void ComputeConstant(Node* n, int opset_version) {
                 .value()
                 .sizes();
         if (input0_shape_size.has_value()) {
-          auto input0_shape_value = input0_shape_size.value();
+          const auto& input0_shape_value = input0_shape_size.value();
           int64_t total_size = 1;
           auto is_full_static = true;
           for (const auto i : c10::irange(input0_shape_value.size())) {
@@ -1510,7 +1510,7 @@ void ComputeConstant(Node* n, int opset_version) {
                 .value()
                 .sizes();
         if (input0_shape_size.has_value()) {
-          auto input0_shape_value = input0_shape_size.value();
+          const auto& input0_shape_value = input0_shape_size.value();
           if (ConstantValueMap::HasValue(n->input(1)->debugName())) {
             auto shape_temp = ConstantValueMap::GetValueInto1DInt64Vector(
                 n->input(1)->debugName());
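The last three hunks all fix the same silent copy: `std::optional::value()` returns a reference to the contained object, but plain `auto` deduces the vector type and deep-copies it, heap allocation included. `const auto&` binds to the vector living inside the optional, which safely outlives the reference in all three scopes. Reduced to its essentials:

    #include <cstdint>
    #include <optional>
    #include <vector>

    void example(const std::optional<std::vector<int64_t>>& shape) {
      if (shape.has_value()) {
        auto by_value = shape.value();      // allocates + copies elements
        const auto& by_ref = shape.value(); // aliases the stored vector;
                                            // valid while `shape` lives
        (void)by_value;
        (void)by_ref;
      }
    }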
@@ -1659,10 +1659,10 @@ void SpecialPostProcess(Node* n) {
   };

   auto find_sequence_empty = [](Value* input,
-                                TensorTypePtr t_type) -> Node* {
+                                const TensorTypePtr& t_type) -> Node* {
     auto find_sequence_empty_impl =
         [](Value* input,
-           TensorTypePtr t_type,
+           const TensorTypePtr& t_type,
            auto& find_sequence_empty_ref) -> Node* {
       auto input_node = input->node();
       TORCH_INTERNAL_ASSERT(input_node);
@@ -1708,7 +1708,7 @@ void SpecialPostProcess(Node* n) {
       return nullptr;
     };
     return find_sequence_empty_impl(
-        input, std::move(t_type), find_sequence_empty_impl);
+        input, t_type, find_sequence_empty_impl);
   };

   if (seq_node && t_type && t_type->scalarType()) {
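`TensorTypePtr` is a `shared_ptr` alias, so the old by-value lambda parameters cost one atomic refcount bump per call, including every recursive step; the const-reference parameters pass the same pointer with no refcount traffic, and the now-pointless `std::move` at the call site goes with them. The self-passing recursion trick used by `find_sequence_empty_impl` deserves a minimal, self-contained illustration (toy types, not from this file):

    #include <memory>

    struct TypeTag {
      int depth;
    };

    int count_depth(const std::shared_ptr<TypeTag>& tag) {
      // A lambda cannot name itself, but a generic lambda can recurse by
      // taking a reference to itself as an extra argument.
      auto impl = [](const std::shared_ptr<TypeTag>& t, int n,
                     auto& self) -> int {
        // const& parameter: no shared_ptr copy on any recursive call.
        if (n >= t->depth) {
          return n;
        }
        return self(t, n + 1, self);
      };
      return impl(tag, 0, impl);
    }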
@@ -1837,7 +1837,7 @@ void FetchBlockInputMetadataFromParent(Block* b) {
   }
 }

-void RemoveProcessedInputs(const Node* n) {
+static void RemoveProcessedInputs(const Node* n) {
   // After processing a node for shape inference, remove intermediate tensors
   // that are stored in ConstantValueMap to reduce memory usage.
   // This will only remove tensors that are no longer needed by any other node.
@@ -2213,7 +2213,7 @@ void ONNXSetDynamicInputShape(
   GRAPH_UPDATE("dynamic axes tensor names:", [&]() {
     std::vector<std::string> res(dynamic_axes.size());
     std::transform(
-        dynamic_axes.begin(), dynamic_axes.end(), res.begin(), [](auto pair) {
+        dynamic_axes.begin(), dynamic_axes.end(), res.begin(), [](const auto& pair) {
           return pair.first;
         });
     return res;
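Here `[](auto pair)` copied a full map entry (key string and value) for every element visited by `std::transform`; `const auto& pair` reads each entry in place. A standalone version of the same loop, assuming `dynamic_axes` is a string-keyed map as the surrounding code suggests:

    #include <algorithm>
    #include <string>
    #include <unordered_map>
    #include <vector>

    std::vector<std::string> axis_names(
        const std::unordered_map<std::string, std::vector<int>>& dynamic_axes) {
      std::vector<std::string> res(dynamic_axes.size());
      // const auto& avoids copying each pair<const string, vector<int>>.
      std::transform(
          dynamic_axes.begin(), dynamic_axes.end(), res.begin(),
          [](const auto& pair) { return pair.first; });
      return res;
    }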