qhardswish.cpp
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Context.h>
#include <torch/library.h>
#include <ATen/native/quantized/cpu/QuantizedOps.h>
#include <ATen/native/quantized/cpu/init_qnnpack.h>
#include <ATen/native/quantized/cpu/QnnpackUtils.h>
#include <caffe2/utils/threadpool/pthreadpool-cpp.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/_empty_affine_quantized.h>
#endif

#include <algorithm>
#include <limits> // std::numeric_limits, used for the uint8 output clamp below

namespace at::native {

DEFINE_DISPATCH(qhardswish_stub);

namespace {

#ifdef USE_PYTORCH_QNNPACK
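// Applies hardswish elementwise to a quantized quint8 tensor using the QNNPACK
// backend. The output tensor qy must already be allocated with the desired
// output scale and zero point; the result is written into it and returned.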
Tensor qnnpack_hardswish(const Tensor& qx, Tensor& qy) {
  TORCH_CHECK(qx.ndimension() > 0, "qnnpack_hardswish(): Got empty input tensor");
  TORCH_CHECK(qx.scalar_type() == c10::kQUInt8,
              "qnnpack_hardswish(): Expected input data type to be ",
              toString(c10::kQUInt8),
              " but got ",
              toString(qx.scalar_type()));
  initQNNPACK();
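
  // QNNPACK's hardswish operator works on a [batch, channels] view of the
  // data: dim 0 is treated as the batch, and the remaining dims are flattened
  // into the per-batch element (channel) count.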
  size_t num_elems = qx.numel() / qx.size(0);
  const auto i_zero_point = qx.q_zero_point();
  const auto i_scale = qx.q_scale();
  const auto o_zero_point = qy.q_zero_point();
  const auto o_scale = qy.q_scale();

  pytorch_qnnp_operator_t hardswish_op{nullptr};
  const pytorch_qnnp_status createStatus = pytorch_qnnp_create_hardswish_nc_q8(
      num_elems, // channels
      i_zero_point,
      i_scale,
      o_zero_point,
      o_scale,
      std::numeric_limits<uint8_t>::min(), // output min
      std::numeric_limits<uint8_t>::max(), // output max
      0, // flags
      &hardswish_op);
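
  // Hand ownership of the raw operator handle to a smart pointer so it is
  // released even if one of the status checks below throws.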
  std::unique_ptr<pytorch_qnnp_operator, QnnpackOperatorDeleter>
      qnnpack_uniq_ptr(hardswish_op);

  TORCH_INTERNAL_ASSERT(createStatus == pytorch_qnnp_status_success,
                        "failed to create QNNPACK Hardswish operator");

  const pytorch_qnnp_status setupStatus = pytorch_qnnp_setup_hardswish_nc_q8(
      hardswish_op,
      qx.size(0), // batch size
      (uint8_t*)qx.data_ptr<c10::quint8>(), // input data
      num_elems, // input stride
      (uint8_t*)qy.data_ptr<c10::quint8>(), // output data
      num_elems); // output stride
  TORCH_INTERNAL_ASSERT(setupStatus == pytorch_qnnp_status_success,
                        "failed to setup QNNPACK Hardswish operator");

  pthreadpool_t threadpool = caffe2::pthreadpool_();
  const pytorch_qnnp_status runStatus =
      pytorch_qnnp_run_operator(hardswish_op, threadpool);
  TORCH_INTERNAL_ASSERT(
      runStatus == pytorch_qnnp_status_success,
      "failed to run QNNPACK Hardswish operator");
  return qy;
}
#endif // USE_PYTORCH_QNNPACK

} // namespace
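
// Entry point for quantized::hardswish on CPU. Allocates the output with the
// requested scale and zero point, then either runs the QNNPACK kernel (when
// that engine is selected and the input is quint8) or falls back to the
// generic qhardswish_stub kernel. Hardswish computes x * clamp(x + 3, 0, 6) / 6.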
static Tensor quantized_hardswish(const Tensor& qx, double output_scale, int64_t output_zero_point) {
  Tensor qy = at::_empty_affine_quantized(
      qx.sizes(),
      at::device(kCPU).dtype(qx.scalar_type()),
      output_scale,
      output_zero_point,
      qx.suggest_memory_format());
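  // Fast path: the QNNPACK kernel needs a contiguous quint8 input, so run it
  // on a contiguous copy (in the tensor's suggested memory format).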
#ifdef USE_PYTORCH_QNNPACK
  if (at::globalContext().qEngine() == at::QEngine::QNNPACK &&
      qx.scalar_type() == kQUInt8) {
    Tensor qx_contig = qx.contiguous(qx.suggest_memory_format());
    qnnpack_hardswish(qx_contig, qy);
    return qy;
  }
#endif // USE_PYTORCH_QNNPACK
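  // Fallback: dispatch to the native CPU kernel registered for qhardswish_stub.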
  qhardswish_stub(qx.device().type(), qx, qy);
  return qy;
}
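
// Register this function as the QuantizedCPU implementation of the
// quantized::hardswish operator.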
TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
  m.impl(TORCH_SELECTIVE_NAME("quantized::hardswish"), TORCH_FN(quantized_hardswish));
}

} // namespace at::native