
Commit 718dde9

[v1.9.x][submodule] Upgrade oneDNN to the top of rls-v2.4 branch (#20994)

1 parent: cfbcfb1

2 files changed: 30 additions & 36 deletions

File tree:

  3rdparty/mkldnn (submodule)
  src/operator/nn/mkldnn/mkldnn_convolution.cc

src/operator/nn/mkldnn/mkldnn_convolution.cc: 29 additions & 35 deletions
@@ -112,41 +112,35 @@ std::shared_ptr<mkldnn::convolution_forward::primitive_desc> GetConvFwdImpl(
     int mask = (param.requantize_scales.size() > 1) ? 2 : 0;
     attr.set_output_scales(mask, param.requantize_scales);
   }
-  auto GetConvFwdPd =
-      [&param, &data, &weights, &output, &attr](const mkldnn::convolution_forward::desc& desc) {
-        auto engine = CpuEngine::Get()->get_engine();
-        try {
-          // MKLDNN introduced padded formats since 0.15 which require more memory compared to the
-          // actual size of the tensor. Currently, MKLDNN operators still reuse memory from memory
-          // planning, so here we need to select a suboptimal kernel for computation that has the
-          // expected memory size requirements
-          auto conv_pd =
-              std::make_shared<mkldnn::convolution_forward::primitive_desc>(desc, attr, engine);
-          while (conv_pd->dst_desc().get_size() != GetArraySize(output) ||
-                 conv_pd->src_desc().get_size() != GetArraySize(data) ||
-                 (!param.mkldnn_param.quantized &&
-                  conv_pd->weights_desc().get_size() != GetArraySize(weights)) ||
-                 // With the upgrade of MKLDNN to version 2.4+
-                 // tests/python/mkl/test_subgraph.py::test_pos_conv_add started failing. Switching
-                 // away from primitive with weight mkldnn::format_tag ABcd4b16a4b in order to
-                 // temporarily fix the issue until full fix arrives. Tracking issue:
-                 // https://github.com/apache/incubator-mxnet/issues/20826.
-                 (param.mkldnn_param.quantized && conv_pd->weights_desc().dims()[1] < 4 &&
-                  conv_pd->weights_desc().data.padded_dims[1] == 16)) {
-            // next_impl() will visit desc and engine, please make sure they are still alive here.
-            CHECK(conv_pd->next_impl()) << "No convolution implementation for this request.";
-          }
-          return conv_pd;
-        } catch (mkldnn::error& e) {
-          if (e.status == mkldnn_unimplemented && param.mkldnn_param.quantized) {
-            LOG(ERROR) << "AVX512-BW support or Intel(R) MKL dependency is "
-                          "required for int8 convolution";
-          } else {
-            LOG(ERROR) << e.message;
-          }
-          throw;
-        }
-      };
+  auto GetConvFwdPd = [&param, &data, &weights, &output, &attr](
+                          const mkldnn::convolution_forward::desc& desc) {
+    auto engine = CpuEngine::Get()->get_engine();
+    try {
+      // MKLDNN introduced padded formats since 0.15 which require more memory compared to the
+      // actual size of the tensor. Currently, MKLDNN operators still reuse memory from memory
+      // planning, so here we need to select a suboptimal kernel for computation that has the
+      // expected memory size requirements
+      auto conv_pd =
+          std::make_shared<mkldnn::convolution_forward::primitive_desc>(desc, attr, engine);
+      while (
+          conv_pd->dst_desc().get_size() != GetArraySize(output) ||
+          conv_pd->src_desc().get_size() != GetArraySize(data) ||
+          (!param.mkldnn_param.quantized &&
+           conv_pd->weights_desc().get_size() != GetArraySize(weights))) {
+        // next_impl() will visit desc and engine, please make sure they are still alive here.
+        CHECK(conv_pd->next_impl()) << "No convolution implementation for this request.";
+      }
+      return conv_pd;
+    } catch (mkldnn::error& e) {
+      if (e.status == mkldnn_unimplemented && param.mkldnn_param.quantized) {
+        LOG(ERROR) << "AVX512-BW support or Intel(R) MKL dependency is "
+                      "required for int8 convolution";
+      } else {
+        LOG(ERROR) << e.message;
+      }
+      throw;
+    }
+  };
 
   if (param.conv_param.dilate.ndim() == 0 && bias_md_ptr == nullptr) {
     mkldnn::convolution_forward::desc desc(prop, mkldnn::algorithm::convolution_direct, data_md,

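For readers unfamiliar with the selection loop in this hunk: oneDNN can propose primitive descriptors whose padded (blocked) layouts need more bytes than the plain tensors MXNet's memory planner has already allocated, so the lambda walks the implementation list with next_impl() until the source, weights, and destination sizes fit. The listing below is a minimal, standalone sketch of that pattern against the oneDNN 2.x C++ API (dnnl.hpp); it is not MXNet code, and the shapes, strides, and padding are made-up illustrative values.

// Standalone sketch of the next_impl() selection loop (oneDNN 2.x API).
// Build roughly with: g++ -std=c++14 select_impl.cc -ldnnl
#include <iostream>
#include "dnnl.hpp"

int main() {
  using namespace dnnl;
  engine eng(engine::kind::cpu, 0);

  // Plain layouts stand in for the buffers the framework has already allocated.
  memory::desc src_plain({1, 32, 28, 28}, memory::data_type::f32, memory::format_tag::nchw);
  memory::desc wei_plain({64, 32, 3, 3}, memory::data_type::f32, memory::format_tag::oihw);
  memory::desc dst_plain({1, 64, 28, 28}, memory::data_type::f32, memory::format_tag::nchw);

  // format_tag::any lets oneDNN propose its preferred (possibly padded) layouts.
  memory::desc src_any({1, 32, 28, 28}, memory::data_type::f32, memory::format_tag::any);
  memory::desc wei_any({64, 32, 3, 3}, memory::data_type::f32, memory::format_tag::any);
  memory::desc dst_any({1, 64, 28, 28}, memory::data_type::f32, memory::format_tag::any);

  convolution_forward::desc desc(prop_kind::forward_inference,
                                 algorithm::convolution_direct,
                                 src_any, wei_any, dst_any,
                                 /*strides=*/{1, 1}, /*padding_l=*/{1, 1}, /*padding_r=*/{1, 1});
  convolution_forward::primitive_desc pd(desc, eng);

  // Walk the implementation list until the proposed buffers are no bigger than
  // the plain tensors; this mirrors the while loop in GetConvFwdPd above.
  while (pd.src_desc().get_size() != src_plain.get_size() ||
         pd.weights_desc().get_size() != wei_plain.get_size() ||
         pd.dst_desc().get_size() != dst_plain.get_size()) {
    if (!pd.next_impl()) {
      std::cerr << "No implementation with plain-sized buffers for this shape.\n";
      return 1;
    }
  }
  std::cout << "Selected implementation: " << pd.impl_info_str() << "\n";
  return 0;
}

The extra quantized-weights condition that the old code also checked (rejecting kernels whose padded_dims[1] was 16, i.e. the ABcd4b16a4b weight format, per https://github.com/apache/incubator-mxnet/issues/20826) is dropped in this commit together with the submodule bump, presumably because the underlying problem is resolved on the rls-v2.4 branch.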