Skip to content

Commit 7a8b5aa

Browse files
committed
dil always owns whole storage when creating aten tensor if dil_tensor is int8
1 parent b61029b commit 7a8b5aa

File tree

4 files changed

+12
-3
lines changed

4 files changed

+12
-3
lines changed

intel_pytorch_extension_py/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,8 @@ def __init__(self, mixed_dtype = torch.bfloat16, configure_file = None):
1515
self.dtype = mixed_dtype
1616
self.configure_file = configure_file
1717

18+
if self.dtype != torch.bfloat16:
19+
core.clear_indicators()
1820
# for int8 path, if user give a exited configure file, load it.
1921
if self.configure_file != None and self.dtype != torch.bfloat16:
2022
if os.path.exists(self.configure_file) and os.stat(self.configure_file).st_size != 0:

torch_ipex/csrc/auto_opt_config.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -98,6 +98,10 @@ class AutoOptConfig {
9898
}
9999
}
100100

101+
inline void clear_indicators() {
102+
indicators_.clear();
103+
}
104+
101105
inline void add_indicators() {
102106
num_ops_id = 0;
103107
indicators_.clear();

torch_ipex/csrc/cpu/dbl/Common.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -230,8 +230,8 @@ at::Tensor gen_aten_tensor_by(dil::tensor&& dil_tensor) {
230230
// And even the dil tensor is plain format, it also cannot be shared with cpu buffer.
231231
shade_data_context->mix_prec_type = cpu::MIX_PREC_TYPE::MIX_BF16_FP32;
232232
at_data_type = at::kFloat;
233-
} else if (check_auto_mix_int8_fp32() && (dil_tensor_type == dil::data_type::s8 || dil_tensor_type == dil::data_type::u8)) {
234-
// If the user enables auto-mix-precision, then the aten tensor should always be float.
233+
} else if ((dil_tensor_type == dil::data_type::s8 || dil_tensor_type == dil::data_type::u8)) {
234+
// If the dil_tensor is int8 or uint8, then the aten tensor should always be float.
235235
// And even the dil tensor is plain format, it also cannot be shared with cpu buffer.
236236
shade_data_context->mix_prec_type = cpu::MIX_PREC_TYPE::MIX_INT8_FP32;
237237
at_data_type = at::kFloat;

torch_ipex/csrc/init_python_bindings.cpp

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -147,9 +147,12 @@ void InitIpexModuleBindings(py::module m) {
147147
m.def("get_int8_calibration", []() { return AutoOptConfig::singleton().get_int8_calibration(); });
148148
m.def("calibration_reset", []() { AutoOptConfig::singleton().calibration_reset(); });
149149
m.def("add_indicators", []() { AutoOptConfig::singleton().add_indicators(); });
150+
// clear indicators for case having many scopes which have different structure
151+
m.def("clear_indicators", []() { AutoOptConfig::singleton().clear_indicators(); });
150152
m.def("get_int8_configures", []() {
151153
py::list output_list;
152154
auto indicators = AutoOptConfig::singleton().get_indicators();
155+
IPEX_CHECK(indicators.size() > 0, "can't load a empty indicators, please first do calibration step");
153156
for (auto indicator: indicators) {
154157
py::dict d;
155158
d["id"] = indicator.get_indicator_id();
@@ -169,7 +172,7 @@ void InitIpexModuleBindings(py::module m) {
169172
}
170173
return output_list; } );
171174
m.def("load_indicators_file", [](const py::list& l) {
172-
IPEX_CHECK(py::len(l) > 0, "can't load a empty configures, please first do calibration setp");
175+
IPEX_CHECK(py::len(l) > 0, "can't load a empty configures, please first do calibration step");
173176
std::vector<Indicator> indicators;
174177
for (py::handle i : l) {
175178
int64_t id = py::cast<std::int64_t>(i["id"]);

0 commit comments

Comments
 (0)