
Commit adf6623

Apply clang format
1 parent 64e5218 commit adf6623

5 files changed (+34 −24 lines changed)


src/common/transformations/src/transformations/low_precision/mark_dequantization_subgraph.cpp

Lines changed: 6 additions & 2 deletions
@@ -231,7 +231,8 @@ ov::pass::MarkDequantization::MarkDequantization(const element::TypeVector& prec
     // required zero points
     auto required_subtract_pattern = wrap_type<v1::Subtract>({input_pattern, zp_reshape_pattern});
     auto required_convert_pattern = wrap_type<v0::Convert>({required_subtract_pattern}, consumers_count(1));
-    auto pattern = std::make_shared<ov::pass::pattern::op::Or>(OutputVector{multiply_pattern, required_convert_pattern});
+    auto pattern =
+        std::make_shared<ov::pass::pattern::op::Or>(OutputVector{multiply_pattern, required_convert_pattern});
 
     ov::matcher_pass_callback callback = [OV_CAPTURE_CPY_AND_THIS](Matcher& m) -> bool {
         const auto& pt_map = m.get_pattern_value_map();
@@ -249,7 +250,10 @@ ov::pass::MarkDequantization::MarkDequantization(const element::TypeVector& prec
             return false;
         } else {
             // Multiply and Subtract have to be marked as dq
-            set_rt_info(pt_map, mark_as_dequantization_node, {subtract_pattern, multiply_pattern}, {/* not applicable */});
+            set_rt_info(pt_map,
+                        mark_as_dequantization_node,
+                        {subtract_pattern, multiply_pattern},
+                        {/* not applicable */});
 
             // Convert might be presented on scales, zp and data_input.
             // Depending on the transformation arguments they have to be marked/unmarked with disable_cf rt_info.
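
For context, the patterns reformatted above describe the standard weight-dequantization chain: Convert → Subtract (zero point) → Multiply (scale). A minimal sketch of that chain built from stock OpenVINO ops follows; it is illustrative only (the element types, scalar values, and helper name are ours, not taken from this commit or from the pass itself).

```cpp
// Sketch: the dequantization chain that MarkDequantization's patterns target.
// Convert -> Subtract(zero point) -> Multiply(scale) on top of compressed weights.
#include <memory>

#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/subtract.hpp"

std::shared_ptr<ov::Node> build_dq_chain(const std::shared_ptr<ov::Node>& compressed_weights) {
    // Widen the compressed (e.g. u4/u2) weights to the compute precision.
    auto convert = std::make_shared<ov::op::v0::Convert>(compressed_weights, ov::element::f32);
    // Shift by the zero point...
    auto zero_point = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {8.0f});
    auto subtract = std::make_shared<ov::op::v1::Subtract>(convert, zero_point);
    // ...then rescale to the original value range.
    auto scale = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.05f});
    return std::make_shared<ov::op::v1::Multiply>(subtract, scale);
}
```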

src/common/transformations/src/transformations/op_conversions/convert_fc_to_compressed.cpp

Lines changed: 17 additions & 14 deletions
@@ -65,7 +65,8 @@ ov::pass::ConvertFullyConnectedToFullyConnectedCompressed::ConvertFullyConnected
     auto required_convert_m = wrap_type<ov::op::v0::Convert>({required_subtract_m});
 
     auto bias_m = any_input();
-    auto weights_input_m = std::make_shared<ov::pass::pattern::op::Or>(ov::OutputVector{reshape_m, transpose_m, mul_m, required_convert_m});
+    auto weights_input_m = std::make_shared<ov::pass::pattern::op::Or>(
+        ov::OutputVector{reshape_m, transpose_m, mul_m, required_convert_m});
     auto fully_connected_m = wrap_type<ov::op::internal::FullyConnected>({activation_m, weights_input_m, bias_m});
 
     ov::matcher_pass_callback callback = [OV_CAPTURE_CPY_AND_THIS](ov::pass::pattern::Matcher& m) {
@@ -83,9 +84,8 @@ ov::pass::ConvertFullyConnectedToFullyConnectedCompressed::ConvertFullyConnected
 
         OPENVINO_ASSERT(current_shape.size() == 3);
 
-        auto new_shape = merge_forward
-            ? ov::Shape{current_shape[0] * current_shape[1], current_shape[2]}
-            : ov::Shape{current_shape[0], current_shape[1] * current_shape[2]};
+        auto new_shape = merge_forward ? ov::Shape{current_shape[0] * current_shape[1], current_shape[2]}
+                                       : ov::Shape{current_shape[0], current_shape[1] * current_shape[2]};
 
         return std::make_shared<ov::op::v0::Constant>(*constant, new_shape);
     };
@@ -101,23 +101,26 @@ ov::pass::ConvertFullyConnectedToFullyConnectedCompressed::ConvertFullyConnected
         const size_t OC = *(weights_shape.rbegin() + 1);
 
         const ov::Output<Node>& fc_input_a = fc->input_value(0);
-        std::shared_ptr<ov::Node> fc_input_b = reshape_const_to_2d(pattern_map.at(weights_m).get_node_shared_ptr(), false);
+        std::shared_ptr<ov::Node> fc_input_b =
+            reshape_const_to_2d(pattern_map.at(weights_m).get_node_shared_ptr(), false);
         std::shared_ptr<ov::Node> fc_input_bias = pattern_map.at(bias_m).get_node_shared_ptr();
 
         bool has_required_convert = pattern_map.count(required_convert_m);
         if (has_required_convert) {
-            std::shared_ptr<ov::Node> fc_input_scale = std::make_shared<ov::op::v0::Constant>(element::dynamic, Shape{0});
+            std::shared_ptr<ov::Node> fc_input_scale =
+                std::make_shared<ov::op::v0::Constant>(element::dynamic, Shape{0});
             std::vector<std::shared_ptr<ov::Node>> result_nodes = {};
             ov::disable_constant_folding(fc_input_scale);
             result_nodes.push_back(fc_input_scale);
-            std::shared_ptr<ov::Node> fc_input_zp = reshape_const_to_2d(pattern_map.at(sub_const_m).get_node_shared_ptr(), false);
+            std::shared_ptr<ov::Node> fc_input_zp =
+                reshape_const_to_2d(pattern_map.at(sub_const_m).get_node_shared_ptr(), false);
 
             auto new_fc = std::make_shared<ov::op::internal::FullyConnectedCompressed>(fc_input_a,
-                                                                          fc_input_b,
-                                                                          fc_input_bias,
-                                                                          fc_input_scale,
-                                                                          fc_input_zp,
-                                                                          fc->get_output_type());
+                                                                                       fc_input_b,
+                                                                                       fc_input_bias,
+                                                                                       fc_input_scale,
+                                                                                       fc_input_zp,
+                                                                                       fc->get_output_type());
 
             if (supports_config && !supports_config(new_fc, IC, OC, 1))
                 return false;
@@ -155,8 +158,8 @@ ov::pass::ConvertFullyConnectedToFullyConnectedCompressed::ConvertFullyConnected
             pattern_map.count(sub_no_convert_m) > 0 || pattern_map.count(sub_with_convert_m) > 0;
         if (with_zero_point) {
             // WA: Convert ZP to u8 for OneDNN case to avoid u4 reorder
-            optional_zero_point =
-                convert_u4const_to_u8(reshape_const_to_2d(pattern_map.at(sub_const_m).get_node_shared_ptr(), merge_forward));
+            optional_zero_point = convert_u4const_to_u8(
+                reshape_const_to_2d(pattern_map.at(sub_const_m).get_node_shared_ptr(), merge_forward));
         }
 
         std::shared_ptr<ov::Node> fc_input_scale = scale;
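
The reshape_const_to_2d lambda whose body is reformatted above collapses a 3-D (grouped) weight constant to 2-D, merging either the first two or the last two dimensions. A standalone illustration of that shape arithmetic (the helper name and example values are ours, not the pass's code):

```cpp
// Illustration of the 3-D -> 2-D shape merge performed by reshape_const_to_2d above.
#include <cassert>

#include "openvino/core/shape.hpp"

ov::Shape merge_to_2d(const ov::Shape& s, bool merge_forward) {
    assert(s.size() == 3);
    // merge_forward: {d0, d1, d2} -> {d0 * d1, d2}, e.g. {4, 32, 256} -> {128, 256}
    // otherwise:     {d0, d1, d2} -> {d0, d1 * d2}, e.g. {4, 32, 256} -> {4, 8192}
    return merge_forward ? ov::Shape{s[0] * s[1], s[2]} : ov::Shape{s[0], s[1] * s[2]};
}
```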

src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp

Lines changed: 1 addition & 1 deletion
@@ -847,7 +847,7 @@ struct ConvertFromBinPrecision<std::tuple<src_t, dst_t>> {
 
 #define INTEL_CPU_CVT_FROM_2BIT_LIST                                                                  \
     INTEL_CPU_CVT(u2, f32), INTEL_CPU_CVT(u2, f16), INTEL_CPU_CVT(u2, bf16), INTEL_CPU_CVT(u2, i32), \
-    INTEL_CPU_CVT(u2, u8), INTEL_CPU_CVT(u2, i8)
+        INTEL_CPU_CVT(u2, u8), INTEL_CPU_CVT(u2, i8)
 
 struct ConvertFrom2BitContext {
     const void* srcPtr;
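
The only change here is the continuation-line indentation inside the macro; the list still enumerates the conversions supported from the 2-bit u2 type. Purely as background, a conceptual sketch of unpacking u2 values follows; the low-to-high bit order is an assumption and may not match the plugin's actual packing layout.

```cpp
// Conceptual sketch only: four 2-bit (u2) values are packed per byte and each one
// is widened to the destination type during conversion. Bit order is assumed.
#include <array>
#include <cstdint>

std::array<uint8_t, 4> unpack_u2(uint8_t packed) {
    return {static_cast<uint8_t>(packed & 0x3),
            static_cast<uint8_t>((packed >> 2) & 0x3),
            static_cast<uint8_t>((packed >> 4) & 0x3),
            static_cast<uint8_t>((packed >> 6) & 0x3)};
}
```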

src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp

Lines changed: 2 additions & 1 deletion
@@ -255,7 +255,8 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const FCAttrs& attrs,
 
     if (auto it = memory.find(ARG_WEI | ARG_ATTR_ZERO_POINTS); it != memory.end()) {
         auto wei_precision = weiDesc->getPrecision();
-        auto dstPrc = wei_precision == ov::element::u2 ? ov::element::u2 : useDynamicQuantization ? ov::element::u8 : ov::element::f32;
+        auto dstPrc = wei_precision == ov::element::u2 ? ov::element::u2
+                                                       : useDynamicQuantization ? ov::element::u8 : ov::element::f32;
         dnnlpoc.appendDecompressionZeroPointsLegacy(it->second, !attrs.weightsNonTransposed, dstPrc);
     }
 
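
The reformatted nested ternary above reads as a two-step decision: keep u2 zero points as u2, otherwise use u8 when dynamic quantization is enabled and f32 on the default path. An equivalent if/else form (free function for illustration only; the real code computes dstPrc inline):

```cpp
// Equivalent control flow to the nested ternary above, written out for readability.
#include "openvino/core/type/element_type.hpp"

ov::element::Type zero_point_dst_precision(const ov::element::Type& wei_precision, bool useDynamicQuantization) {
    if (wei_precision == ov::element::u2) {
        return ov::element::u2;  // u2 zero points are kept as-is
    }
    return useDynamicQuantization ? ov::element::u8 : ov::element::f32;
}
```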

src/tests/functional/plugin/shared/src/subgraph/weights_decompression_builders.cpp

Lines changed: 8 additions & 6 deletions
@@ -87,9 +87,10 @@ std::shared_ptr<ov::Node> initMatMulDecompressionSubgraph(
 
     auto up_to = weights_precision == ov::element::u2 ? 3 : weights_precision == ov::element::i4 ? 7 : 15;
     auto start_from = weights_precision == ov::element::u2 ? 0 : 1;
-    auto weights_tensor = ov::test::utils::create_and_fill_tensor(weights_precision,
-                                                                   transformed_weights_shape,
-                                                                   ov::test::utils::InputGenerateData(start_from, up_to));
+    auto weights_tensor =
+        ov::test::utils::create_and_fill_tensor(weights_precision,
+                                                transformed_weights_shape,
+                                                ov::test::utils::InputGenerateData(start_from, up_to));
     auto weights = std::make_shared<ov::op::v0::Constant>(weights_tensor);
 
     std::shared_ptr<ov::Node> last_node = weights;
@@ -118,9 +119,10 @@ std::shared_ptr<ov::Node> initMatMulDecompressionSubgraph(
     if (decompression_subtract_type != DecompressionType::empty) {
         auto subtract_shape =
             decompression_subtract_type == DecompressionType::full ? scaleshift_const_shape : ov::Shape({});
-        auto shift_const_tensor = ov::test::utils::create_and_fill_tensor(weights_precision,
-                                                                          subtract_shape,
-                                                                          ov::test::utils::InputGenerateData(start_from, up_to));
+        auto shift_const_tensor =
+            ov::test::utils::create_and_fill_tensor(weights_precision,
+                                                    subtract_shape,
+                                                    ov::test::utils::InputGenerateData(start_from, up_to));
         auto shift_const = std::make_shared<ov::op::v0::Constant>(shift_const_tensor);
 
         std::shared_ptr<ov::Node> shift_node = shift_const;
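
Both reformatted create_and_fill_tensor calls reuse the fill range computed a few lines above the first hunk: u2 weights are filled with values in 0..3, i4 with 1..7, and the remaining tested precisions with 1..15. A small sketch of that selection (the helper name is ours):

```cpp
// Sketch of the fill-range selection visible above; the pair feeds
// ov::test::utils::InputGenerateData(start_from, up_to).
#include <utility>

#include "openvino/core/type/element_type.hpp"

std::pair<int, int> weight_fill_range(const ov::element::Type& weights_precision) {
    const int up_to = weights_precision == ov::element::u2 ? 3 : weights_precision == ov::element::i4 ? 7 : 15;
    const int start_from = weights_precision == ov::element::u2 ? 0 : 1;
    return {start_from, up_to};
}
```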
