Skip to content
Open
Show file tree
Hide file tree
Changes from 20 commits
Commits
Show all changes
26 commits
Select commit Hold shift + click to select a range
bbe32a5
Adapt ConvertQuantizeDequantize for reuse in QDQStripping
v-Golubev Oct 1, 2025
1955d55
QDQStripping initial implementation
v-Golubev Oct 1, 2025
17d77ab
Finalize first QDQ Stripping implementation
v-Golubev Oct 1, 2025
e4f6d6a
Clang format
v-Golubev Oct 1, 2025
1640fdb
set replace_with_clamp to true
v-Golubev Oct 1, 2025
d6130ae
Avoid main LPT pipeline in case of non-u8 activations quantization
v-Golubev Oct 2, 2025
5824c91
WIP: some fixes + debug info
v-Golubev Oct 27, 2025
b4c320a
Logging extended
v-Golubev Oct 29, 2025
e771761
added graphs serialization
v-Golubev Oct 29, 2025
8d46d49
Further debug logging extending
v-Golubev Oct 29, 2025
dbba54b
ConvertQuantizeDequantize: ignore consumers_count check
v-Golubev Oct 30, 2025
95de481
Cleanup
v-Golubev Oct 31, 2025
8faa050
Introduced REPLACE_QDQ_WITH_CLAMP env variable
v-Golubev Oct 31, 2025
b181306
Added QDQStrippingTest
v-Golubev Oct 31, 2025
0f7aa0a
Warning fixed
v-Golubev Nov 3, 2025
cbc6318
Compilation error fix
v-Golubev Nov 3, 2025
0e403c2
Transformation pipeline minor corrections
v-Golubev Nov 3, 2025
5211740
QDQStrippingTest extending
v-Golubev Nov 3, 2025
0bb9870
Added clarification comment to ConvertQuantizeDequantize
v-Golubev Nov 3, 2025
6a6d9b8
Corrected LPT tests instances
v-Golubev Nov 3, 2025
5032aea
codestyle
v-Golubev Nov 4, 2025
68a9e3b
[GPU] Keep old behavior for non-i16 models
v-Golubev Nov 4, 2025
060e17d
Review comments applied
v-Golubev Nov 4, 2025
4736a76
Test data generation is adapted to the case when clamp is not inserted
v-Golubev Nov 4, 2025
5b25b47
Fixed undef
v-Golubev Nov 4, 2025
fd48bf4
Remove unnecessary log message
maxnick Nov 5, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
// Copyright (C) 2018-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <memory>
#include <set>

#include "lpt_visibility.hpp"
#include "openvino/pass/matcher_pass.hpp"
#include "quantization_details.hpp"

namespace ov {
namespace pass {
namespace low_precision {

/**
 * @ingroup ov_transformation_common_api
 * @brief FQStrippingTransformation strips FakeQuantize operations whose 'levels' attribute
 * matches one of the requested values and whose scalar input/output ranges are pairwise equal
 * (i.e. the FQ performs no rescaling, only saturation).
 *
 * Depending on the flags, a matched FakeQuantize is either replaced with a Clamp operation
 * (preserving the saturation behavior) or removed entirely, reconnecting its consumers to
 * the FQ data input.
 */
class LP_TRANSFORMATIONS_API FQStrippingTransformation : public ov::pass::MatcherPass {
public:
    OPENVINO_RTTI("FQStrippingTransformation", "0", MatcherPass);
    // levels_to_strip: FQ 'levels' values to match (e.g. 65536 for 16-bit quantization)
    // replace_with_clamp: true -> replace FQ with Clamp; false -> drop FQ entirely
    FQStrippingTransformation(const std::set<size_t>& levels_to_strip, bool replace_with_clamp);
};

} // namespace low_precision
} // namespace pass
} // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -323,7 +323,7 @@ bool LowPrecision::isFunctionQuantized(const std::shared_ptr<const ov::Model>& m
} else if (const auto multiSubGraph = ov::as_type_ptr<ov::op::util::MultiSubGraphOp>(node)) {
// Look inside subgraph operations, such as TensorIterator, Loop, If, etc
for (size_t i = 0; i < multiSubGraph->get_internal_subgraphs_size(); i++) {
if (isFunctionQuantized(multiSubGraph->get_function(i))) {
if (isFunctionQuantized(multiSubGraph->get_function(i), supported_levels, check_fake_convert)) {
return true;
}
}
Expand Down
92 changes: 92 additions & 0 deletions src/common/low_precision_transformations/src/qdq_stripping.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
// Copyright (C) 2018-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "low_precision/qdq_stripping.hpp"

#include <memory>

#include "itt.hpp"
#include "low_precision/common/ie_lpt_exception.hpp"
#include "low_precision/lpt_itt.hpp"
#include "low_precision/network_helper.hpp"
#include "openvino/core/except.hpp"
#include "openvino/core/type.hpp"
#include "openvino/op/clamp.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/equal.hpp"
#include "openvino/op/fake_quantize.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "openvino/util/log.hpp"
#include "transformations/utils/utils.hpp"

namespace ov {
namespace pass {
namespace low_precision {

FQStrippingTransformation::FQStrippingTransformation(const std::set<size_t>& levels_to_strip, bool replace_with_clamp) {
    MATCHER_SCOPE(FQStrippingTransformation);
    // Only FakeQuantize ops with scalar range constants are considered:
    // per-channel quantization cannot be expressed as a single Clamp and is left untouched.
    auto is_scalar = [](const Output<Node>& output) -> bool {
        return ov::shape_size(output.get_shape()) == 1;
    };
    auto input_low_m = pattern::wrap_type<ov::op::v0::Constant>(is_scalar);
    auto input_high_m = pattern::wrap_type<ov::op::v0::Constant>(is_scalar);
    auto output_low_m = pattern::wrap_type<ov::op::v0::Constant>(is_scalar);
    auto output_high_m = pattern::wrap_type<ov::op::v0::Constant>(is_scalar);
    auto fq_m = pattern::wrap_type<ov::op::v0::FakeQuantize>(
        {pattern::any_input(), input_low_m, input_high_m, output_low_m, output_high_m});

    ov::graph_rewrite_callback callback = [OV_CAPTURE_CPY_AND_THIS](pattern::Matcher& m) {
        const auto& pattern_map = m.get_pattern_value_map();
        auto node = ov::as_type_ptr<ov::op::v0::FakeQuantize>(pattern_map.at(fq_m).get_node_shared_ptr());
        if (!node) {
            return false;
        }

        // Strip only FQs whose 'levels' attribute was explicitly requested by the caller.
        const size_t levels = node->get_levels();
        if (!levels_to_strip.count(levels)) {
            return false;
        }

        auto input_low = ov::as_type_ptr<ov::op::v0::Constant>(pattern_map.at(input_low_m).get_node_shared_ptr());
        auto input_high = ov::as_type_ptr<ov::op::v0::Constant>(pattern_map.at(input_high_m).get_node_shared_ptr());
        auto output_low = ov::as_type_ptr<ov::op::v0::Constant>(pattern_map.at(output_low_m).get_node_shared_ptr());
        auto output_high = ov::as_type_ptr<ov::op::v0::Constant>(pattern_map.at(output_high_m).get_node_shared_ptr());

        if (!input_low || !input_high || !output_low || !output_high) {
            return false;
        }

        // Compares two scalar constants by value. If the comparison cannot be constant-folded
        // (e.g. the constants have incompatible element types), the FQ is simply not matched
        // instead of aborting the whole transformation pipeline.
        auto constants_are_equal = [](const std::shared_ptr<ov::op::v0::Constant>& lhs,
                                      const std::shared_ptr<ov::op::v0::Constant>& rhs) {
            auto equal =
                ov::as_type_ptr<ov::op::v0::Constant>(ov::op::util::make_try_fold<ov::op::v1::Equal>(lhs, rhs));
            if (!equal || ov::shape_size(equal->get_shape()) != 1) {
                return false;
            }
            return equal->get_vector<bool>()[0];
        };

        // Stripping is only valid when the FQ does not rescale the data:
        // input and output ranges must match, so the op acts as pure saturation.
        if (!constants_are_equal(input_low, output_low) || !constants_are_equal(input_high, output_high)) {
            return false;
        }

        bool res = false;
        if (replace_with_clamp) {
            // Preserve the saturation semantics of the stripped FQ with a Clamp.
            // Use input_value(0) (the actual producing output of the FQ data input) rather than
            // output(0) of the producer node, which would be wrong for multi-output producers.
            auto clamp = std::make_shared<ov::op::v0::Clamp>(node->input_value(0),
                                                             output_low->cast_vector<double>()[0],
                                                             output_high->cast_vector<double>()[0]);
            res = replace_node_update_name(node, clamp);
        } else {
            // Drop the FQ entirely: reconnect its consumers directly to the FQ data input.
            res = replace_output_update_name(node->output(0), node->input_value(0));
        }
        OPENVINO_ASSERT(res, "FQ stripping failed");
        return res;
    };

    auto m = std::make_shared<ov::pass::pattern::Matcher>(fq_m, matcher_name);
    this->register_matcher(m, callback);
}

} // namespace low_precision
} // namespace pass
} // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
#include <memory>
#include <vector>

#include "openvino/core/type/element_type.hpp"
#include "openvino/pass/matcher_pass.hpp"
#include "transformations_visibility.hpp"

Expand All @@ -32,5 +33,10 @@ class TRANSFORMATIONS_API ConvertQuantizeDequantize;
/**
 * @brief ConvertQuantizeDequantize fuses a quantize-dequantize sequence
 * (FakeQuantize -> Convert(low precision) -> Convert(original precision) -> optional Subtract -> Multiply)
 * back into a single FakeQuantize operation.
 */
class ov::pass::ConvertQuantizeDequantize : public ov::pass::MatcherPass {
public:
    OPENVINO_MATCHER_PASS_RTTI("ConvertQuantizeDequantize");
    // NOTE: a separate default constructor must NOT be declared alongside this one:
    // since every parameter has a default value, it would make zero-argument
    // construction ambiguous.
    /// @param supported_low_precisions quantized element types the first Convert may produce
    /// @param supported_original_precisions element types the original (dequantized) data may have
    /// @param ignore_consumers_count_check when true, intermediate pattern nodes may have extra
    ///        consumers (used by the QDQ stripping pipeline)
    ConvertQuantizeDequantize(const ov::element::TypeVector& supported_low_precisions = {ov::element::i8,
                                                                                         ov::element::u8,
                                                                                         ov::element::i16,
                                                                                         ov::element::u16},
                              const ov::element::TypeVector& supported_original_precisions = {ov::element::f32},
                              const bool ignore_consumers_count_check = false);
};
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
#include "openvino/op/fake_quantize.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/subtract.hpp"
#include "openvino/pass/pattern/op/optional.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "transformations/utils/utils.hpp"

Expand Down Expand Up @@ -62,100 +63,97 @@
// v
//

ov::pass::ConvertQuantizeDequantize::ConvertQuantizeDequantize() {
ov::pass::ConvertQuantizeDequantize::ConvertQuantizeDequantize(
const ov::element::TypeVector& supported_low_precisions,
const ov::element::TypeVector& supported_original_precisions,
const bool ignore_consumers_count_check) {
MATCHER_SCOPE(ConvertQuantizeDequantize);
auto data_pattern = pass::pattern::any_input();
auto input_low_pattern = pass::pattern::any_input();
auto input_high_pattern = pass::pattern::any_input();
auto output_low_pattern = ov::pass::pattern::wrap_type<ov::op::v0::Constant>();
auto output_high_pattern = ov::pass::pattern::wrap_type<ov::op::v0::Constant>();
auto fq_pattern = ov::pass::pattern::wrap_type<ov::op::v0::FakeQuantize>(

using namespace ov::pass::pattern;
using namespace ov::op;

auto data_pattern = any_input(type_matches_any(supported_original_precisions));
auto input_low_pattern = any_input();
auto input_high_pattern = any_input();
auto output_low_pattern = wrap_type<v0::Constant>();
auto output_high_pattern = wrap_type<v0::Constant>();
auto fq_pattern = wrap_type<v0::FakeQuantize>(
{data_pattern, input_low_pattern, input_high_pattern, output_low_pattern, output_high_pattern});
auto convert1_pattern = ov::pass::pattern::wrap_type<ov::op::v0::Convert>(
{fq_pattern},
pattern::type_matches_any({element::i8, element::u8, element::i16, element::u16}));
auto convert2_pattern =
ov::pass::pattern::wrap_type<ov::op::v0::Convert>({convert1_pattern}, pattern::type_matches(element::f32));
auto zero_point_pattern = pass::pattern::any_input();
auto sub_pattern = ov::pass::pattern::wrap_type<ov::op::v1::Subtract>({convert2_pattern, zero_point_pattern},
pattern::consumers_count(1));
auto scale_pattern = pass::pattern::any_input();
auto mul_pattern = ov::pass::pattern::wrap_type<ov::op::v1::Multiply>({sub_pattern, scale_pattern});

ov::matcher_pass_callback callback = [OV_CAPTURE_CPY_AND_THIS](pattern::Matcher& m) {
ov::pass::pattern::op::Predicate convert1_predicate =
ignore_consumers_count_check ? type_matches_any(supported_low_precisions)
: type_matches_any(supported_low_precisions) && consumers_count(1);
auto convert1_pattern = wrap_type<v0::Convert>({fq_pattern}, convert1_predicate);
ov::pass::pattern::op::Predicate convert2_predicate =
ignore_consumers_count_check ? type_matches_any(supported_original_precisions)
: type_matches_any(supported_original_precisions) && consumers_count(1);
auto convert2_pattern = wrap_type<v0::Convert>({convert1_pattern}, convert2_predicate);

auto zero_point_pattern = any_input();
ov::pass::pattern::op::Predicate sub_predicate =
ignore_consumers_count_check ? ov::pass::pattern::op::Predicate() : consumers_count(1);
auto sub_pattern = optional<v1::Subtract>({convert2_pattern, zero_point_pattern}, sub_predicate);
auto scale_pattern = any_input();
auto mul_pattern = wrap_type<v1::Multiply>({sub_pattern, scale_pattern});

ov::matcher_pass_callback callback = [OV_CAPTURE_CPY_AND_THIS](Matcher& m) {
auto pattern_map = m.get_pattern_value_map();

if (transformation_callback(m.get_match_root())) {
return false;
}

auto data = pattern_map[data_pattern];
auto input_low = pattern_map[input_low_pattern];
auto input_high = pattern_map[input_high_pattern];
auto output_low = ov::as_type_ptr<ov::op::v0::Constant>(pattern_map[output_low_pattern].get_node_shared_ptr());
auto data = pattern_map.at(data_pattern);
auto input_low = pattern_map.at(input_low_pattern);
auto input_high = pattern_map.at(input_high_pattern);
auto output_low =
ov::as_type_ptr<ov::op::v0::Constant>(pattern_map.at(output_low_pattern).get_node_shared_ptr());
if (!output_low)
return false;
auto output_high =
ov::as_type_ptr<ov::op::v0::Constant>(pattern_map[output_high_pattern].get_node_shared_ptr());
ov::as_type_ptr<ov::op::v0::Constant>(pattern_map.at(output_high_pattern).get_node_shared_ptr());
if (!output_high)
return false;
auto fq = ov::as_type_ptr<ov::op::v0::FakeQuantize>(pattern_map[fq_pattern].get_node_shared_ptr());
auto fq = ov::as_type_ptr<ov::op::v0::FakeQuantize>(pattern_map.at(fq_pattern).get_node_shared_ptr());
if (!fq)
return false;
auto zero_point = pattern_map[zero_point_pattern];
auto scale = pattern_map[scale_pattern];
auto convert1 = pattern_map[convert1_pattern];
auto convert2 = pattern_map[convert2_pattern];
auto mul = pattern_map[mul_pattern].get_node_shared_ptr();

// convert1 and convert2 should have only one input
if (convert1.get_target_inputs().size() != 1)
return false;
if (convert2.get_target_inputs().size() != 1)
auto scale = pattern_map.at(scale_pattern);
auto convert1 = pattern_map.at(convert1_pattern);
auto convert2 = pattern_map.at(convert2_pattern);
auto mul = pattern_map.at(mul_pattern).get_node_shared_ptr();

static const std::unordered_set<size_t> supported_levels{256, 65536};
const auto levels = fq->get_levels();
if (!supported_levels.count(levels))
return false;

// we support:
// i8 or u8: 'levels' attribute must be 256
// i16 or u16: 'levels' attribute must be 65536
size_t levels = fq->get_levels();
if (levels != 256 && levels != 65536)
return false;

// check if (out_low_val, out_high_val) is (-128, 127) or (0, 255) or (-32768, 32767) or (0, 65535)
float out_low_val;
if (!op::util::get_single_value(output_low, out_low_val))
if (!ov::op::util::get_single_value(output_low, out_low_val))
return false;
float out_high_val;
if (!op::util::get_single_value(output_high, out_high_val))
if (!ov::op::util::get_single_value(output_high, out_high_val))
return false;

static const std::unordered_map<ov::element::Type_t, std::pair<float, float>> supported_intervals{
{ov::element::i8, {-128.f, 127.f}},
{ov::element::u8, {0.f, 255.f}},
{ov::element::i16, {-32768.f, 32767.f}},
{ov::element::u16, {0.f, 65535.f}}};
const auto& type = convert1.get_element_type();
switch (type) {
case element::Type_t::i8:
if (out_low_val != -128 || out_high_val != 127)
return false;
break;
case element::Type_t::u8:
if (out_low_val != 0 || out_high_val != 255)
return false;
break;
case element::Type_t::i16:
if (out_low_val != -32768 || out_high_val != 32767)
return false;
break;
case element::Type_t::u16:
if (out_low_val != 0 || out_high_val != 65535)
return false;
break;
default:
// check if (out_low_val, out_high_val) pair is mapped on the expected precision ranges
if (supported_intervals.count(type) == 0 ||
supported_intervals.at(type) != std::make_pair(out_low_val, out_high_val)) {
return false;
}

std::shared_ptr<Node> new_out_low =
std::make_shared<ov::op::v1::Multiply>(std::make_shared<ov::op::v1::Subtract>(output_low, zero_point),
scale);
std::shared_ptr<Node> new_out_high =
std::make_shared<ov::op::v1::Multiply>(std::make_shared<ov::op::v1::Subtract>(output_high, zero_point),
scale);
const bool has_zero_point = pattern_map.count(zero_point_pattern);
std::shared_ptr<Node> new_out_low = output_low, new_out_high = output_high;
if (has_zero_point) {
const auto& zero_point = pattern_map.at(zero_point_pattern);
new_out_low = std::make_shared<ov::op::v1::Subtract>(new_out_low, zero_point);
new_out_high = std::make_shared<ov::op::v1::Subtract>(new_out_high, zero_point);
}
new_out_low = std::make_shared<ov::op::v1::Multiply>(new_out_low, scale);
new_out_high = std::make_shared<ov::op::v1::Multiply>(new_out_high, scale);

// check if new_out_low/high shapes are broadcastable to FQ's input
auto data_shape = data.get_partial_shape();
Expand Down
17 changes: 16 additions & 1 deletion src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
#include "low_precision/fold_convert.hpp"
#include "low_precision/fuse_convert.hpp"
#include "low_precision/group_convolution.hpp"
#include "low_precision/qdq_stripping.hpp"
#include "low_precision/low_precision.hpp"
#include "low_precision/mat_mul.hpp"
#include "low_precision/multiply_to_group_convolution.hpp"
Expand Down Expand Up @@ -387,8 +388,22 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
ov::disable_keep_const_precision(node);
}

auto is_model_quantized = ov::pass::low_precision::LowPrecision::isFunctionQuantized(func);
using namespace ov::pass::low_precision;
auto is_model_quantized = LowPrecision::isFunctionQuantized(func, std::set<levels>{levels::int8, levels::int8_narrow_range});
enableInt8 = config.get_enable_lp_transformations() && is_model_quantized;
{
using namespace ov::element;
// QDQ stripping pipeline
// 1. Transform DQ part to canonicalized form: Multiply->Add => Subtract->Multiply
manager.register_pass<AddTransformation>();
// 2. Fuse FQ->Convert->DQ to a single FQ
manager.register_pass<ov::pass::ConvertQuantizeDequantize>(TypeVector{i16, u16}, TypeVector{f32}, true);
// 3. Strip FQ layers with unsupported levels
bool replace_with_clamp = ov::util::getenv_bool("REPLACE_QDQ_WITH_CLAMP", true);
std::cout << "[ QDQ STRIPPING INFO ] replace_with_clamp = " << replace_with_clamp << std::endl;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please remove unnecessary log message

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Removed, thanks. The REPLACE_QDQ_WITH_CLAMP env variable should also be removed if accuracy validation shows no issues

manager.register_pass<FQStrippingTransformation>(std::set<size_t>{levels::int16}, replace_with_clamp);
manager.register_pass<ov::pass::Validate>();
}

manager.register_pass<ov::pass::MarkDequantization>(
std::vector<ov::element::Type>{ ov::element::i8, ov::element::u8, ov::element::i4, ov::element::u4 },
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,13 +44,14 @@ const std::vector<FakeQuantizeTransformationParam> fakeQuantizeOnDataValues = {
{ 256ul, {}, { -127.5f }, { 0.f }, { -127.5f }, { 0.f } },
"Pooling", "u8"
},
// Not expected FQ levels
{
{ 16ul, {}, { 0.f }, { 1.5f }, { 0.f }, { 1.5f } },
"Pooling", "u8"
"Pooling", "f32"
},
{
{ 16ul, {}, { -8.f }, { 7.f }, { -0.8f }, { 0.7f } },
"Pooling", "i8"
"Pooling", "f32"
},
// nGraph: I8->FP32 Convert is not supported
// { 256ul, {}, { -1.28f} , { 1.27f }, { -1.28f} , { 1.27f } },
Expand Down
Loading
Loading