Skip to content

Commit c0fa310

Browse files
Support Conv2D + BiasAdd + Relu + Sum fusion (#221)
Signed-off-by: Lv, Liang1 <[email protected]>
1 parent 98d3c83 commit c0fa310

File tree

4 files changed

+16
-6
lines changed

4 files changed

+16
-6
lines changed

neural_compressor/adaptor/tensorflow.yaml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -273,6 +273,10 @@
273273
'Dequantize + Conv2D + BiasAdd + LeakyRelu + Add + QuantizeV2',
274274
'Dequantize + Conv2D + LeakyRelu + AddV2 + QuantizeV2',
275275
'Dequantize + Conv2D + LeakyRelu + Add + QuantizeV2',
276+
'Dequantize + Conv2D + BiasAdd + Relu + AddV2 + QuantizeV2',
277+
'Dequantize + Conv2D + BiasAdd + Relu + Add + QuantizeV2',
278+
'Dequantize + Conv2D + Relu + AddV2 + QuantizeV2',
279+
'Dequantize + Conv2D + Relu + Add + QuantizeV2',
276280
'Dequantize + Conv2D + Add + QuantizeV2',
277281
'Dequantize + Conv2D + AddV2 + QuantizeV2',
278282
'Dequantize + Conv2D + AddV2 + Add + QuantizeV2',

neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_conv.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -58,12 +58,16 @@ def __init__(self, **kwargs):
5858
'DequantizeConv2DSigmoidQuantizeV2': self.apply_newly_conv_biasadd_relu_fusion,
5959
'DequantizeConv2DBiasAddLeakyReluAddV2QuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
6060
'DequantizeConv2DBiasAddLeakyReluAddQuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
61+
'DequantizeConv2DBiasAddReluAddV2QuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
62+
'DequantizeConv2DBiasAddReluAddQuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
6163
'DequantizeConv2DBiasAddAddLeakyReluQuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
6264
'DequantizeConv2DBiasAddAddV2LeakyReluQuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
6365
'DequantizeConv2DAddLeakyReluQuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
6466
'DequantizeConv2DAddV2LeakyReluQuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
6567
'DequantizeConv2DLeakyReluAddV2QuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
6668
'DequantizeConv2DLeakyReluAddQuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
69+
'DequantizeConv2DReluAddV2QuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
70+
'DequantizeConv2DReluAddQuantizeV2': self.apply_newly_conv_biasadd_addn_relu_fusion,
6771
'DequantizeConv2DAddRelu6QuantizeV2': self.apply_newly_conv_biasadd_relu_fusion,
6872
'DequantizeConv2DAddReluQuantizeV2': self.apply_newly_conv_biasadd_relu_fusion,
6973
'DequantizeConv2DBiasAddAddRelu6MulMulQuantizeV2': self.apply_conv_biasadd_hardswish_fusion,
@@ -1194,7 +1198,9 @@ def apply_newly_conv_biasadd_addn_relu_fusion(self, match_node_name):
11941198
# Dequantize + Conv2D + BiasAdd + AddV2 + Relu6 + QuantizeV2
11951199
# Dequantize + Conv2D + BiasAdd + Add + Relu + QuantizeV2
11961200
# Dequantize + Conv2D + BiasAdd + LeakyRelu + AddV2 + QuantizeV2
1201+
# Dequantize + Conv2D + BiasAdd + Relu + AddV2(Add) + QuantizeV2
11971202
# Dequantize + Conv2D + LeakyRelu + AddV2 + QuantizeV2
1203+
# Dequantize + Conv2D + Relu + AddV2(Add) + QuantizeV2
11981204
# Dequantize + Conv2D + Add + Add + Relu + QuantizeV2
11991205
# Dequantize + Conv2D + BiasAdd + Add + Relu + QuantizeV2
12001206
skip_node_name = match_node_name[2:]
@@ -1236,8 +1242,8 @@ def apply_newly_conv_biasadd_addn_relu_fusion(self, match_node_name):
12361242
return self.apply_newly_conv_biasadd_fusion(match_node_name[:3] + [match_node_name[-1]])
12371243

12381244
forth_node = self.node_name_mapping[match_node_name[4]].node
1239-
if forth_node.op != 'LeakyRelu':
1240-
if third_node.op != 'LeakyRelu' and not self._find_relu_node(matched_node.node):
1245+
if forth_node.op not in ('LeakyRelu', 'Relu'):
1246+
if third_node.op not in ('LeakyRelu', 'Relu') and not self._find_relu_node(matched_node.node):
12411247
return self.apply_newly_conv_biasadd_fusion(match_node_name[:3] + [match_node_name[-1]])
12421248

12431249
is_leakyrelu_add_fusion = third_node.op == 'LeakyRelu' and forth_node.op.find('Add') != -1
@@ -1251,7 +1257,7 @@ def apply_newly_conv_biasadd_addn_relu_fusion(self, match_node_name):
12511257

12521258
sum_node_name = self.node_name_mapping[match_node_name[3 + relu_offset]].node.input[sum_index]
12531259
deq_node = self.node_name_mapping[sum_node_name].node
1254-
if (deq_node.op != 'LeakyRelu' and deq_node.op != 'Dequantize') or \
1260+
if (deq_node.op != 'LeakyRelu' and deq_node.op != 'Dequantize' and deq_node.op != 'BiasAdd') or \
12551261
deq_node.op.find("Quantize") != -1:
12561262
return self.apply_newly_conv_biasadd_fusion(match_node_name[:3]+[match_node_name[-1]])
12571263

@@ -1350,7 +1356,7 @@ def apply_newly_conv_biasadd_addn_relu_fusion(self, match_node_name):
13501356

13511357
self.add_output_graph_node(quantized_conv_node)
13521358

1353-
if is_leakyrelu_add_fusion or is_leakyrelu:
1359+
if is_leakyrelu_add_fusion or is_leakyrelu or is_relu_add_fusion:
13541360
quantize_down_name = self._add_quantize_down_nodes(
13551361
node, quantized_node_name, dtypes.qint8, False)
13561362
self._intel_cpu_add_dequantize_result_node(

test/tfnewapi/test_tensorflow_graph_conv_fusion.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -348,7 +348,7 @@ def test_conv_biasadd_addv2_relu_fallback_fusion_1(self):
348348

349349
for i in output_graph.graph_def.node:
350350
if i.op == '_FusedQuantizedConv2D' and \
351-
i.attr['fused_ops'].list.s == [b'BiasAdd', b'Dequantize']:
351+
i.attr['fused_ops'].list.s == [b'BiasAdd', b'Sum', b'Relu', b'Requantize']:
352352
found_conv_fusion = True
353353
break
354354
self.assertEqual(found_conv_fusion, True)

test/tfnewapi/test_tensorflow_graph_qdq_conv_fusion.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -317,7 +317,7 @@ def test_conv_biasadd_addv2_relu_fallback_fusion_1(self):
317317

318318
for i in output_graph.graph_def.node:
319319
if i.op == '_FusedQuantizedConv2D' and \
320-
i.attr['fused_ops'].list.s == [b'BiasAdd', b'Dequantize']:
320+
i.attr['fused_ops'].list.s == [b'BiasAdd', b'Sum', b'Relu', b'Requantize']:
321321
found_conv_fusion = True
322322
break
323323
self.assertEqual(found_conv_fusion, True)

0 commit comments

Comments (0)