Skip to content

Commit 7e17e3b

Browse files
lvliang-intel and zehao-intel
authored and committed
Support 'Square', 'Sum', 'SparseSegmentSqrtN' BF16 ops in TensorFlow backend (#223)
Signed-off-by: Lv, Liang1 <[email protected]> Signed-off-by: zehao-intel <[email protected]>
1 parent 913631b commit 7e17e3b

File tree

2 files changed

+1
-7
lines changed

2 files changed

+1
-7
lines changed

neural_compressor/adaptor/tensorflow.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@
3535
"Erf", "FusedBatchNormV2", "FusedBatchNormGradV2", "FusedBatchNormV3", "FusedBatchNormGradV3", "LeakyRelu", "LeakyReluGrad",
3636
"Mean", "Mul", "Sub", "Elu", "EluGrad", "FloorDiv", "_FusedBatchNormEx", "Log", "Log1p", "LogSoftmax", "Prod", "RealDiv",
3737
"Reciprocal", "Rsqrt", "Selu", "SeluGrad", "Sigmoid", "SigmoidGrad", "Softmax", "Softplus", "SoftplusGrad", "Softsign",
38-
"SoftsignGrad", "Sqrt", "SquaredDifference", "Tanh", "TanhGrad", #infer_list
38+
"SoftsignGrad", "Sqrt", "Square", "SquaredDifference", "Sum", "Tanh", "TanhGrad", "SparseSegmentSqrtN", # infer_list
3939
"Abs", "ArgMax","ArgMin","BatchToSpace","BatchToSpaceND","BroadcastTo","Ceil","CheckNumerics","ClipByValue","Concat","ConcatV2",
4040
"DepthToSpace","DynamicPartition","DynamicStitch","EnsureShape","Enter","Equal","Exit","ExpandDims","Fill","Floor","Gather",
4141
"GatherNd","GatherV2","Greater","GreaterEqual","Identity","IsFinite","IsInf","IsNan","Less","LessEqual","Max","Maximum","MaxPool",

neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_matmul.py

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -963,12 +963,6 @@ def _is_match_matmul(self, patterns, qdq_inserted=False):
963963
self.exclude_matmul_nodes.append(cur_node.name)
964964
continue
965965

966-
for i in self.node_name_mapping:
967-
if weight_node.input and not weight_node.input[0].startswith('^') \
968-
and weight_node.name in self.node_name_mapping[i].output:
969-
self.exclude_matmul_nodes.append(cur_node.name)
970-
continue
971-
972966
for sub_rule in patterns:
973967
if sub_rule[0] != "Dequantize":
974968
self.exclude_matmul_nodes.append(cur_node.name)

0 commit comments

Comments (0)