3 files changed: +24 −0
examples/tensorflow/image_recognition/tensorflow_models/quantization/ptq

@@ -38,6 +38,14 @@ quantization:                               # optional. tuning constrai
       algorithm: minmax
     weight:
       granularity: per_channel
+  op_wise: {
+    'densenet121/MaxPool2D/MaxPool': {
+      'activation': {'dtype': ['fp32']}
+    },
+    'densenet121/transition_block[1-3]/AvgPool2D/AvgPool': {
+      'activation': {'dtype': ['fp32']},
+    }
+  }
 
 evaluation:                                   # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
   accuracy:                                   # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.

@@ -38,6 +38,14 @@ quantization:                               # optional. tuning constrai
       algorithm: minmax
     weight:
       granularity: per_channel
+  op_wise: {
+    'densenet161/MaxPool2D/MaxPool': {
+      'activation': {'dtype': ['fp32']}
+    },
+    'densenet161/transition_block[1-3]/AvgPool2D/AvgPool': {
+      'activation': {'dtype': ['fp32']},
+    }
+  }
 
 evaluation:                                   # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
   accuracy:                                   # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.

@@ -38,6 +38,14 @@ quantization:                               # optional. tuning constrai
       algorithm: minmax
     weight:
       granularity: per_channel
+  op_wise: {
+    'densenet169/MaxPool2D/MaxPool': {
+      'activation': {'dtype': ['fp32']}
+    },
+    'densenet169/transition_block[1-3]/AvgPool2D/AvgPool': {
+      'activation': {'dtype': ['fp32']},
+    }
+  }
 
 evaluation:                                   # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
   accuracy:                                   # optional. required if user doesn't provide eval_func in neural_compressor.Quantization.
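
All three diffs apply the same pattern to the DenseNet PTQ configs: an op_wise entry pins the activation dtype of the listed MaxPool/AvgPool ops to fp32, which keeps those pooling ops out of the INT8 tuning space (the [1-3] pattern matches the transition-block op names). As a hedged illustration of how such a config is typically consumed, below is a minimal sketch using the neural_compressor 1.x experimental API; the YAML path, frozen-graph path, and output path are placeholders, not part of this change, and the calibration/evaluation dataloaders are assumed to be defined inside the YAML as in the other example configs.

    # Minimal sketch, assuming the neural_compressor 1.x experimental API.
    # 'densenet121.yaml' stands in for one of the three configs touched here.
    from neural_compressor.experimental import Quantization, common

    # Load the PTQ config that now contains the op_wise block shown above.
    quantizer = Quantization('densenet121.yaml')            # config path is an assumption

    # Point the quantizer at the FP32 frozen graph to calibrate and convert.
    quantizer.model = common.Model('densenet121_fp32.pb')   # placeholder model path

    # fit() runs calibration and tuning; ops listed under op_wise keep fp32 activations.
    q_model = quantizer.fit()
    q_model.save('densenet121_int8.pb')                     # placeholder output path

If further accuracy loss is observed, additional problematic op names can be added to op_wise in the same way without touching the rest of the tuning configuration.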