
Commit b445adb

enhance benchmark (#604)
Signed-off-by: Xin He <[email protected]>
1 parent a5d055a · commit b445adb

File tree

1 file changed (+22, -11 lines)


neural_compressor/benchmark.py

Lines changed: 22 additions & 11 deletions
@@ -37,6 +37,7 @@
 from .utils import logger
 from .conf.pythonic_config import Config
 from .config import BenchmarkConfig
+from .utils.utility import Statistics
 
 
 def set_env_var(env_var, value, overwrite_existing=False):
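The context above also shows `set_env_var`, the helper the benchmark uses to prime per-instance environment variables such as NUM_OF_INSTANCE. A minimal sketch of its likely behavior, inferred from the signature alone (the body below is an assumption, not the repository's code):

    import os

    def set_env_var(env_var, value, overwrite_existing=False):
        # Inferred semantics: keep an existing value unless the caller
        # explicitly asks to overwrite it (assumption from the signature).
        if overwrite_existing or env_var not in os.environ:
            os.environ[env_var] = str(value)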
@@ -162,13 +163,11 @@ def __init__(self, conf):
         if self.conf.usr_cfg.model.framework != 'NA':
             self.framework = self.conf.usr_cfg.model.framework.lower()
 
-    def __call__(self):
+    def __call__(self, raw_cmd=None):
         """Directly call a Benchmark object.
 
         Args:
-            model: Get the model
-            b_dataloader: Set dataloader for benchmarking
-            b_func: Eval function for benchmark
+            raw_cmd: raw command used for benchmark
         """
         cfg = self.conf.usr_cfg
         assert cfg.evaluation is not None, 'benchmark evaluation filed should not be None...'
@@ -181,7 +180,9 @@ def __call__(self):
         logger.info("Start to run Benchmark.")
         if os.environ.get('NC_ENV_CONF') == 'True':
             return self.run_instance()
-        self.config_instance()
+        if raw_cmd is None:
+            raw_cmd = sys.executable + ' ' + ' '.join(sys.argv)
+        self.config_instance(raw_cmd)
         self.summary_benchmark()
         return None

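The net effect of these two hunks is that the launch command is now resolved in `__call__` and can be overridden by the caller, whereas `config_instance` previously always rebuilt it from `sys.argv`. A minimal sketch of both paths; the `bench` object and the override string are hypothetical:

    import sys

    # Path 1: no argument given -- the command is rebuilt from the running
    # process, exactly as the diff above does when raw_cmd is None.
    default_cmd = sys.executable + ' ' + ' '.join(sys.argv)

    # Path 2: caller-supplied command, e.g. when the benchmark should
    # re-launch a different entry script (hypothetical usage).
    # bench(raw_cmd='python run_benchmark.py --config conf.yaml')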
@@ -204,16 +205,26 @@ def summary_benchmark(self):
                 throughput_l.append(float(throughput.group(1))) if throughput and throughput.group(1) else None
             assert len(latency_l)==len(throughput_l)==num_of_instance, \
                 "Multiple instance benchmark failed with some instance!"
-            logger.info("\n\nMultiple instance benchmark summary: ")
-            logger.info("Latency average: {:.3f} ms".format(sum(latency_l)/len(latency_l)))
-            logger.info("Throughput sum: {:.3f} images/sec".format(sum(throughput_l)))
+
+            output_data = [
+                ["Latency average [second/sample]", "{:.3f}".format(sum(latency_l)/len(latency_l))],
+                ["Throughput sum [samples/second]", "{:.3f}".format(sum(throughput_l))]
+            ]
+            logger.info("********************************************")
+            Statistics(
+                output_data,
+                header='Multiple Instance Benchmark Summary',
+                field_names=["Items", "Result"]).print_stat()
         else:
             # (TODO) should add summary after win32 benchmark has log
             pass
 
-    def config_instance(self):
-        """Configure the multi-instance commands and trigger benchmark with sub process."""
-        raw_cmd = sys.executable + ' ' + ' '.join(sys.argv)
+    def config_instance(self, raw_cmd):
+        """Configure the multi-instance commands and trigger benchmark with sub process.
+
+        Args:
+            raw_cmd: raw command used for benchmark
+        """
         multi_instance_cmd = ''
         num_of_instance = int(os.environ.get('NUM_OF_INSTANCE'))
         cores_per_instance = int(os.environ.get('CORES_PER_INSTANCE'))
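This hunk replaces three ad-hoc `logger.info` calls with the `Statistics` table printer and corrects the units from "ms" / "images/sec" to seconds per sample and samples per second. The snippet below exercises the same call signature the diff introduces; the per-instance latency and throughput numbers are invented purely for illustration:

    from neural_compressor.utils.utility import Statistics

    # Invented results for two benchmark instances (illustration only).
    latency_l = [0.012, 0.013]    # seconds per sample, one entry per instance
    throughput_l = [83.3, 76.9]   # samples per second, one entry per instance

    output_data = [
        ["Latency average [second/sample]", "{:.3f}".format(sum(latency_l) / len(latency_l))],
        ["Throughput sum [samples/second]", "{:.3f}".format(sum(throughput_l))],
    ]

    # Prints a two-column table titled 'Multiple Instance Benchmark Summary'
    # through the logger, mirroring summary_benchmark() above.
    Statistics(
        output_data,
        header='Multiple Instance Benchmark Summary',
        field_names=["Items", "Result"]).print_stat()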
