@@ -23,13 +23,14 @@
 # OF THE POSSIBILITY OF SUCH DAMAGE.
 from __future__ import print_function
 
+import argparse
 import os
 import re
 from abc import ABCMeta, abstractproperty, abstractmethod
 from os.path import join
 
 import mx
-from mx_benchmark import StdOutRule, VmRegistry, java_vm_registry, Vm, GuestVm, VmBenchmarkSuite
+from mx_benchmark import StdOutRule, VmRegistry, java_vm_registry, Vm, GuestVm, VmBenchmarkSuite, AveragingBenchmarkMixin
 from mx_graalpython_bench_param import benchmarks_list, harnessPath
 
 # ----------------------------------------------------------------------------------------------------------------------
@@ -53,6 +54,8 @@
 PYTHON_VM_REGISTRY_NAME = "Python"
 CONFIGURATION_DEFAULT = "default"
 
+DEFAULT_ITERATIONS = 10
+
 
 # ----------------------------------------------------------------------------------------------------------------------
 #
@@ -174,7 +177,7 @@ def config_name(self):
 # the benchmark definition
 #
 # ----------------------------------------------------------------------------------------------------------------------
-class PythonBenchmarkSuite(VmBenchmarkSuite):
+class PythonBenchmarkSuite(VmBenchmarkSuite, AveragingBenchmarkMixin):
     def __init__(self, name, harness_path):
         self._name = name
         self._harness_path = harness_path
@@ -193,7 +196,7 @@ def rules(self, output, benchmarks, bm_suite_args):
                 r"^### iteration=(?P<iteration>[0-9]+), name=(?P<benchmark>[a-zA-Z0-9.\-]+), duration=(?P<time>[0-9]+(\.[0-9]+)?$)",  # pylint: disable=line-too-long
                 {
                     "benchmark": '{}.{}'.format(self._name, bench_name),
-                    "metric.name": "time",
+                    "metric.name": "warmup",
                     "metric.iteration": ("<iteration>", int),
                     "metric.type": "numeric",
                     "metric.value": ("<time>", float),
@@ -205,6 +208,24 @@ def rules(self, output, benchmarks, bm_suite_args):
             ),
         ]
 
+    def run(self, benchmarks, bmSuiteArgs):
+        results = super(PythonBenchmarkSuite, self).run(benchmarks, bmSuiteArgs)
+        self.addAverageAcrossLatestResults(results)
+        return results
+
+    def postprocessRunArgs(self, run_args):
+        parser = argparse.ArgumentParser(add_help=False)
+        parser.add_argument("-i", default=None)
+        args, remaining = parser.parse_known_args(run_args)
+        if args.i:
+            if args.i.isdigit():
+                return ["-i", args.i] + remaining
+            if args.i == "-1":
+                return remaining
+        else:
+            iterations = DEFAULT_ITERATIONS + self.getExtraIterationCount(DEFAULT_ITERATIONS)
+            return ["-i", str(iterations)] + remaining
+
     def createVmCommandLineArgs(self, benchmarks, run_args):
         if not benchmarks or len(benchmarks) != 1:
             mx.abort("Please run a specific benchmark (mx benchmark {}:<benchmark-name>) or all the benchmarks "
@@ -214,10 +235,9 @@ def createVmCommandLineArgs(self, benchmarks, run_args):
 
         cmd_args = [self._harness_path, join(self._bench_path, "{}.py".format(benchmark))]
        if len(run_args) == 0:
-            cmd_args.extend(self._benchmarks[benchmark])
-        else:
-            cmd_args.extend(run_args)
-
+            run_args = self._benchmarks[benchmark]
+        run_args = self.postprocessRunArgs(run_args)
+        cmd_args.extend(run_args)
         return cmd_args
 
     def benchmarkList(self, bm_suite_args):
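Note on the metric change: the StdOutRule above turns each "### iteration=..., duration=..." line printed by the harness into one data point. With this commit those per-iteration points are recorded under metric.name "warmup" rather than "time", and the new run() override calls addAverageAcrossLatestResults(results) from mx's AveragingBenchmarkMixin, which (as I read mx) derives an averaged steady-state score from the latest warmup samples, so the whole warmup curve is kept while a single number is still reported. A minimal sketch of the line parsing, using the same regex against a hypothetical harness line (the benchmark name "binarytrees3" is made up for illustration):

    import re

    # The StdOutRule regex from the diff, applied to a hypothetical output line.
    pattern = r"^### iteration=(?P<iteration>[0-9]+), name=(?P<benchmark>[a-zA-Z0-9.\-]+), duration=(?P<time>[0-9]+(\.[0-9]+)?$)"
    line = "### iteration=3, name=binarytrees3, duration=1432.71"

    match = re.match(pattern, line)
    if match:
        # mx feeds these groups into metric.iteration (int) and metric.value (float)
        print(int(match.group("iteration")), match.group("benchmark"), float(match.group("time")))
        # -> 3 binarytrees3 1432.71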
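Note on postprocessRunArgs: it normalizes the harness's -i (iteration count) flag so that an explicit numeric count is passed through, "-i -1" drops the flag entirely (deferring to the harness's own default), and an absent flag requests DEFAULT_ITERATIONS plus extra iterations so the averaging window has enough warmed-up samples. A standalone sketch of that logic, assuming a hypothetical stand-in for AveragingBenchmarkMixin.getExtraIterationCount (the real heuristic lives in mx and may differ):

    import argparse

    DEFAULT_ITERATIONS = 10

    def get_extra_iteration_count(iterations):
        # Hypothetical stand-in for AveragingBenchmarkMixin.getExtraIterationCount;
        # a flat +20% is used here for illustration only.
        return iterations // 5

    def postprocess_run_args(run_args):
        parser = argparse.ArgumentParser(add_help=False)
        parser.add_argument("-i", default=None)
        args, remaining = parser.parse_known_args(run_args)
        if args.i:
            if args.i.isdigit():
                # an explicit, non-negative count is passed through unchanged
                return ["-i", args.i] + remaining
            if args.i == "-1":
                # -1 removes the flag, deferring to the harness default
                return remaining
            # any other -i value falls through and yields None,
            # mirroring the committed code
        else:
            # no -i given: default count plus extra iterations for averaging
            iterations = DEFAULT_ITERATIONS + get_extra_iteration_count(DEFAULT_ITERATIONS)
            return ["-i", str(iterations)] + remaining

    print(postprocess_run_args([]))            # ['-i', '12'] under the stand-in heuristic
    print(postprocess_run_args(["-i", "5"]))   # ['-i', '5']
    print(postprocess_run_args(["-i", "-1"]))  # []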