@@ -30,11 +30,11 @@ DRIVER_DIR = os.path.dirname(os.path.realpath(__file__))
 def parse_results(res, optset):
     # Parse lines like this
     # #,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),PEAK_MEMORY(B)
-    SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," + \
-                         ",".join([r"[ \t]*([\d.]+)"]*7))
+    SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," +
+                         ",".join([r"[ \t]*([\d.]+)"] * 7))
     # The Totals line would be parsed like this.
-    TOTALRE = re.compile(r"()(Totals)," + \
-                         ",".join([r"[ \t]*([\d.]+)"]*7))
+    TOTALRE = re.compile(r"()(Totals)," +
+                         ",".join([r"[ \t]*([\d.]+)"] * 7))
     KEYGROUP = 2
     VALGROUP = 4
     MEMGROUP = 9
@@ -51,14 +51,14 @@ def parse_results(res, optset):
         test = {}
         test['Data'] = [testresult]
         test['Info'] = {}
-        test['Name'] = "nts.swift/" + optset + "." + testname + ".exec"
+        test['Name'] = "nts.swift/" + optset + "." + testname + ".exec"
         tests.append(test)
         if testname != 'Totals':
             mem_testresult = int(m.group(MEMGROUP))
             mem_test = {}
             mem_test['Data'] = [mem_testresult]
             mem_test['Info'] = {}
-            mem_test['Name'] = "nts.swift/mem_maxrss." + optset + "." + testname + ".mem"
+            mem_test['Name'] = "nts.swift/mem_maxrss." + optset + "." + testname + ".mem"
             tests.append(mem_test)
     return tests
 
@@ -85,7 +85,7 @@ def instrument_test(driver_path, test, num_samples):
         )
         peak_memory = re.match('\s*(\d+)\s*maximum resident set size',
                                test_output_raw.split('\n')[-15]).group(1)
-        test_outputs.append(test_output_raw.split()[1].split(',') + \
+        test_outputs.append(test_output_raw.split()[1].split(',') +
                             [peak_memory])
 
     # Average sample results
@@ -102,7 +102,7 @@ def instrument_test(driver_path, test, num_samples):
         for i in range(AVG_START_INDEX, len(test_output)):
             avg_test_output[i] += int(test_output[i])
     for i in range(AVG_START_INDEX, len(avg_test_output)):
-        avg_test_output[i] = int(round(avg_test_output[i] / \
+        avg_test_output[i] = int(round(avg_test_output[i] /
                                        float(len(test_outputs))))
     avg_test_output[NUM_SAMPLES_INDEX] = num_samples
     avg_test_output[MIN_INDEX] = min(test_outputs,
@@ -152,8 +152,8 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
     only run tests included in it."""
     (total_tests, total_min, total_max, total_mean) = (0, 0, 0, 0)
     output = []
-    headings = ['#', 'TEST','SAMPLES','MIN(μs)','MAX(μs)','MEAN(μs)', 'SD(μs)',
-                'MEDIAN(μs)','MAX_RSS(B)']
+    headings = ['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'MAX(μs)', 'MEAN(μs)',
+                'SD(μs)', 'MEDIAN(μs)', 'MAX_RSS(B)']
     line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
     if verbose and log_directory:
         print line_format.format(*headings)
@@ -182,7 +182,7 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
     totals_output = '\n\n' + ','.join(totals)
     if verbose:
         if log_directory:
-            print line_format.format(*(['']+totals))
+            print line_format.format(*([''] + totals))
         else:
             print totals_output[1:]
     formatted_output += totals_output
@@ -204,7 +204,7 @@ def submit(args):
     print "\nRunning benchmarks..."
     for optset in args.optimization:
         print "Opt level:\t", optset
-        file = os.path.join(args.tests, "Benchmark_" + optset)
+        file = os.path.join(args.tests, "Benchmark_" + optset)
         try:
             res = run_benchmarks(file, benchmarks=args.benchmark,
                                  num_samples=args.iterations)
@@ -227,7 +227,7 @@ def submit(args):
 
 def run(args):
     optset = args.optimization
-    file = os.path.join(args.tests, "Benchmark_" + optset)
+    file = os.path.join(args.tests, "Benchmark_" + optset)
     run_benchmarks(file, benchmarks=args.benchmarks,
                    num_samples=args.iterations, verbose=True,
                    log_directory=args.output_dir,
@@ -260,10 +260,9 @@ def compare(args):
     recent_logs = {}
     for branch_dir in [current_branch_dir, master_branch_dir]:
         for opt in ['O', 'Onone']:
-            recent_logs[os.path.basename(branch_dir) + '_' + opt] = \
-                sorted(glob.glob(os.path.join(branch_dir,
-                'Benchmark_' + opt + '-*.log')), key=os.path.getctime,
-                reverse=True)
+            recent_logs[os.path.basename(branch_dir) + '_' + opt] = sorted(
+                glob.glob(os.path.join(branch_dir, 'Benchmark_' + opt + '-*.log')),
+                key=os.path.getctime, reverse=True)
 
     if current_branch == 'master':
         if len(recent_logs['master_O']) > 1 and \
@@ -323,7 +322,7 @@ def main():
     submit_parser = subparsers.add_parser('submit',
         help='run benchmarks and submit results to LNT')
     submit_parser.add_argument('-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' + \
+        help='directory containing Benchmark_O{,none,unchecked} ' +
             '(default: DRIVER_DIR)',
         default=DRIVER_DIR)
     submit_parser.add_argument('-m', '--machine', required=True,
@@ -345,7 +344,7 @@ def main():
     run_parser = subparsers.add_parser('run',
         help='run benchmarks and output results to stdout')
     run_parser.add_argument('-t', '--tests',
-        help='directory containing Benchmark_O{,none,unchecked} ' + \
+        help='directory containing Benchmark_O{,none,unchecked} ' +
             '(default: DRIVER_DIR)',
         default=DRIVER_DIR)
     run_parser.add_argument('-i', '--iterations',
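
For context, here is a minimal standalone sketch of what the result-parsing regex touched in the first hunk does. Only the SCORERE pattern and the group indices (KEYGROUP=2, VALGROUP=4, MEMGROUP=9) come from the script itself; the sample output line, its test name, and its numbers are hypothetical.

import re

# Score pattern from parse_results: test number, test name, then seven
# comma-separated numeric columns (SAMPLES, MIN, MAX, MEAN, SD, MEDIAN,
# PEAK_MEMORY).
SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," +
                     ",".join([r"[ \t]*([\d.]+)"] * 7))

# Hypothetical benchmark output line in the documented format:
# #,TEST,SAMPLES,MIN(us),MAX(us),MEAN(us),SD(us),MEDIAN(us),PEAK_MEMORY(B)
sample = "1,Ackermann,10,671,789,715,8,702,10862592"

m = SCORERE.match(sample)
if m:
    print(m.group(2))  # KEYGROUP: test name, 'Ackermann'
    print(m.group(4))  # VALGROUP: MIN column, '671'
    print(m.group(9))  # MEMGROUP: peak memory column, '10862592'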