@@ -34,6 +34,7 @@ import re
 import subprocess
 import sys
 import time
+from functools import reduce

 from compare_perf_tests import LogParser

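The new import is needed because Python 3 dropped the `reduce` builtin; it is only available from `functools`. A minimal standalone sketch (with made-up sample counts) of the portable spelling:

    from functools import reduce  # builtin on Python 2, functools-only on Python 3

    # Fold per-run sample counts into a total; works the same on 2 and 3.
    total_samples = reduce(lambda acc, n: acc + n, [20, 20, 10], 0)
    print(total_samples)  # 50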
@@ -65,7 +66,7 @@ class BenchmarkDriver(object):

     def _invoke(self, cmd):
         return self._subprocess.check_output(
-            cmd, stderr=self._subprocess.STDOUT)
+            cmd, stderr=self._subprocess.STDOUT, universal_newlines=True)

     @property
     def test_harness(self):
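`check_output` returns `bytes` on Python 3 unless told otherwise; `universal_newlines=True` makes it return text (`str`) on both versions, which is what the log parser downstream expects. A small illustration, using `echo` as a stand-in for the benchmark binary:

    import subprocess

    # Without universal_newlines the result is bytes on Python 3 and would
    # break str-based parsing; with it, both versions return text.
    out = subprocess.check_output(['echo', 'hello'], universal_newlines=True)
    assert isinstance(out, str)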
@@ -144,7 +145,7 @@ class BenchmarkDriver(object):
             verbose, measure_memory, quantile, gather_metadata)
         output = self._invoke(cmd)
         results = self.parser.results_from_string(output)
-        return results.items()[0][1] if test else results
+        return list(results.items())[0][1] if test else results

     def _cmd_run(self, test, num_samples, num_iters, sample_time, min_samples,
                  verbose, measure_memory, quantile, gather_metadata):
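Wrapping `results.items()` in `list()` is required because Python 3's `dict.items()` returns a view object that cannot be indexed. A tiny sketch with a made-up results dict:

    results = {'Ackermann': 'result-object'}

    # Python 2: items() is a list, so [0] works directly.
    # Python 3: items() is a view; indexing it raises TypeError.
    first = list(results.items())[0][1]  # portable on both
    print(first)  # result-object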
@@ -219,9 +220,9 @@ class BenchmarkDriver(object):
             print(format(values))

         def result_values(r):
-            return map(str, [r.test_num, r.name, r.num_samples, r.min,
-                             r.samples.q1, r.median, r.samples.q3, r.max,
-                             r.max_rss])
+            return [str(value) for value in
+                    [r.test_num, r.name, r.num_samples, r.min,
+                     r.samples.q1, r.median, r.samples.q3, r.max, r.max_rss]]

         header = ['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'Q1(μs)', 'MEDIAN(μs)',
                   'Q3(μs)', 'MAX(μs)', 'MAX_RSS(B)']
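The `result_values` rewrite avoids `map()`, which returns a one-shot iterator on Python 3 rather than a list; a list comprehension produces a real list on both versions. Minimal illustration with placeholder values:

    row = [3, 'Ackermann', 20, 735, None]

    # map(str, row) is a list on Python 2 but a lazy iterator on Python 3;
    # the comprehension below is a list everywhere, so it can be joined,
    # indexed, or iterated more than once.
    cells = [str(value) for value in row]
    print('  '.join(cells))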
@@ -303,7 +304,11 @@ class MarkdownReportHandler(logging.StreamHandler):
         msg = self.format(record)
         stream = self.stream
         try:
-            if (isinstance(msg, unicode) and
+            unicode_type = unicode  # Python 2
+        except NameError:
+            unicode_type = str  # Python 3
+        try:
+            if (isinstance(msg, unicode_type) and
                     getattr(stream, 'encoding', None)):
                 stream.write(msg.encode(stream.encoding))
             else:
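The try/except shim exists because the `unicode` builtin is gone in Python 3, where `str` is already a text type. A self-contained version of the same pattern:

    try:
        unicode_type = unicode  # Python 2: separate text type
    except NameError:
        unicode_type = str      # Python 3: str is already text

    assert isinstance(u'MAX_RSS(B) \u03bcs', unicode_type)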
@@ -415,10 +420,10 @@ class BenchmarkDoctor(object):
         setup, ratio = BenchmarkDoctor._setup_overhead(measurements)
         setup = 0 if ratio < 0.05 else setup
         runtime = min(
-            [(result.samples.min - correction) for i_series in
-             [BenchmarkDoctor._select(measurements, num_iters=i)
-              for correction in [(setup / i) for i in [1, 2]]
-              ] for result in i_series])
+            [(result.samples.min - correction) for correction, i_series in
+             [(correction, BenchmarkDoctor._select(measurements, num_iters=i))
+              for i, correction in [(i, setup // i) for i in [1, 2]]]
+             for result in i_series])

         threshold = 1000
         if threshold < runtime:
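The old nested comprehension relied on the inner loop variable `correction` leaking into the enclosing expression, which Python 2 allowed but Python 3 does not (comprehensions get their own scope), so the rewrite carries `correction` along in each tuple and uses `//` to keep it an integer. A reduced sketch of the same idea:

    setup = 100

    # Python 3 scopes comprehension variables to the comprehension itself,
    # so any value needed outside must travel with the data:
    corrections = [(i, setup // i) for i in [1, 2]]
    print(corrections)  # [(1, 100), (2, 50)]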
@@ -473,7 +478,7 @@ class BenchmarkDoctor(object):

     @staticmethod
     def _reasonable_setup_time(measurements):
-        setup = min([result.setup
+        setup = min([result.setup or 0
                      for result in BenchmarkDoctor._select(measurements)])
         if 200000 < setup:  # 200 ms
             BenchmarkDoctor.log_runtime.error(
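`result.setup or 0` guards `min()` against results whose setup was never measured: Python 2 let `None` compare as smaller than any number, while Python 3 raises `TypeError` when ordering `None` against an `int`. Sketch with made-up setup times:

    setups = [None, 12000, 8000]  # e.g. one result with no measured setup

    # min(setups) works on Python 2 (None sorts first) but raises TypeError
    # on Python 3; coalescing None to 0 keeps the comparison well defined.
    print(min(s or 0 for s in setups))  # 0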
@@ -537,7 +542,7 @@ class BenchmarkDoctor(object):

         def capped(s):
             return min(s, 200)
-        run_args = [(capped(num_samples), 1), (capped(num_samples / 2), 2)]
+        run_args = [(capped(num_samples), 1), (capped(num_samples // 2), 2)]
         opts = self.driver.args.optimization
         opts = opts if isinstance(opts, list) else [opts]
         self.log.debug(
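`num_samples // 2` is used because `/` became true division in Python 3 and would hand `capped()` a float where an integer sample count is expected. For example:

    num_samples = 9
    print(num_samples / 2)   # Python 3: 4.5 (float); Python 2: 4
    print(num_samples // 2)  # 4 on both: floor division of ints stays an int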
@@ -691,6 +696,7 @@ def parse_args(args):
     subparsers = parser.add_subparsers(
         title='Swift benchmark driver commands',
         help='See COMMAND -h for additional arguments', metavar='COMMAND')
+    subparsers.required = True

     shared_benchmarks_parser = argparse.ArgumentParser(add_help=False)
     benchmarks_group = shared_benchmarks_parser.add_mutually_exclusive_group()
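`subparsers.required = True` restores the Python 2 argparse behavior in which a missing subcommand is an error; since Python 3.3 subparsers are optional by default, so a bare invocation would silently do nothing. A minimal sketch of the pattern:

    import argparse

    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(metavar='COMMAND')
    subparsers.required = True   # Python 3: otherwise COMMAND is optional
    subparsers.add_parser('run')

    parser.parse_args(['run'])   # ok
    # parser.parse_args([]) would now exit with a "COMMAND is required" style error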