37 changes: 18 additions & 19 deletions benchmark/scripts/Benchmark_Driver
@@ -30,11 +30,11 @@ DRIVER_DIR = os.path.dirname(os.path.realpath(__file__))
def parse_results(res, optset):
# Parse lines like this
# #,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),PEAK_MEMORY(B)
SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," + \
",".join([r"[ \t]*([\d.]+)"]*7))
SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," +
",".join([r"[ \t]*([\d.]+)"] * 7))
# The Totals line would be parsed like this.
TOTALRE = re.compile(r"()(Totals)," + \
",".join([r"[ \t]*([\d.]+)"]*7))
TOTALRE = re.compile(r"()(Totals)," +
",".join([r"[ \t]*([\d.]+)"] * 7))
KEYGROUP = 2
VALGROUP = 4
MEMGROUP = 9
@@ -51,14 +51,14 @@ def parse_results(res, optset):
test = {}
test['Data'] = [testresult]
test['Info'] = {}
test['Name'] = "nts.swift/"+optset+"."+testname+".exec"
test['Name'] = "nts.swift/" + optset + "." + testname + ".exec"
tests.append(test)
if testname != 'Totals':
mem_testresult = int(m.group(MEMGROUP))
mem_test = {}
mem_test['Data'] = [mem_testresult]
mem_test['Info'] = {}
mem_test['Name'] = "nts.swift/mem_maxrss."+optset+"."+testname+".mem"
mem_test['Name'] = "nts.swift/mem_maxrss." + optset + "." + testname + ".mem"
tests.append(mem_test)
return tests
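For reference, a minimal sketch (not part of this patch) of what the rebuilt SCORERE captures; the benchmark line below is made up, but the group indices correspond to KEYGROUP, VALGROUP, and MEMGROUP above.

import re

SCORERE = re.compile(r"(\d+),[ \t]*(\w+)," +
                     ",".join([r"[ \t]*([\d.]+)"] * 7))
m = SCORERE.match("34,ArrayAppend,20,1102,1211,1150.5,30.2,1147,2654208")
print(m.group(2))  # test name (KEYGROUP)      -> 'ArrayAppend'
print(m.group(4))  # MIN in microseconds (VALGROUP) -> '1102'
print(m.group(9))  # peak memory in bytes (MEMGROUP) -> '2654208'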

@@ -85,7 +85,7 @@ def instrument_test(driver_path, test, num_samples):
)
peak_memory = re.match('\s*(\d+)\s*maximum resident set size',
test_output_raw.split('\n')[-15]).group(1)
test_outputs.append(test_output_raw.split()[1].split(',') + \
test_outputs.append(test_output_raw.split()[1].split(',') +
[peak_memory])

# Average sample results
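A minimal sketch (not part of this patch) of the peak-RSS parsing just above: the re.match pulls the number out of a "maximum resident set size" line, which the driver apparently reads from /usr/bin/time -l style output; the sample string and value here are made up.

import re

sample = "  2654208  maximum resident set size"
peak_memory = re.match(r'\s*(\d+)\s*maximum resident set size', sample).group(1)
print(peak_memory)  # -> '2654208'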
@@ -102,7 +102,7 @@ def instrument_test(driver_path, test, num_samples):
for i in range(AVG_START_INDEX, len(test_output)):
avg_test_output[i] += int(test_output[i])
for i in range(AVG_START_INDEX, len(avg_test_output)):
avg_test_output[i] = int(round(avg_test_output[i] / \
avg_test_output[i] = int(round(avg_test_output[i] /
float(len(test_outputs))))
avg_test_output[NUM_SAMPLES_INDEX] = num_samples
avg_test_output[MIN_INDEX] = min(test_outputs,
@@ -152,8 +152,8 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
only run tests included in it."""
(total_tests, total_min, total_max, total_mean) = (0, 0, 0, 0)
output = []
headings = ['#', 'TEST','SAMPLES','MIN(μs)','MAX(μs)','MEAN(μs)','SD(μs)',
'MEDIAN(μs)','MAX_RSS(B)']
headings = ['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'MAX(μs)', 'MEAN(μs)',
'SD(μs)', 'MEDIAN(μs)', 'MAX_RSS(B)']
line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
if verbose and log_directory:
print line_format.format(*headings)
@@ -182,7 +182,7 @@ def run_benchmarks(driver, benchmarks=[], num_samples=10, verbose=False,
totals_output = '\n\n' + ','.join(totals)
if verbose:
if log_directory:
print line_format.format(*(['']+totals))
print line_format.format(*([''] + totals))
else:
print totals_output[1:]
formatted_output += totals_output
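A minimal sketch (not part of this patch) of how line_format and the reworked headings list produce the table rows printed by run_benchmarks; the widths are copied from the hunk above, the result row is made up, and the headings are spelled with ASCII "us" only to keep the sketch self-contained.

line_format = '{:>3} {:<25} {:>7} {:>7} {:>7} {:>8} {:>6} {:>10} {:>10}'
headings = ['#', 'TEST', 'SAMPLES', 'MIN(us)', 'MAX(us)', 'MEAN(us)',
            'SD(us)', 'MEDIAN(us)', 'MAX_RSS(B)']
print(line_format.format(*headings))            # aligned header row
print(line_format.format(1, 'ArrayAppend', 20,  # one illustrative result row
                         1102, 1211, 1150, 30, 1147, 2654208))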
@@ -204,7 +204,7 @@ def submit(args):
print "\nRunning benchmarks..."
for optset in args.optimization:
print "Opt level:\t", optset
file = os.path.join(args.tests, "Benchmark_"+optset)
file = os.path.join(args.tests, "Benchmark_" + optset)
try:
res = run_benchmarks(file, benchmarks=args.benchmark,
num_samples=args.iterations)
@@ -227,7 +227,7 @@ def submit(args):

def run(args):
optset = args.optimization
file = os.path.join(args.tests, "Benchmark_"+optset)
file = os.path.join(args.tests, "Benchmark_" + optset)
run_benchmarks(file, benchmarks=args.benchmarks,
num_samples=args.iterations, verbose=True,
log_directory=args.output_dir,
@@ -260,10 +260,9 @@ def compare(args):
recent_logs = {}
for branch_dir in [current_branch_dir, master_branch_dir]:
for opt in ['O', 'Onone']:
recent_logs[os.path.basename(branch_dir) + '_' + opt] = \
sorted(glob.glob(os.path.join(branch_dir,
'Benchmark_' + opt + '-*.log')), key=os.path.getctime,
reverse=True)
recent_logs[os.path.basename(branch_dir) + '_' + opt] = sorted(
glob.glob(os.path.join(branch_dir, 'Benchmark_' + opt + '-*.log')),
key=os.path.getctime, reverse=True)

if current_branch == 'master':
if len(recent_logs['master_O']) > 1 and \
@@ -323,7 +322,7 @@ def main():
submit_parser = subparsers.add_parser('submit',
help='run benchmarks and submit results to LNT')
submit_parser.add_argument('-t', '--tests',
help='directory containing Benchmark_O{,none,unchecked} ' + \
help='directory containing Benchmark_O{,none,unchecked} ' +
'(default: DRIVER_DIR)',
default=DRIVER_DIR)
submit_parser.add_argument('-m', '--machine', required=True,
@@ -345,7 +344,7 @@ def main():
run_parser = subparsers.add_parser('run',
help='run benchmarks and output results to stdout')
run_parser.add_argument('-t', '--tests',
help='directory containing Benchmark_O{,none,unchecked} ' + \
help='directory containing Benchmark_O{,none,unchecked} ' +
'(default: DRIVER_DIR)',
default=DRIVER_DIR)
run_parser.add_argument('-i', '--iterations',
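To round out the compare() hunk above, a minimal sketch (not part of this patch) of the newest-first log lookup it now builds in a single sorted() call; the directory name is a made-up placeholder.

import glob
import os

branch_dir = '/tmp/benchmark_logs/current'   # hypothetical path
opt = 'O'
logs = sorted(glob.glob(os.path.join(branch_dir, 'Benchmark_' + opt + '-*.log')),
              key=os.path.getctime, reverse=True)
# logs[0], if present, is the most recently created Benchmark_O-*.log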
6 changes: 2 additions & 4 deletions benchmark/scripts/Benchmark_GuardMalloc.in
@@ -44,9 +44,8 @@ class GuardMallocBenchmarkDriver(perf_test_driver.BenchmarkDriver):
print "Running {}...".format(test_name)
sys.stdout.flush()
status = subprocess.call([data['path'], data['test_name'], '--num-iters=2'],
env=data['env'],
stderr=open('/dev/null', 'w'),
stdout=open('/dev/null', 'w'))
env=data['env'], stderr=open('/dev/null', 'w'),
stdout=open('/dev/null', 'w'))
return GuardMallocResult(test_name, status)

SWIFT_BIN_DIR = os.path.dirname(os.path.abspath(__file__))
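A minimal sketch (not part of this patch) of the call pattern above: run a benchmark binary with a custom environment and discard its output. The binary path and the DYLD_INSERT_LIBRARIES value are illustrative assumptions about how a Guard Malloc environment might be set up, not code from this script.

import os
import subprocess

env = dict(os.environ, DYLD_INSERT_LIBRARIES='/usr/lib/libgmalloc.dylib')  # assumption
with open(os.devnull, 'w') as devnull:
    status = subprocess.call(['./Benchmark_O', 'ArrayAppend', '--num-iters=2'],
                             env=env, stdout=devnull, stderr=devnull)
# status is the child's exit code, which GuardMallocResult records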
@@ -57,4 +56,3 @@ if __name__ == "__main__":
sys.exit(0)
else:
sys.exit(-1)

5 changes: 2 additions & 3 deletions benchmark/scripts/Benchmark_RuntimeLeaksRunner.in
@@ -13,7 +13,6 @@
# ===----------------------------------------------------------------------===//

import os
import re
import sys
import json
import subprocess
@@ -64,6 +63,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
def __init__(self, binary, xfail_list):
perf_test_driver.BenchmarkDriver.__init__(self, binary, xfail_list,
enable_parallel=True)

def prepare_input(self, name):
return {}

@@ -75,8 +75,7 @@ class LeaksRunnerBenchmarkDriver(perf_test_driver.BenchmarkDriver):
p = subprocess.Popen([data['path'], "--run-all", "--num-samples=2",
"--num-iters={}".format(2), data['test_name']],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
status = p.wait()
output = p.stdout.readlines()
p.wait()
error_out = p.stderr.readlines()
except:
print("Child Process Failed! (%s,%s)" % (data['path'], data['test_name']))
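As a hedged aside on the hunk above (which now waits on the child and reads only stderr), a minimal sketch of draining both pipes with communicate(), the usual way to avoid blocking when a captured stream fills up; the binary name and flags are illustrative.

import subprocess

p = subprocess.Popen(['./Benchmark_O', '--run-all', '--num-samples=2'],
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()       # waits for exit and collects both streams
error_out = err.splitlines()     # roughly what p.stderr.readlines() yields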
60 changes: 34 additions & 26 deletions benchmark/scripts/compare_perf_tests.py
@@ -19,27 +19,26 @@
# compare_perf_tests.py tot.O.times mypatch.O.times | sort -t, -n -k 6 | column -s, -t

import sys
import os
import re

VERBOSE=0
VERBOSE = 0

# #,TEST,SAMPLES,MIN(ms),MAX(ms),MEAN(ms),SD(ms),MEDIAN(ms)
SCORERE=re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
TOTALRE=re.compile(r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+)")
KEYGROUP=2
VALGROUP=4
NUMGROUP=1
SCORERE = re.compile(r"(\d+),[ \t]*(\w+),[ \t]*([\d.]+),[ \t]*([\d.]+)")
TOTALRE = re.compile(r"()(Totals),[ \t]*([\d.]+),[ \t]*([\d.]+)")
KEYGROUP = 2
VALGROUP = 4
NUMGROUP = 1

IsTime=1
ShowSpeedup=1
PrintAllScores=0
IsTime = 1
ShowSpeedup = 1
PrintAllScores = 0

def parseInt(word):
try:
return int(word)
except:
raise ScoreParserException("Expected integer value, not "+word)
raise Exception("Expected integer value, not " + word)

def getScores(fname):
scores = {}
@@ -48,7 +47,8 @@ def getScores(fname):
f = open(fname)
try:
for line in f:
if VERBOSE: print "Parsing", line,
if VERBOSE:
print "Parsing", line,
m = SCORERE.match(line)
is_total = False
if not m:
Expand All @@ -57,7 +57,8 @@ def getScores(fname):
if not m:
continue

if VERBOSE: print " match", m.group(KEYGROUP), m.group(VALGROUP)
if VERBOSE:
print " match", m.group(KEYGROUP), m.group(VALGROUP)

if not m.group(KEYGROUP) in scores:
scores[m.group(KEYGROUP)] = []
@@ -90,31 +91,34 @@ def compareScores(key, score1, score2, runs, num):
bestscore1 = score
if isMaxScore(newscore=score, maxscore=worstscore1, invert=minworst):
worstscore1 = score
if PrintAllScores: print ("%d" % score).rjust(16),
if PrintAllScores:
print ("%d" % score).rjust(16),
for score in score2:
if isMaxScore(newscore=score, maxscore=bestscore2, invert=minbest):
bestscore2 = score
if isMaxScore(newscore=score, maxscore=worstscore2, invert=minworst):
worstscore2 = score
if PrintAllScores: print ("%d" % score).rjust(16),
if PrintAllScores:
print ("%d" % score).rjust(16),
r += 1
while r < runs:
if PrintAllScores: print ("0").rjust(9),
if PrintAllScores:
print ("0").rjust(9),
r += 1

if not PrintAllScores:
print ("%d" % bestscore1).rjust(16),
print ("%d" % bestscore2).rjust(16),

print ("%+d" % (bestscore2-bestscore1)).rjust(9),
print ("%+d" % (bestscore2 - bestscore1)).rjust(9),

if bestscore1 != 0 and bestscore2 != 0:
print ("%+.1f%%"%(((float(bestscore2)/bestscore1)-1)*100)).rjust(9),
print ("%+.1f%%" % (((float(bestscore2) / bestscore1) - 1) * 100)).rjust(9),
if ShowSpeedup:
Num, Den = float(bestscore2), float(bestscore1)
if IsTime:
Num, Den = Den, Num
print ("%.2fx"%(Num/Den)).rjust(9),
print ("%.2fx" % (Num / Den)).rjust(9),
else:
print "*".rjust(9),
if ShowSpeedup:
@@ -165,32 +169,36 @@ def usage():
if runs2 > runs:
runs = runs2

if VERBOSE: print scores1; print scores2
if VERBOSE:
print scores1
print scores2

keys = [f for f in set(scores1.keys() + scores2.keys())]
keys.sort()
if VERBOSE:
print "comparing ", file1, "vs", file2, "=",
if IsTime: print file1, "/", file2
else: print file2, "/", file1
if IsTime:
print file1, "/", file2
else:
print file2, "/", file1

print "#".rjust(3),
print "TEST".ljust(25),
if PrintAllScores:
for i in range(0,runs):
for i in range(0, runs):
print ("OLD_RUN%d" % i).rjust(9),
for i in range(0,runs):
for i in range(0, runs):
print ("NEW_RUN%d" % i).rjust(9),
else:
print "BEST_OLD_MIN(μs)".rjust(17),
print "BEST_NEW_MIN(μs)".rjust(17),
print 'DELTA'.rjust(9), '%DELTA'.rjust(9), 'SPEEDUP'.rjust(9)

for key in keys:
if not key in scores1:
if key not in scores1:
print key, "not in", file1
continue
if not key in scores2:
if key not in scores2:
print key, "not in", file2
continue
compareScores(key, scores1[key], scores2[key], runs, nums[key])
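A small worked example (values made up, not from a real run) of the DELTA, %DELTA, and SPEEDUP columns as compareScores computes them for time-based scores:

bestscore1, bestscore2 = 1200, 1000                   # old vs. new MIN(us)
delta = bestscore2 - bestscore1                       # -200
pct = ((float(bestscore2) / bestscore1) - 1) * 100    # -16.7%
speedup = float(bestscore1) / bestscore2              # 1.20x (IsTime: old / new)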
12 changes: 6 additions & 6 deletions benchmark/scripts/generate_harness/generate_harness.py
@@ -32,7 +32,7 @@

template_loader = jinja2.FileSystemLoader(searchpath="/")
template_env = jinja2.Environment(loader=template_loader, trim_blocks=True,
lstrip_blocks=True)
lstrip_blocks=True)

if __name__ == '__main__':
# CMakeList single-source
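A minimal sketch (not part of this patch) of how a template_env configured with trim_blocks and lstrip_blocks is typically used; the template path and render variables are made up, since the real template names live elsewhere in this script.

import jinja2

template_loader = jinja2.FileSystemLoader(searchpath="/")
template_env = jinja2.Environment(loader=template_loader, trim_blocks=True,
                                  lstrip_blocks=True)
template = template_env.get_template('/tmp/CMakeLists.txt_template')  # hypothetical
print(template.render(tests=['ArrayAppend', 'DictionaryLiteral']))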
@@ -44,7 +44,7 @@ class multi_source_bench(object):
def __init__(self, path):
self.name = os.path.basename(path)
self.files = [x for x in os.listdir(path)
if x.endswith('.swift')]
if x.endswith('.swift')]
if os.path.isdir(multi_source_dir):
multisource_benches = [
multi_source_bench(os.path.join(multi_source_dir, x))
@@ -62,6 +62,7 @@ def get_run_funcs(filepath):
content = open(filepath).read()
matches = re.findall(r'func run_(.*?)\(', content)
return filter(lambda x: x not in ignored_run_funcs, matches)

def find_run_funcs(dirs):
ret_run_funcs = []
for d in dirs:
@@ -71,9 +72,9 @@ def find_run_funcs(dirs):
ret_run_funcs.extend(run_funcs)
return ret_run_funcs
run_funcs = sorted(
[(x, x)
for x in find_run_funcs([single_source_dir, multi_source_dir])],
key=lambda x: x[0]
[(x, x)
for x in find_run_funcs([single_source_dir, multi_source_dir])],
key=lambda x: x[0]
)
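A minimal sketch (not part of this patch) of the regex get_run_funcs uses to pull benchmark names out of Swift sources; the Swift snippet below is made up.

import re

content = '''
public func run_ArrayAppend(N: Int) {}
public func run_DictionaryLiteral(N: Int) {}
'''
print(re.findall(r'func run_(.*?)\(', content))
# -> ['ArrayAppend', 'DictionaryLiteral']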

# Replace originals with files generated from templates
@@ -87,4 +88,3 @@ def find_run_funcs(dirs):
imports=imports,
run_funcs=run_funcs)
)

4 changes: 1 addition & 3 deletions benchmark/scripts/perf_test_driver/perf_test_driver.py
@@ -13,7 +13,6 @@
# ===----------------------------------------------------------------------===//

import os
import sys
import subprocess
import multiprocessing
import re
@@ -85,7 +84,7 @@ def prepare_input_wrapper(name):
results = None
if self.enable_parallel:
p = multiprocessing.Pool()
z = zip([self]*len(prepared_input), prepared_input)
z = zip([self] * len(prepared_input), prepared_input)
results = p.map(_unwrap_self, z)
else:
results = map(self.process_input, prepared_input)
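A minimal sketch (not part of this patch) of the Pool + zip pattern above: bound methods do not pickle under Python 2's multiprocessing, so each (driver, input) pair is shipped to a module-level trampoline instead. _unwrap_self's real definition is not shown in this diff; the version here is an illustrative guess, as is the Driver stand-in class.

import multiprocessing

def _unwrap_self(args):
    obj, data = args
    return obj.process_input(data)   # assumes obj itself is picklable

class Driver(object):                # stand-in for BenchmarkDriver
    def process_input(self, data):
        return data * 2

if __name__ == '__main__':
    inputs = [1, 2, 3]
    z = zip([Driver()] * len(inputs), inputs)
    print(multiprocessing.Pool().map(_unwrap_self, z))  # -> [2, 4, 6]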
@@ -112,4 +111,3 @@ def run(self):
has_failure = reduce(max, [d['has_failure']for d in self.data])
self.print_data(self.data, max_test_len)
return not has_failure
