Skip to content

Commit 7d24f6e

Browse files
author
stelliom
committed
Changed the runtime plots to performance plots
1 parent 73d0c6c commit 7d24f6e

File tree

1 file changed

+48
-5
lines changed

1 file changed

+48
-5
lines changed

scripts/plot-benchmarks.py

+48-5
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,44 @@ def collect_all_timings(path, prefix, labels, substeps):
8181
data[label][b][substep]["mean"] = float(vals[0])
8282
data[label][b][substep]["median"] = float(vals[1])
8383
data[label][b][substep]["stdev"] = float(vals[2])
84+
85+
return data
86+
87+
88+
def collect_all_flops(path, prefix, labels, substeps):
    """
    Collect all flops data from each benchmark's cost_analysis.txt file.

    Parameters:
        path: root directory containing the benchmark batch directories
        prefix: directory-name prefix preceding each label (batch dir = prefix+label)
        labels: optimization-stage labels (e.g. 'v1.1') to collect
        substeps: substep names searched for in each cost_analysis.txt line

    Returns a nested dict: data[label][benchmark][substep][counter].
    Example of access to return dictionary: data['v1.1']['benchmark-1-0']['FLIP']['adds']
    Counters recorded per substep are 'adds', 'muls', 'divs' and 'read'.

    Raises SystemExit when an expected cost_analysis.txt file is missing.
    """
    data = {}
    # The benchmark set does not depend on the label; fetch it once.
    benchmark_names = get_benchmark_names()

    for label in labels:
        data[label] = {}
        batch_path = os.path.join(path, f"{prefix}{label}")

        for b in benchmark_names:
            filename = os.path.abspath(
                os.path.join(batch_path, b, "cost_analysis.txt"))
            print(f"Reading {filename}...")

            if not os.path.isfile(filename):
                # Fail fast and report WHICH file is missing.
                raise SystemExit(f"Error: could not open file '{filename}'.")

            with open(filename) as f:
                lines = f.read().splitlines()

            data[label][b] = {}
            for line in lines:
                for substep in substeps:
                    if substep in line:
                        # Expected columns: <substep> <adds> <muls> <divs> <read>
                        vals = line.split()
                        data[label][b][substep] = {
                            'adds': float(vals[1]),
                            'muls': float(vals[2]),
                            'divs': float(vals[3]),
                            'read': float(vals[4]),
                        }

    return data
85123

86124

@@ -119,7 +157,7 @@ def plot_histogram(benchmark, tags, substeps, all_data, output, show=False, titl
119157
plt.close(fig)
120158

121159

122-
def plot_perf(tag, substeps, all_data, output, show=False, title=None):
160+
def plot_perf(tag, substeps, all_data, all_flops, output, show=False, title=None):
123161
if title is None:
124162
title = f"Runtime plot for optimization stage '{tag}'"
125163

@@ -130,7 +168,9 @@ def plot_perf(tag, substeps, all_data, output, show=False, title=None):
130168
for substep in substeps:
131169
tmp = []
132170
for benchmark in benchmarks:
133-
tmp.append(all_data[tag][benchmark][substep]['mean'])
171+
if substep != "compute_mesh": tmp.append((all_flops[tag][benchmark][substep]['adds'] + all_flops[tag][benchmark][substep]['muls']) / all_data[tag][benchmark][substep]['mean'])
172+
else: tmp.append(0.)
173+
# tmp.append(all_data[tag][benchmark][substep]['mean'])
134174
my_data[substep] = np.array(tmp)
135175

136176
fig, ax = plt.subplots()
@@ -140,7 +180,7 @@ def plot_perf(tag, substeps, all_data, output, show=False, title=None):
140180
ax.set_prop_cycle(my_cycler)
141181

142182

143-
ax.set_ylabel("Average runtime [cycles]")
183+
ax.set_ylabel("Average performance [cycles]")
144184
ax.set_xlabel("Number of cells [-]")
145185
for substep in substeps:
146186
ax.plot(problem_dimensions, my_data[substep], label=substep)
@@ -196,7 +236,10 @@ def plot_perf(tag, substeps, all_data, output, show=False, title=None):
196236
if d.find(prefix) == 0:
197237
all_tags.append(d.replace(prefix, "", 1))
198238

199-
all_data = collect_all_timings(path, prefix, all_tags, substeps)
239+
print(all_tags, args.tag)
240+
241+
all_data = collect_all_timings(path, prefix, all_tags, substeps)
242+
all_flops = collect_all_flops(path, prefix, all_tags, substeps)
200243

201244
benchmarks_hist_plots = None
202245
if args.all:
@@ -216,4 +259,4 @@ def plot_perf(tag, substeps, all_data, output, show=False, title=None):
216259

217260
if benchmark_perf_plots is not None:
218261
for tag in benchmark_perf_plots:
219-
plot_perf(tag, substeps, all_data, output, show)
262+
plot_perf(tag, substeps, all_data, all_flops, output, show)

0 commit comments

Comments
 (0)