X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_plots.py;h=7298babe537f0b43a6e4fcabd5f9bbc03e3d6d20;hp=3b9bf1c6bea08af0be8c3d1ae300b3f89d31a2ad;hb=0c02e000ae58474120246c484cf5458a76510288;hpb=a6ed764aecf2983a759931cc8d4bef161045d062 diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py index 3b9bf1c6be..7298babe53 100644 --- a/resources/tools/presentation/generator_plots.py +++ b/resources/tools/presentation/generator_plots.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020 Cisco and/or its affiliates. +# Copyright (c) 2021 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -18,15 +18,16 @@ import re import logging -from collections import OrderedDict -from copy import deepcopy - import hdrh.histogram import hdrh.codec import pandas as pd import plotly.offline as ploff import plotly.graph_objs as plgo +from collections import OrderedDict +from copy import deepcopy +from math import log + from plotly.exceptions import PlotlyError from pal_utils import mean, stdev @@ -36,18 +37,18 @@ COLORS = ( u"#1A1110", u"#DA2647", u"#214FC6", - u"#45A27D", - u"#391285", - u"#C95A49", + u"#01786F", + u"#BD8260", u"#FFD12A", + u"#A6E7FF", u"#738276", - u"#BD8260", + u"#C95A49", u"#FC5A8D", u"#CEC8EF", - u"#A6E7FF", + u"#391285", u"#6F2DA8", u"#FF878D", - u"#01786F", + u"#45A27D", u"#FFD0B9", u"#FD5240", u"#DB91EF", @@ -60,6 +61,9 @@ COLORS = ( REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)-') +# This value depends on latency stream rate (9001 pps) and duration (5s). +PERCENTILE_MAX = 99.9995 + def generate_plots(spec, data): """Generate all plots specified in the specification file. @@ -76,7 +80,8 @@ def generate_plots(spec, data): u"plot_tsa_name": plot_tsa_name, u"plot_http_server_perf_box": plot_http_server_perf_box, u"plot_nf_heatmap": plot_nf_heatmap, - u"plot_hdrh_lat_by_percentile": plot_hdrh_lat_by_percentile + u"plot_hdrh_lat_by_percentile": plot_hdrh_lat_by_percentile, + u"plot_hdrh_lat_by_percentile_x_log": plot_hdrh_lat_by_percentile_x_log } logging.info(u"Generating the plots ...") @@ -171,29 +176,39 @@ def plot_hdrh_lat_by_percentile(plot, input_data): for color, graph in enumerate(graphs): for idx, direction in enumerate((u"direction1", u"direction2")): - xaxis = [0.0, ] - yaxis = [0.0, ] - hovertext = [ - f"{desc[graph]}
" - f"Direction: {(u'W-E', u'E-W')[idx % 2]}
" - f"Percentile: 0.0%
" - f"Latency: 0.0uSec" - ] - decoded = hdrh.histogram.HdrHistogram.decode( - test[u"latency"][graph][direction][u"hdrh"] - ) + previous_x = 0.0 + xaxis = list() + yaxis = list() + hovertext = list() + try: + decoded = hdrh.histogram.HdrHistogram.decode( + test[u"latency"][graph][direction][u"hdrh"] + ) + except hdrh.codec.HdrLengthException: + logging.warning( + f"No data for direction {(u'W-E', u'E-W')[idx % 2]}" + ) + continue + for item in decoded.get_recorded_iterator(): percentile = item.percentile_level_iterated_to - if percentile > 99.9: - continue + xaxis.append(previous_x) + yaxis.append(item.value_iterated_to) + hovertext.append( + f"{desc[graph]}
" + f"Direction: {(u'W-E', u'E-W')[idx % 2]}
" + f"Percentile: {previous_x:.5f}-{percentile:.5f}%
" + f"Latency: {item.value_iterated_to}uSec" + ) xaxis.append(percentile) yaxis.append(item.value_iterated_to) hovertext.append( f"{desc[graph]}
" f"Direction: {(u'W-E', u'E-W')[idx % 2]}
" - f"Percentile: {percentile:.5f}%
" + f"Percentile: {previous_x:.5f}-{percentile:.5f}%
" f"Latency: {item.value_iterated_to}uSec" ) + previous_x = percentile fig.add_trace( plgo.Scatter( x=xaxis, @@ -204,7 +219,178 @@ def plot_hdrh_lat_by_percentile(plot, input_data): showlegend=bool(idx), line=dict( color=COLORS[color], - dash=u"solid" if idx % 2 else u"dash" + dash=u"solid", + width=1 if idx % 2 else 2 + ), + hovertext=hovertext, + hoverinfo=u"text" + ) + ) + + layout[u"title"][u"text"] = f"Latency: {name}" + fig.update_layout(layout) + + # Create plot + file_name = f"{plot[u'output-file']}-{name_link}.html" + logging.info(f" Writing file {file_name}") + + try: + # Export Plot + ploff.plot(fig, show_link=False, auto_open=False, + filename=file_name) + # Add link to the file: + if file_links and target_links: + with open(file_links, u"a") as file_handler: + file_handler.write( + f"- `{name_link} " + f"<{target_links}/{file_name.split(u'/')[-1]}>`_\n" + ) + except FileNotFoundError as err: + logging.error( + f"Not possible to write the link to the file " + f"{file_links}\n{err}" + ) + except PlotlyError as err: + logging.error(f" Finished with error: {repr(err)}") + + except hdrh.codec.HdrLengthException as err: + logging.warning(repr(err)) + continue + + except (ValueError, KeyError) as err: + logging.warning(repr(err)) + continue + + +def plot_hdrh_lat_by_percentile_x_log(plot, input_data): + """Generate the plot(s) with algorithm: plot_hdrh_lat_by_percentile_x_log + specified in the specification file. + + :param plot: Plot to generate. + :param input_data: Data to process. + :type plot: pandas.Series + :type input_data: InputData + """ + + # Transform the data + logging.info( + f" Creating the data set for the {plot.get(u'type', u'')} " + f"{plot.get(u'title', u'')}." + ) + if plot.get(u"include", None): + data = input_data.filter_tests_by_name( + plot, + params=[u"name", u"latency", u"parent", u"tags", u"type"] + )[0][0] + elif plot.get(u"filter", None): + data = input_data.filter_data( + plot, + params=[u"name", u"latency", u"parent", u"tags", u"type"], + continue_on_error=True + )[0][0] + else: + job = list(plot[u"data"].keys())[0] + build = str(plot[u"data"][job][0]) + data = input_data.tests(job, build) + + if data is None or len(data) == 0: + logging.error(u"No data.") + return + + desc = { + u"LAT0": u"No-load.", + u"PDR10": u"Low-load, 10% PDR.", + u"PDR50": u"Mid-load, 50% PDR.", + u"PDR90": u"High-load, 90% PDR.", + u"PDR": u"Full-load, 100% PDR.", + u"NDR10": u"Low-load, 10% NDR.", + u"NDR50": u"Mid-load, 50% NDR.", + u"NDR90": u"High-load, 90% NDR.", + u"NDR": u"Full-load, 100% NDR." + } + + graphs = [ + u"LAT0", + u"PDR10", + u"PDR50", + u"PDR90" + ] + + file_links = plot.get(u"output-file-links", None) + target_links = plot.get(u"target-links", None) + + for test in data: + try: + if test[u"type"] not in (u"NDRPDR",): + logging.warning(f"Invalid test type: {test[u'type']}") + continue + name = re.sub(REGEX_NIC, u"", test[u"parent"]. 
+                          replace(u'-ndrpdr', u'').replace(u'2n1l-', u''))
+            try:
+                nic = re.search(REGEX_NIC, test[u"parent"]).group(1)
+            except (IndexError, AttributeError, KeyError, ValueError):
+                nic = u""
+            name_link = f"{nic}-{test[u'name']}".replace(u'-ndrpdr', u'')
+
+            logging.info(f"    Generating the graph: {name_link}")
+
+            fig = plgo.Figure()
+            layout = deepcopy(plot[u"layout"])
+
+            for color, graph in enumerate(graphs):
+                for idx, direction in enumerate((u"direction1", u"direction2")):
+                    previous_x = 0.0
+                    prev_perc = 0.0
+                    xaxis = list()
+                    yaxis = list()
+                    hovertext = list()
+                    try:
+                        decoded = hdrh.histogram.HdrHistogram.decode(
+                            test[u"latency"][graph][direction][u"hdrh"]
+                        )
+                    except hdrh.codec.HdrLengthException:
+                        logging.warning(
+                            f"No data for direction {(u'W-E', u'E-W')[idx % 2]}"
+                        )
+                        continue
+
+                    for item in decoded.get_recorded_iterator():
+                        # The real value is "percentile".
+                        # For 100%, we cut that down to "x_perc" to avoid
+                        # infinity.
+                        percentile = item.percentile_level_iterated_to
+                        x_perc = min(percentile, PERCENTILE_MAX)
+                        xaxis.append(previous_x)
+                        yaxis.append(item.value_iterated_to)
+                        hovertext.append(
+                            f"{desc[graph]}<br>"
+                            f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
+                            f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>"
+                            f"Latency: {item.value_iterated_to}uSec"
+                        )
+                        next_x = 100.0 / (100.0 - x_perc)
+                        xaxis.append(next_x)
+                        yaxis.append(item.value_iterated_to)
+                        hovertext.append(
+                            f"{desc[graph]}<br>"
+                            f"Direction: {(u'W-E', u'E-W')[idx % 2]}<br>"
+                            f"Percentile: {prev_perc:.5f}-{percentile:.5f}%<br>
" + f"Latency: {item.value_iterated_to}uSec" + ) + previous_x = next_x + prev_perc = percentile + fig.add_trace( + plgo.Scatter( + x=xaxis, + y=yaxis, + name=desc[graph], + mode=u"lines", + legendgroup=desc[graph], + showlegend=not(bool(idx)), + line=dict( + color=COLORS[color], + dash=u"solid", + width=1 if idx % 2 else 2 ), hovertext=hovertext, hoverinfo=u"text" @@ -212,6 +398,7 @@ def plot_hdrh_lat_by_percentile(plot, input_data): ) layout[u"title"][u"text"] = f"Latency: {name}" + layout[u"xaxis"][u"range"] = [0, 5.302] fig.update_layout(layout) # Create plot @@ -299,19 +486,21 @@ def plot_nf_reconf_box_name(plot, input_data): df_y = pd.DataFrame(y_vals) df_y.head() for i, col in enumerate(df_y.columns): + tst_name = re.sub(REGEX_NIC, u"", - col.lower().replace(u'-ndrpdr', u''). - replace(u'2n1l-', u'')) + col.lower().replace(u'-reconf', u''). + replace(u'2n1l-', u'').replace(u'2n-', u''). + replace(u'-testpmd', u'')) traces.append(plgo.Box( x=[str(i + 1) + u'.'] * len(df_y[col]), - y=[y if y else None for y in df_y[col]], + y=df_y[col], name=( f"{i + 1}. " f"({nr_of_samples[i]:02d} " f"run{u's' if nr_of_samples[i] > 1 else u''}, " f"packets lost average: {mean(loss[col]):.1f}) " - f"{u'-'.join(tst_name.split(u'-')[3:-2])}" + f"{u'-'.join(tst_name.split(u'-')[2:])}" ), hoverinfo=u"y+name" )) @@ -319,7 +508,7 @@ def plot_nf_reconf_box_name(plot, input_data): # Create plot layout = deepcopy(plot[u"layout"]) layout[u"title"] = f"Time Lost: {layout[u'title']}" - layout[u"yaxis"][u"title"] = u"Implied Time Lost [s]" + layout[u"yaxis"][u"title"] = u"Effective Blocked Time [s]" layout[u"legend"][u"font"][u"size"] = 14 layout[u"yaxis"].pop(u"range") plpl = plgo.Figure(data=traces, layout=layout) @@ -356,52 +545,83 @@ def plot_perf_box_name(plot, input_data): f"{plot.get(u'title', u'')}." ) data = input_data.filter_tests_by_name( - plot, params=[u"throughput", u"result", u"parent", u"tags", u"type"]) + plot, + params=[u"throughput", u"gbps", u"result", u"parent", u"tags", u"type"]) if data is None: logging.error(u"No data.") return # Prepare the data for the plot + plot_title = plot.get(u"title", u"").lower() + + if u"-gbps" in plot_title: + value = u"gbps" + multiplier = 1e6 + else: + value = u"throughput" + multiplier = 1.0 y_vals = OrderedDict() test_type = u"" - for job in data: - for build in job: - for test in build: - if y_vals.get(test[u"parent"], None) is None: - y_vals[test[u"parent"]] = list() - try: - if (test[u"type"] in (u"NDRPDR", ) and - u"-pdr" in plot.get(u"title", u"").lower()): - y_vals[test[u"parent"]].\ - append(test[u"throughput"][u"PDR"][u"LOWER"]) - test_type = u"NDRPDR" - elif (test[u"type"] in (u"NDRPDR", ) and - u"-ndr" in plot.get(u"title", u"").lower()): - y_vals[test[u"parent"]]. 
\ - append(test[u"throughput"][u"NDR"][u"LOWER"]) - test_type = u"NDRPDR" - elif test[u"type"] in (u"SOAK", ): - y_vals[test[u"parent"]].\ - append(test[u"throughput"][u"LOWER"]) - test_type = u"SOAK" - elif test[u"type"] in (u"HOSTSTACK", ): - if u"LDPRELOAD" in test[u"tags"]: - y_vals[test[u"parent"]].append( - float(test[u"result"][u"bits_per_second"]) / 1e3 - ) - elif u"VPPECHO" in test[u"tags"]: + + for item in plot.get(u"include", tuple()): + reg_ex = re.compile(str(item).lower()) + for job in data: + for build in job: + for test_id, test in build.iteritems(): + if not re.match(reg_ex, str(test_id).lower()): + continue + if y_vals.get(test[u"parent"], None) is None: + y_vals[test[u"parent"]] = list() + try: + if test[u"type"] in (u"NDRPDR", u"CPS"): + test_type = test[u"type"] + + if u"-pdr" in plot_title: + ttype = u"PDR" + elif u"-ndr" in plot_title: + ttype = u"NDR" + else: + raise RuntimeError( + u"Wrong title. No information about test " + u"type. Add '-ndr' or '-pdr' to the test " + u"title." + ) + y_vals[test[u"parent"]].append( - (float(test[u"result"][u"client"][u"tx_data"]) - * 8 / 1e3) / - ((float(test[u"result"][u"client"][u"time"]) + - float(test[u"result"][u"server"][u"time"])) / - 2) + test[value][ttype][u"LOWER"] * multiplier ) - test_type = u"HOSTSTACK" - else: - continue - except (KeyError, TypeError): - y_vals[test[u"parent"]].append(None) + + elif test[u"type"] in (u"SOAK",): + y_vals[test[u"parent"]]. \ + append(test[u"throughput"][u"LOWER"]) + test_type = u"SOAK" + + elif test[u"type"] in (u"HOSTSTACK",): + if u"LDPRELOAD" in test[u"tags"]: + y_vals[test[u"parent"]].append( + float( + test[u"result"][u"bits_per_second"] + ) / 1e3 + ) + elif u"VPPECHO" in test[u"tags"]: + y_vals[test[u"parent"]].append( + (float( + test[u"result"][u"client"][u"tx_data"] + ) * 8 / 1e3) / + ((float( + test[u"result"][u"client"][u"time"] + ) + + float( + test[u"result"][u"server"][u"time"]) + ) / 2) + ) + test_type = u"HOSTSTACK" + + else: + continue + + except (KeyError, TypeError): + y_vals[test[u"parent"]].append(None) # Add None to the lists with missing data max_len = 0 @@ -453,6 +673,8 @@ def plot_perf_box_name(plot, input_data): if layout.get(u"title", None): if test_type in (u"HOSTSTACK", ): layout[u"title"] = f"Bandwidth: {layout[u'title']}" + elif test_type in (u"CPS", ): + layout[u"title"] = f"CPS: {layout[u'title']}" else: layout[u"title"] = f"Throughput: {layout[u'title']}" if y_max: @@ -491,43 +713,62 @@ def plot_tsa_name(plot, input_data): f" Creating data set for the {plot.get(u'type', u'')} {plot_title}." ) data = input_data.filter_tests_by_name( - plot, params=[u"throughput", u"parent", u"tags", u"type"]) + plot, + params=[u"throughput", u"gbps", u"parent", u"tags", u"type"] + ) if data is None: logging.error(u"No data.") return - y_vals = OrderedDict() - for job in data: - for build in job: - for test in build: - if y_vals.get(test[u"parent"], None) is None: - y_vals[test[u"parent"]] = { - u"1": list(), - u"2": list(), - u"4": list() - } - try: - if test[u"type"] not in (u"NDRPDR",): - continue + plot_title = plot_title.lower() - if u"-pdr" in plot_title.lower(): - ttype = u"PDR" - elif u"-ndr" in plot_title.lower(): - ttype = u"NDR" - else: - continue + if u"-gbps" in plot_title: + value = u"gbps" + h_unit = u"Gbps" + multiplier = 1e6 + else: + value = u"throughput" + h_unit = u"Mpps" + multiplier = 1.0 - if u"1C" in test[u"tags"]: - y_vals[test[u"parent"]][u"1"]. 
\ - append(test[u"throughput"][ttype][u"LOWER"]) - elif u"2C" in test[u"tags"]: - y_vals[test[u"parent"]][u"2"]. \ - append(test[u"throughput"][ttype][u"LOWER"]) - elif u"4C" in test[u"tags"]: - y_vals[test[u"parent"]][u"4"]. \ - append(test[u"throughput"][ttype][u"LOWER"]) - except (KeyError, TypeError): - pass + y_vals = OrderedDict() + for item in plot.get(u"include", tuple()): + reg_ex = re.compile(str(item).lower()) + for job in data: + for build in job: + for test_id, test in build.iteritems(): + if re.match(reg_ex, str(test_id).lower()): + if y_vals.get(test[u"parent"], None) is None: + y_vals[test[u"parent"]] = { + u"1": list(), + u"2": list(), + u"4": list() + } + try: + if test[u"type"] not in (u"NDRPDR", u"CPS"): + continue + + if u"-pdr" in plot_title: + ttype = u"PDR" + elif u"-ndr" in plot_title: + ttype = u"NDR" + else: + continue + + if u"1C" in test[u"tags"]: + y_vals[test[u"parent"]][u"1"].append( + test[value][ttype][u"LOWER"] * multiplier + ) + elif u"2C" in test[u"tags"]: + y_vals[test[u"parent"]][u"2"].append( + test[value][ttype][u"LOWER"] * multiplier + ) + elif u"4C" in test[u"tags"]: + y_vals[test[u"parent"]][u"4"].append( + test[value][ttype][u"LOWER"] * multiplier + ) + except (KeyError, TypeError): + pass if not y_vals: logging.warning(f"No data for the plot {plot.get(u'title', u'')}") @@ -547,7 +788,7 @@ def plot_tsa_name(plot, input_data): y_max = list() nic_limit = 0 lnk_limit = 0 - pci_limit = plot[u"limits"][u"pci"][u"pci-g3-x8"] + pci_limit = 0 for test_name, test_vals in y_vals.items(): try: if test_vals[u"1"][1]: @@ -631,108 +872,110 @@ def plot_tsa_name(plot, input_data): if limit > lnk_limit: lnk_limit = limit + if u"cx556a" in test_name: + limit = plot[u"limits"][u"pci"][u"pci-g3-x8"] + else: + limit = plot[u"limits"][u"pci"][u"pci-g3-x16"] + if limit > pci_limit: + pci_limit = limit + traces = list() annotations = list() x_vals = [1, 2, 4] # Limits: - try: - threshold = 1.1 * max(y_max) # 10% - except ValueError as err: - logging.error(err) - return - nic_limit /= 1e6 - traces.append(plgo.Scatter( - x=x_vals, - y=[nic_limit, ] * len(x_vals), - name=f"NIC: {nic_limit:.2f}Mpps", - showlegend=False, - mode=u"lines", - line=dict( - dash=u"dot", - color=COLORS[-1], - width=1), - hoverinfo=u"none" - )) - annotations.append(dict( - x=1, - y=nic_limit, - xref=u"x", - yref=u"y", - xanchor=u"left", - yanchor=u"bottom", - text=f"NIC: {nic_limit:.2f}Mpps", - font=dict( - size=14, - color=COLORS[-1], - ), - align=u"left", - showarrow=False - )) - y_max.append(nic_limit) - - lnk_limit /= 1e6 - if lnk_limit < threshold: - traces.append(plgo.Scatter( - x=x_vals, - y=[lnk_limit, ] * len(x_vals), - name=f"Link: {lnk_limit:.2f}Mpps", - showlegend=False, - mode=u"lines", - line=dict( - dash=u"dot", - color=COLORS[-2], - width=1), - hoverinfo=u"none" - )) - annotations.append(dict( - x=1, - y=lnk_limit, - xref=u"x", - yref=u"y", - xanchor=u"left", - yanchor=u"bottom", - text=f"Link: {lnk_limit:.2f}Mpps", - font=dict( - size=14, - color=COLORS[-2], - ), - align=u"left", - showarrow=False - )) - y_max.append(lnk_limit) - - pci_limit /= 1e6 - if (pci_limit < threshold and - (pci_limit < lnk_limit * 0.95 or lnk_limit > lnk_limit * 1.05)): - traces.append(plgo.Scatter( - x=x_vals, - y=[pci_limit, ] * len(x_vals), - name=f"PCIe: {pci_limit:.2f}Mpps", - showlegend=False, - mode=u"lines", - line=dict( - dash=u"dot", - color=COLORS[-3], - width=1), - hoverinfo=u"none" - )) - annotations.append(dict( - x=1, - y=pci_limit, - xref=u"x", - yref=u"y", - xanchor=u"left", - 
yanchor=u"bottom", - text=f"PCIe: {pci_limit:.2f}Mpps", - font=dict( - size=14, - color=COLORS[-3], - ), - align=u"left", - showarrow=False - )) - y_max.append(pci_limit) + if u"-gbps" not in plot_title and u"-cps-" not in plot_title: + nic_limit /= 1e6 + lnk_limit /= 1e6 + pci_limit /= 1e6 + min_limit = min((nic_limit, lnk_limit, pci_limit)) + if nic_limit == min_limit: + traces.append(plgo.Scatter( + x=x_vals, + y=[nic_limit, ] * len(x_vals), + name=f"NIC: {nic_limit:.2f}Mpps", + showlegend=False, + mode=u"lines", + line=dict( + dash=u"dot", + color=COLORS[-1], + width=1), + hoverinfo=u"none" + )) + annotations.append(dict( + x=1, + y=nic_limit, + xref=u"x", + yref=u"y", + xanchor=u"left", + yanchor=u"bottom", + text=f"NIC: {nic_limit:.2f}Mpps", + font=dict( + size=14, + color=COLORS[-1], + ), + align=u"left", + showarrow=False + )) + y_max.append(nic_limit) + elif lnk_limit == min_limit: + traces.append(plgo.Scatter( + x=x_vals, + y=[lnk_limit, ] * len(x_vals), + name=f"Link: {lnk_limit:.2f}Mpps", + showlegend=False, + mode=u"lines", + line=dict( + dash=u"dot", + color=COLORS[-1], + width=1), + hoverinfo=u"none" + )) + annotations.append(dict( + x=1, + y=lnk_limit, + xref=u"x", + yref=u"y", + xanchor=u"left", + yanchor=u"bottom", + text=f"Link: {lnk_limit:.2f}Mpps", + font=dict( + size=14, + color=COLORS[-1], + ), + align=u"left", + showarrow=False + )) + y_max.append(lnk_limit) + elif pci_limit == min_limit: + traces.append(plgo.Scatter( + x=x_vals, + y=[pci_limit, ] * len(x_vals), + name=f"PCIe: {pci_limit:.2f}Mpps", + showlegend=False, + mode=u"lines", + line=dict( + dash=u"dot", + color=COLORS[-1], + width=1), + hoverinfo=u"none" + )) + annotations.append(dict( + x=1, + y=pci_limit, + xref=u"x", + yref=u"y", + xanchor=u"left", + yanchor=u"bottom", + text=f"PCIe: {pci_limit:.2f}Mpps", + font=dict( + size=14, + color=COLORS[-1], + ), + align=u"left", + showarrow=False + )) + y_max.append(pci_limit) # Perfect and measured: cidx = 0 @@ -744,7 +987,7 @@ def plot_tsa_name(plot, input_data): if isinstance(val[u"val"][idx], float): htext += ( f"No. of Runs: {val[u'count'][idx]}
" - f"Mean: {val[u'val'][idx]:.2f}Mpps
" + f"Mean: {val[u'val'][idx]:.2f}{h_unit}
" ) if isinstance(val[u"diff"][idx], float): htext += f"Diff: {round(val[u'diff'][idx]):.0f}%
"