X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_plots.py;h=05f525c96af4318d8b486f4a73efebd9d0787b2e;hp=2b1c48f02e73abe55e17d8f035f39460bc5e7bc8;hb=ef5c30213bb28824a55f4ebbcade6410ee8d2461;hpb=174ad309b359e9b323b97cae0a6877dce33deb5f diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py index 2b1c48f02e..05f525c96a 100644 --- a/resources/tools/presentation/generator_plots.py +++ b/resources/tools/presentation/generator_plots.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019 Cisco and/or its affiliates. +# Copyright (c) 2020 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: @@ -18,29 +18,48 @@ import re import logging -from collections import OrderedDict -from copy import deepcopy - import hdrh.histogram import hdrh.codec import pandas as pd import plotly.offline as ploff import plotly.graph_objs as plgo -from plotly.subplots import make_subplots +from collections import OrderedDict +from copy import deepcopy +from math import log + from plotly.exceptions import PlotlyError from pal_utils import mean, stdev -COLORS = [u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink", - u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black", - u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson", - u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod", - u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon", - u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey"] - -REGEX_NIC = re.compile(r'\d*ge\dp\d\D*\d*-') +COLORS = ( + u"#1A1110", + u"#DA2647", + u"#214FC6", + u"#01786F", + u"#BD8260", + u"#FFD12A", + u"#A6E7FF", + u"#738276", + u"#C95A49", + u"#FC5A8D", + u"#CEC8EF", + u"#391285", + u"#6F2DA8", + u"#FF878D", + u"#45A27D", + u"#FFD0B9", + u"#FD5240", + u"#DB91EF", + u"#44D7A8", + u"#4F86F7", + u"#84DE02", + u"#FFCFF1", + u"#614051" +) + +REGEX_NIC = re.compile(r'(\d*ge\dp\d\D*\d*[a-z]*)-') def generate_plots(spec, data): @@ -55,11 +74,11 @@ def generate_plots(spec, data): generator = { u"plot_nf_reconf_box_name": plot_nf_reconf_box_name, u"plot_perf_box_name": plot_perf_box_name, - u"plot_lat_err_bars_name": plot_lat_err_bars_name, u"plot_tsa_name": plot_tsa_name, u"plot_http_server_perf_box": plot_http_server_perf_box, u"plot_nf_heatmap": plot_nf_heatmap, - u"plot_lat_hdrh_bar_name": plot_lat_hdrh_bar_name + u"plot_hdrh_lat_by_percentile": plot_hdrh_lat_by_percentile, + u"plot_hdrh_lat_by_percentile_x_log": plot_hdrh_lat_by_percentile_x_log } logging.info(u"Generating the plots ...") @@ -77,8 +96,8 @@ def generate_plots(spec, data): logging.info(u"Done.") -def plot_lat_hdrh_bar_name(plot, input_data): - """Generate the plot(s) with algorithm: plot_lat_hdrh_bar_name +def plot_hdrh_lat_by_percentile(plot, input_data): + """Generate the plot(s) with algorithm: plot_hdrh_lat_by_percentile specified in the specification file. :param plot: Plot to generate. @@ -88,169 +107,305 @@ def plot_lat_hdrh_bar_name(plot, input_data): """ # Transform the data - plot_title = plot.get(u"title", u"") logging.info( f" Creating the data set for the {plot.get(u'type', u'')} " - f"{plot_title}." + f"{plot.get(u'title', u'')}." 
) - data = input_data.filter_tests_by_name( - plot, params=[u"latency", u"parent", u"tags", u"type"]) - if data is None or len(data[0][0]) == 0: + if plot.get(u"include", None): + data = input_data.filter_tests_by_name( + plot, + params=[u"name", u"latency", u"parent", u"tags", u"type"] + )[0][0] + elif plot.get(u"filter", None): + data = input_data.filter_data( + plot, + params=[u"name", u"latency", u"parent", u"tags", u"type"], + continue_on_error=True + )[0][0] + else: + job = list(plot[u"data"].keys())[0] + build = str(plot[u"data"][job][0]) + data = input_data.tests(job, build) + + if data is None or len(data) == 0: logging.error(u"No data.") return - # Prepare the data for the plot - directions = [u"W-E", u"E-W"] - tests = list() - traces = list() - for idx_row, test in enumerate(data[0][0]): + desc = { + u"LAT0": u"No-load.", + u"PDR10": u"Low-load, 10% PDR.", + u"PDR50": u"Mid-load, 50% PDR.", + u"PDR90": u"High-load, 90% PDR.", + u"PDR": u"Full-load, 100% PDR.", + u"NDR10": u"Low-load, 10% NDR.", + u"NDR50": u"Mid-load, 50% NDR.", + u"NDR90": u"High-load, 90% NDR.", + u"NDR": u"Full-load, 100% NDR." + } + + graphs = [ + u"LAT0", + u"PDR10", + u"PDR50", + u"PDR90" + ] + + file_links = plot.get(u"output-file-links", None) + target_links = plot.get(u"target-links", None) + + for test in data: try: - if test[u"type"] in (u"NDRPDR",): - if u"-pdr" in plot_title.lower(): - ttype = u"PDR" - elif u"-ndr" in plot_title.lower(): - ttype = u"NDR" - else: - logging.warning(f"Invalid test type: {test[u'type']}") - continue - name = re.sub(REGEX_NIC, u"", test[u"parent"]. - replace(u'-ndrpdr', u''). - replace(u'2n1l-', u'')) - histograms = list() - for idx_col, direction in enumerate( - (u"direction1", u"direction2", )): + if test[u"type"] not in (u"NDRPDR",): + logging.warning(f"Invalid test type: {test[u'type']}") + continue + name = re.sub(REGEX_NIC, u"", test[u"parent"]. + replace(u'-ndrpdr', u'').replace(u'2n1l-', u'')) + try: + nic = re.search(REGEX_NIC, test[u"parent"]).group(1) + except (IndexError, AttributeError, KeyError, ValueError): + nic = u"" + name_link = f"{nic}-{test[u'name']}".replace(u'-ndrpdr', u'') + + logging.info(f" Generating the graph: {name_link}") + + fig = plgo.Figure() + layout = deepcopy(plot[u"layout"]) + + for color, graph in enumerate(graphs): + for idx, direction in enumerate((u"direction1", u"direction2")): + xaxis = list() + yaxis = list() + hovertext = list() try: - hdr_lat = test[u"latency"][ttype][direction][u"hdrh"] - # TODO: Workaround, HDRH data must be aligned to 4 - # bytes, remove when not needed. - hdr_lat += u"=" * (len(hdr_lat) % 4) - xaxis = list() - yaxis = list() - hovertext = list() - decoded = hdrh.histogram.HdrHistogram.decode(hdr_lat) - total_count = decoded.get_total_count() - for item in decoded.get_recorded_iterator(): - xaxis.append(item.value_iterated_to) - prob = float(item.count_added_in_this_iter_step) / \ - total_count * 100 - yaxis.append(prob) - hovertext.append( - f"Test: {name}
" - f"Direction: {directions[idx_col]}
" - f"Latency: {item.value_iterated_to}uSec
" - f"Probability: {prob:.2f}%
" - f"Percentile: " - f"{item.percentile_level_iterated_to:.2f}" - ) - marker_color = [COLORS[idx_row], ] * len(yaxis) - marker_color[xaxis.index( - decoded.get_value_at_percentile(50.0))] = u"red" - marker_color[xaxis.index( - decoded.get_value_at_percentile(90.0))] = u"red" - marker_color[xaxis.index( - decoded.get_value_at_percentile(95.0))] = u"red" - histograms.append( - plgo.Bar( - x=xaxis, - y=yaxis, - showlegend=False, - name=name, - marker={u"color": marker_color}, - hovertext=hovertext, - hoverinfo=u"text" - ) + decoded = hdrh.histogram.HdrHistogram.decode( + test[u"latency"][graph][direction][u"hdrh"] ) - except hdrh.codec.HdrLengthException as err: + except hdrh.codec.HdrLengthException: logging.warning( - f"No or invalid data for HDRHistogram for the test " - f"{name}\n{err}" + f"No data for direction {(u'W-E', u'E-W')[idx % 2]}" ) continue - if len(histograms) == 2: - traces.append(histograms) - tests.append(name) - else: - logging.warning(f"Invalid test type: {test[u'type']}") - continue + + for item in decoded.get_recorded_iterator(): + percentile = item.percentile_level_iterated_to + if percentile > 99.9999999: + continue + xaxis.append(percentile) + yaxis.append(item.value_iterated_to) + hovertext.append( + f"{desc[graph]}
" + f"Direction: {(u'W-E', u'E-W')[idx % 2]}
" + f"Percentile: {percentile:.5f}%
" + f"Latency: {item.value_iterated_to}uSec" + ) + fig.add_trace( + plgo.Scatter( + x=xaxis, + y=yaxis, + name=desc[graph], + mode=u"lines", + legendgroup=desc[graph], + showlegend=bool(idx), + line=dict( + color=COLORS[color], + dash=u"dash" if idx % 2 else u"solid" + ), + hovertext=hovertext, + hoverinfo=u"text" + ) + ) + + layout[u"title"][u"text"] = f"Latency: {name}" + fig.update_layout(layout) + + # Create plot + file_name = f"{plot[u'output-file']}-{name_link}.html" + logging.info(f" Writing file {file_name}") + + try: + # Export Plot + ploff.plot(fig, show_link=False, auto_open=False, + filename=file_name) + # Add link to the file: + if file_links and target_links: + with open(file_links, u"a") as file_handler: + file_handler.write( + f"- `{name_link} " + f"<{target_links}/{file_name.split(u'/')[-1]}>`_\n" + ) + except FileNotFoundError as err: + logging.error( + f"Not possible to write the link to the file " + f"{file_links}\n{err}" + ) + except PlotlyError as err: + logging.error(f" Finished with error: {repr(err)}") + + except hdrh.codec.HdrLengthException as err: + logging.warning(repr(err)) + continue + except (ValueError, KeyError) as err: logging.warning(repr(err)) + continue - if not tests: - logging.warning(f"No data for {plot_title}.") - return - fig = make_subplots( - rows=len(tests), - cols=2, - specs=[ - [{u"type": u"bar"}, {u"type": u"bar"}] for _ in range(len(tests)) - ] - ) +def plot_hdrh_lat_by_percentile_x_log(plot, input_data): + """Generate the plot(s) with algorithm: plot_hdrh_lat_by_percentile_x_log + specified in the specification file. + + :param plot: Plot to generate. + :param input_data: Data to process. + :type plot: pandas.Series + :type input_data: InputData + """ - layout_axes = dict( - gridcolor=u"rgb(220, 220, 220)", - linecolor=u"rgb(220, 220, 220)", - linewidth=1, - showgrid=True, - showline=True, - showticklabels=True, - tickcolor=u"rgb(220, 220, 220)", + # Transform the data + logging.info( + f" Creating the data set for the {plot.get(u'type', u'')} " + f"{plot.get(u'title', u'')}." ) + if plot.get(u"include", None): + data = input_data.filter_tests_by_name( + plot, + params=[u"name", u"latency", u"parent", u"tags", u"type"] + )[0][0] + elif plot.get(u"filter", None): + data = input_data.filter_data( + plot, + params=[u"name", u"latency", u"parent", u"tags", u"type"], + continue_on_error=True + )[0][0] + else: + job = list(plot[u"data"].keys())[0] + build = str(plot[u"data"][job][0]) + data = input_data.tests(job, build) + + if data is None or len(data) == 0: + logging.error(u"No data.") + return - for idx_row, test in enumerate(tests): - for idx_col in range(2): - fig.add_trace( - traces[idx_row][idx_col], - row=idx_row + 1, - col=idx_col + 1 - ) - fig.update_xaxes( - row=idx_row + 1, - col=idx_col + 1, - **layout_axes - ) - fig.update_yaxes( - row=idx_row + 1, - col=idx_col + 1, - **layout_axes - ) + desc = { + u"LAT0": u"No-load.", + u"PDR10": u"Low-load, 10% PDR.", + u"PDR50": u"Mid-load, 50% PDR.", + u"PDR90": u"High-load, 90% PDR.", + u"PDR": u"Full-load, 100% PDR.", + u"NDR10": u"Low-load, 10% NDR.", + u"NDR50": u"Mid-load, 50% NDR.", + u"NDR90": u"High-load, 90% NDR.", + u"NDR": u"Full-load, 100% NDR." 
+ } - layout = deepcopy(plot[u"layout"]) - - layout[u"title"][u"text"] = \ - f"Latency: {plot.get(u'graph-title', u'')}" - layout[u"height"] = 250 * len(tests) + 130 - - layout[u"annotations"][2][u"y"] = 1.06 - 0.008 * len(tests) - layout[u"annotations"][3][u"y"] = 1.06 - 0.008 * len(tests) - - for idx, test in enumerate(tests): - layout[u"annotations"].append({ - u"font": { - u"size": 14 - }, - u"showarrow": False, - u"text": f"{test}", - u"textangle": 0, - u"x": 0.5, - u"xanchor": u"center", - u"xref": u"paper", - u"y": 1.0 - float(idx) * 1.06 / len(tests), - u"yanchor": u"bottom", - u"yref": u"paper" - }) - - fig[u"layout"].update(layout) - - # Create plot - file_type = plot.get(u"output-file-type", u".html") - logging.info(f" Writing file {plot[u'output-file']}{file_type}.") - try: - # Export Plot - ploff.plot(fig, show_link=False, auto_open=False, - filename=f"{plot[u'output-file']}{file_type}") - except PlotlyError as err: - logging.error(f" Finished with error: {repr(err)}") + graphs = [ + u"LAT0", + u"PDR10", + u"PDR50", + u"PDR90" + ] + + file_links = plot.get(u"output-file-links", None) + target_links = plot.get(u"target-links", None) + + for test in data: + try: + if test[u"type"] not in (u"NDRPDR",): + logging.warning(f"Invalid test type: {test[u'type']}") + continue + name = re.sub(REGEX_NIC, u"", test[u"parent"]. + replace(u'-ndrpdr', u'').replace(u'2n1l-', u'')) + try: + nic = re.search(REGEX_NIC, test[u"parent"]).group(1) + except (IndexError, AttributeError, KeyError, ValueError): + nic = u"" + name_link = f"{nic}-{test[u'name']}".replace(u'-ndrpdr', u'') + + logging.info(f" Generating the graph: {name_link}") + + fig = plgo.Figure() + layout = deepcopy(plot[u"layout"]) + xaxis_max = 0 + + for color, graph in enumerate(graphs): + for idx, direction in enumerate((u"direction1", u"direction2")): + xaxis = list() + yaxis = list() + hovertext = list() + try: + decoded = hdrh.histogram.HdrHistogram.decode( + test[u"latency"][graph][direction][u"hdrh"] + ) + except hdrh.codec.HdrLengthException: + logging.warning( + f"No data for direction {(u'W-E', u'E-W')[idx % 2]}" + ) + continue + + for item in decoded.get_recorded_iterator(): + percentile = item.percentile_level_iterated_to + if percentile > 99.9999999: + continue + xaxis.append(100.0 / (100.0 - percentile)) + yaxis.append(item.value_iterated_to) + hovertext.append( + f"{desc[graph]}
" + f"Direction: {(u'W-E', u'E-W')[idx % 2]}
" + f"Percentile: {percentile:.5f}%
" + f"Latency: {item.value_iterated_to}uSec" + ) + fig.add_trace( + plgo.Scatter( + x=xaxis, + y=yaxis, + name=desc[graph], + mode=u"lines", + legendgroup=desc[graph], + showlegend=not(bool(idx)), + line=dict( + color=COLORS[color], + dash=u"dash" if idx % 2 else u"solid" + ), + hovertext=hovertext, + hoverinfo=u"text" + ) + ) + xaxis_max = max(xaxis) if xaxis_max < max( + xaxis) else xaxis_max + + layout[u"title"][u"text"] = f"Latency: {name}" + layout[u"xaxis"][u"range"] = [0, int(log(xaxis_max, 10)) + 1] + fig.update_layout(layout) + + # Create plot + file_name = f"{plot[u'output-file']}-{name_link}.html" + logging.info(f" Writing file {file_name}") + + try: + # Export Plot + ploff.plot(fig, show_link=False, auto_open=False, + filename=file_name) + # Add link to the file: + if file_links and target_links: + with open(file_links, u"a") as file_handler: + file_handler.write( + f"- `{name_link} " + f"<{target_links}/{file_name.split(u'/')[-1]}>`_\n" + ) + except FileNotFoundError as err: + logging.error( + f"Not possible to write the link to the file " + f"{file_links}\n{err}" + ) + except PlotlyError as err: + logging.error(f" Finished with error: {repr(err)}") + + except hdrh.codec.HdrLengthException as err: + logging.warning(repr(err)) + continue + + except (ValueError, KeyError) as err: + logging.warning(repr(err)) + continue def plot_nf_reconf_box_name(plot, input_data): @@ -326,7 +481,7 @@ def plot_nf_reconf_box_name(plot, input_data): # Create plot layout = deepcopy(plot[u"layout"]) layout[u"title"] = f"Time Lost: {layout[u'title']}" - layout[u"yaxis"][u"title"] = u"Implied Time Lost [s]" + layout[u"yaxis"][u"title"] = u"Effective Blocked Time [s]" layout[u"legend"][u"font"][u"size"] = 14 layout[u"yaxis"].pop(u"range") plpl = plgo.Figure(data=traces, layout=layout) @@ -363,34 +518,83 @@ def plot_perf_box_name(plot, input_data): f"{plot.get(u'title', u'')}." ) data = input_data.filter_tests_by_name( - plot, params=[u"throughput", u"parent", u"tags", u"type"]) + plot, + params=[u"throughput", u"gbps", u"result", u"parent", u"tags", u"type"]) if data is None: logging.error(u"No data.") return # Prepare the data for the plot + plot_title = plot.get(u"title", u"").lower() + + if u"-gbps" in plot_title: + value = u"gbps" + multiplier = 1e6 + else: + value = u"throughput" + multiplier = 1.0 y_vals = OrderedDict() - for job in data: - for build in job: - for test in build: - if y_vals.get(test[u"parent"], None) is None: - y_vals[test[u"parent"]] = list() - try: - if (test[u"type"] in (u"NDRPDR", ) and - u"-pdr" in plot.get(u"title", u"").lower()): - y_vals[test[u"parent"]].\ - append(test[u"throughput"][u"PDR"][u"LOWER"]) - elif (test[u"type"] in (u"NDRPDR", ) and - u"-ndr" in plot.get(u"title", u"").lower()): - y_vals[test[u"parent"]]. 
\ - append(test[u"throughput"][u"NDR"][u"LOWER"]) - elif test[u"type"] in (u"SOAK", ): - y_vals[test[u"parent"]].\ - append(test[u"throughput"][u"LOWER"]) - else: + test_type = u"" + + for item in plot.get(u"include", tuple()): + reg_ex = re.compile(str(item).lower()) + for job in data: + for build in job: + for test_id, test in build.iteritems(): + if not re.match(reg_ex, str(test_id).lower()): continue - except (KeyError, TypeError): - y_vals[test[u"parent"]].append(None) + if y_vals.get(test[u"parent"], None) is None: + y_vals[test[u"parent"]] = list() + try: + if test[u"type"] in (u"NDRPDR", u"CPS"): + test_type = test[u"type"] + + if u"-pdr" in plot_title: + ttype = u"PDR" + elif u"-ndr" in plot_title: + ttype = u"NDR" + else: + raise RuntimeError( + u"Wrong title. No information about test " + u"type. Add '-ndr' or '-pdr' to the test " + u"title." + ) + + y_vals[test[u"parent"]].append( + test[value][ttype][u"LOWER"] * multiplier + ) + + elif test[u"type"] in (u"SOAK",): + y_vals[test[u"parent"]]. \ + append(test[u"throughput"][u"LOWER"]) + test_type = u"SOAK" + + elif test[u"type"] in (u"HOSTSTACK",): + if u"LDPRELOAD" in test[u"tags"]: + y_vals[test[u"parent"]].append( + float( + test[u"result"][u"bits_per_second"] + ) / 1e3 + ) + elif u"VPPECHO" in test[u"tags"]: + y_vals[test[u"parent"]].append( + (float( + test[u"result"][u"client"][u"tx_data"] + ) * 8 / 1e3) / + ((float( + test[u"result"][u"client"][u"time"] + ) + + float( + test[u"result"][u"server"][u"time"]) + ) / 2) + ) + test_type = u"HOSTSTACK" + + else: + continue + + except (KeyError, TypeError): + y_vals[test[u"parent"]].append(None) # Add None to the lists with missing data max_len = 0 @@ -412,23 +616,26 @@ def plot_perf_box_name(plot, input_data): tst_name = re.sub(REGEX_NIC, u"", col.lower().replace(u'-ndrpdr', u''). replace(u'2n1l-', u'')) - traces.append( - plgo.Box( - x=[str(i + 1) + u'.'] * len(df_y[col]), - y=[y / 1000000 if y else None for y in df_y[col]], - name=( - f"{i + 1}. " - f"({nr_of_samples[i]:02d} " - f"run{u's' if nr_of_samples[i] > 1 else u''}) " - f"{tst_name}" - ), - hoverinfo=u"y+name" - ) + kwargs = dict( + x=[str(i + 1) + u'.'] * len(df_y[col]), + y=[y / 1e6 if y else None for y in df_y[col]], + name=( + f"{i + 1}. " + f"({nr_of_samples[i]:02d} " + f"run{u's' if nr_of_samples[i] > 1 else u''}) " + f"{tst_name}" + ), + hoverinfo=u"y+name" ) + if test_type in (u"SOAK", ): + kwargs[u"boxpoints"] = u"all" + + traces.append(plgo.Box(**kwargs)) + try: val_max = max(df_y[col]) if val_max: - y_max.append(int(val_max / 1000000) + 2) + y_max.append(int(val_max / 1e6) + 2) except (ValueError, TypeError) as err: logging.error(repr(err)) continue @@ -437,7 +644,12 @@ def plot_perf_box_name(plot, input_data): # Create plot layout = deepcopy(plot[u"layout"]) if layout.get(u"title", None): - layout[u"title"] = f"Throughput: {layout[u'title']}" + if test_type in (u"HOSTSTACK", ): + layout[u"title"] = f"Bandwidth: {layout[u'title']}" + elif test_type in (u"CPS", ): + layout[u"title"] = f"CPS: {layout[u'title']}" + else: + layout[u"title"] = f"Throughput: {layout[u'title']}" if y_max: layout[u"yaxis"][u"range"] = [0, max(y_max)] plpl = plgo.Figure(data=traces, layout=layout) @@ -457,179 +669,6 @@ def plot_perf_box_name(plot, input_data): return -def plot_lat_err_bars_name(plot, input_data): - """Generate the plot(s) with algorithm: plot_lat_err_bars_name - specified in the specification file. - - :param plot: Plot to generate. - :param input_data: Data to process. 
- :type plot: pandas.Series - :type input_data: InputData - """ - - # Transform the data - plot_title = plot.get(u"title", u"") - logging.info( - f" Creating data set for the {plot.get(u'type', u'')} {plot_title}." - ) - data = input_data.filter_tests_by_name( - plot, params=[u"latency", u"parent", u"tags", u"type"]) - if data is None: - logging.error(u"No data.") - return - - # Prepare the data for the plot - y_tmp_vals = OrderedDict() - for job in data: - for build in job: - for test in build: - try: - logging.debug(f"test[u'latency']: {test[u'latency']}\n") - except ValueError as err: - logging.warning(repr(err)) - if y_tmp_vals.get(test[u"parent"], None) is None: - y_tmp_vals[test[u"parent"]] = [ - list(), # direction1, min - list(), # direction1, avg - list(), # direction1, max - list(), # direction2, min - list(), # direction2, avg - list() # direction2, max - ] - try: - if test[u"type"] not in (u"NDRPDR", ): - logging.warning(f"Invalid test type: {test[u'type']}") - continue - if u"-pdr" in plot_title.lower(): - ttype = u"PDR" - elif u"-ndr" in plot_title.lower(): - ttype = u"NDR" - else: - logging.warning( - f"Invalid test type: {test[u'type']}" - ) - continue - y_tmp_vals[test[u"parent"]][0].append( - test[u"latency"][ttype][u"direction1"][u"min"]) - y_tmp_vals[test[u"parent"]][1].append( - test[u"latency"][ttype][u"direction1"][u"avg"]) - y_tmp_vals[test[u"parent"]][2].append( - test[u"latency"][ttype][u"direction1"][u"max"]) - y_tmp_vals[test[u"parent"]][3].append( - test[u"latency"][ttype][u"direction2"][u"min"]) - y_tmp_vals[test[u"parent"]][4].append( - test[u"latency"][ttype][u"direction2"][u"avg"]) - y_tmp_vals[test[u"parent"]][5].append( - test[u"latency"][ttype][u"direction2"][u"max"]) - except (KeyError, TypeError) as err: - logging.warning(repr(err)) - - x_vals = list() - y_vals = list() - y_mins = list() - y_maxs = list() - nr_of_samples = list() - for key, val in y_tmp_vals.items(): - name = re.sub(REGEX_NIC, u"", key.replace(u'-ndrpdr', u''). - replace(u'2n1l-', u'')) - x_vals.append(name) # dir 1 - y_vals.append(mean(val[1]) if val[1] else None) - y_mins.append(mean(val[0]) if val[0] else None) - y_maxs.append(mean(val[2]) if val[2] else None) - nr_of_samples.append(len(val[1]) if val[1] else 0) - x_vals.append(name) # dir 2 - y_vals.append(mean(val[4]) if val[4] else None) - y_mins.append(mean(val[3]) if val[3] else None) - y_maxs.append(mean(val[5]) if val[5] else None) - nr_of_samples.append(len(val[3]) if val[3] else 0) - - traces = list() - annotations = list() - - for idx, _ in enumerate(x_vals): - if not bool(int(idx % 2)): - direction = u"West-East" - else: - direction = u"East-West" - hovertext = ( - f"No. of Runs: {nr_of_samples[idx]}
" - f"Test: {x_vals[idx]}
" - f"Direction: {direction}
" - ) - if isinstance(y_maxs[idx], float): - hovertext += f"Max: {y_maxs[idx]:.2f}uSec
" - if isinstance(y_vals[idx], float): - hovertext += f"Mean: {y_vals[idx]:.2f}uSec
" - if isinstance(y_mins[idx], float): - hovertext += f"Min: {y_mins[idx]:.2f}uSec" - - if isinstance(y_maxs[idx], float) and isinstance(y_vals[idx], float): - array = [y_maxs[idx] - y_vals[idx], ] - else: - array = [None, ] - if isinstance(y_mins[idx], float) and isinstance(y_vals[idx], float): - arrayminus = [y_vals[idx] - y_mins[idx], ] - else: - arrayminus = [None, ] - traces.append(plgo.Scatter( - x=[idx, ], - y=[y_vals[idx], ], - name=x_vals[idx], - legendgroup=x_vals[idx], - showlegend=bool(int(idx % 2)), - mode=u"markers", - error_y=dict( - type=u"data", - symmetric=False, - array=array, - arrayminus=arrayminus, - color=COLORS[int(idx / 2)] - ), - marker=dict( - size=10, - color=COLORS[int(idx / 2)], - ), - text=hovertext, - hoverinfo=u"text", - )) - annotations.append(dict( - x=idx, - y=0, - xref=u"x", - yref=u"y", - xanchor=u"center", - yanchor=u"top", - text=u"E-W" if bool(int(idx % 2)) else u"W-E", - font=dict( - size=16, - ), - align=u"center", - showarrow=False - )) - - try: - # Create plot - file_type = plot.get(u"output-file-type", u".html") - logging.info(f" Writing file {plot[u'output-file']}{file_type}.") - layout = deepcopy(plot[u"layout"]) - if layout.get(u"title", None): - layout[u"title"] = f"Latency: {layout[u'title']}" - layout[u"annotations"] = annotations - plpl = plgo.Figure(data=traces, layout=layout) - - # Export Plot - ploff.plot( - plpl, - show_link=False, auto_open=False, - filename=f"{plot[u'output-file']}{file_type}" - ) - except PlotlyError as err: - logging.error( - f" Finished with error: {repr(err)}".replace(u"\n", u" ") - ) - return - - def plot_tsa_name(plot, input_data): """Generate the plot(s) with algorithm: plot_tsa_name @@ -647,43 +686,62 @@ def plot_tsa_name(plot, input_data): f" Creating data set for the {plot.get(u'type', u'')} {plot_title}." ) data = input_data.filter_tests_by_name( - plot, params=[u"throughput", u"parent", u"tags", u"type"]) + plot, + params=[u"throughput", u"gbps", u"parent", u"tags", u"type"] + ) if data is None: logging.error(u"No data.") return - y_vals = OrderedDict() - for job in data: - for build in job: - for test in build: - if y_vals.get(test[u"parent"], None) is None: - y_vals[test[u"parent"]] = { - u"1": list(), - u"2": list(), - u"4": list() - } - try: - if test[u"type"] not in (u"NDRPDR",): - continue + plot_title = plot_title.lower() - if u"-pdr" in plot_title.lower(): - ttype = u"PDR" - elif u"-ndr" in plot_title.lower(): - ttype = u"NDR" - else: - continue + if u"-gbps" in plot_title: + value = u"gbps" + h_unit = u"Gbps" + multiplier = 1e6 + else: + value = u"throughput" + h_unit = u"Mpps" + multiplier = 1.0 - if u"1C" in test[u"tags"]: - y_vals[test[u"parent"]][u"1"]. \ - append(test[u"throughput"][ttype][u"LOWER"]) - elif u"2C" in test[u"tags"]: - y_vals[test[u"parent"]][u"2"]. \ - append(test[u"throughput"][ttype][u"LOWER"]) - elif u"4C" in test[u"tags"]: - y_vals[test[u"parent"]][u"4"]. 
\ - append(test[u"throughput"][ttype][u"LOWER"]) - except (KeyError, TypeError): - pass + y_vals = OrderedDict() + for item in plot.get(u"include", tuple()): + reg_ex = re.compile(str(item).lower()) + for job in data: + for build in job: + for test_id, test in build.iteritems(): + if re.match(reg_ex, str(test_id).lower()): + if y_vals.get(test[u"parent"], None) is None: + y_vals[test[u"parent"]] = { + u"1": list(), + u"2": list(), + u"4": list() + } + try: + if test[u"type"] not in (u"NDRPDR", u"CPS"): + continue + + if u"-pdr" in plot_title: + ttype = u"PDR" + elif u"-ndr" in plot_title: + ttype = u"NDR" + else: + continue + + if u"1C" in test[u"tags"]: + y_vals[test[u"parent"]][u"1"].append( + test[value][ttype][u"LOWER"] * multiplier + ) + elif u"2C" in test[u"tags"]: + y_vals[test[u"parent"]][u"2"].append( + test[value][ttype][u"LOWER"] * multiplier + ) + elif u"4C" in test[u"tags"]: + y_vals[test[u"parent"]][u"4"].append( + test[value][ttype][u"LOWER"] * multiplier + ) + except (KeyError, TypeError): + pass if not y_vals: logging.warning(f"No data for the plot {plot.get(u'title', u'')}") @@ -695,7 +753,7 @@ def plot_tsa_name(plot, input_data): if test_val: avg_val = sum(test_val) / len(test_val) y_vals[test_name][key] = [avg_val, len(test_val)] - ideal = avg_val / (int(key) * 1000000.0) + ideal = avg_val / (int(key) * 1e6) if test_name not in y_1c_max or ideal > y_1c_max[test_name]: y_1c_max[test_name] = ideal @@ -703,7 +761,7 @@ def plot_tsa_name(plot, input_data): y_max = list() nic_limit = 0 lnk_limit = 0 - pci_limit = plot[u"limits"][u"pci"][u"pci-g3-x8"] + pci_limit = 0 for test_name, test_vals in y_vals.items(): try: if test_vals[u"1"][1]: @@ -713,10 +771,10 @@ def plot_tsa_name(plot, input_data): test_name.replace(u'-ndrpdr', u'').replace(u'2n1l-', u'') ) vals[name] = OrderedDict() - y_val_1 = test_vals[u"1"][0] / 1000000.0 - y_val_2 = test_vals[u"2"][0] / 1000000.0 if test_vals[u"2"][0] \ + y_val_1 = test_vals[u"1"][0] / 1e6 + y_val_2 = test_vals[u"2"][0] / 1e6 if test_vals[u"2"][0] \ else None - y_val_4 = test_vals[u"4"][0] / 1000000.0 if test_vals[u"4"][0] \ + y_val_4 = test_vals[u"4"][0] / 1e6 if test_vals[u"4"][0] \ else None vals[name][u"val"] = [y_val_1, y_val_2, y_val_4] @@ -766,6 +824,8 @@ def plot_tsa_name(plot, input_data): limit = plot[u"limits"][u"nic"][u"xl710"] elif u"x553" in test_name: limit = plot[u"limits"][u"nic"][u"x553"] + elif u"cx556a" in test_name: + limit = plot[u"limits"][u"nic"][u"cx556a"] else: limit = 0 if limit > nic_limit: @@ -785,108 +845,110 @@ def plot_tsa_name(plot, input_data): if limit > lnk_limit: lnk_limit = limit + if u"cx556a" in test_name: + limit = plot[u"limits"][u"pci"][u"pci-g3-x8"] + else: + limit = plot[u"limits"][u"pci"][u"pci-g3-x16"] + if limit > pci_limit: + pci_limit = limit + traces = list() annotations = list() x_vals = [1, 2, 4] # Limits: - try: - threshold = 1.1 * max(y_max) # 10% - except ValueError as err: - logging.error(err) - return - nic_limit /= 1000000.0 - traces.append(plgo.Scatter( - x=x_vals, - y=[nic_limit, ] * len(x_vals), - name=f"NIC: {nic_limit:.2f}Mpps", - showlegend=False, - mode=u"lines", - line=dict( - dash=u"dot", - color=COLORS[-1], - width=1), - hoverinfo=u"none" - )) - annotations.append(dict( - x=1, - y=nic_limit, - xref=u"x", - yref=u"y", - xanchor=u"left", - yanchor=u"bottom", - text=f"NIC: {nic_limit:.2f}Mpps", - font=dict( - size=14, - color=COLORS[-1], - ), - align=u"left", - showarrow=False - )) - y_max.append(nic_limit) - - lnk_limit /= 1000000.0 - if lnk_limit < threshold: - 
traces.append(plgo.Scatter( - x=x_vals, - y=[lnk_limit, ] * len(x_vals), - name=f"Link: {lnk_limit:.2f}Mpps", - showlegend=False, - mode=u"lines", - line=dict( - dash=u"dot", - color=COLORS[-2], - width=1), - hoverinfo=u"none" - )) - annotations.append(dict( - x=1, - y=lnk_limit, - xref=u"x", - yref=u"y", - xanchor=u"left", - yanchor=u"bottom", - text=f"Link: {lnk_limit:.2f}Mpps", - font=dict( - size=14, - color=COLORS[-2], - ), - align=u"left", - showarrow=False - )) - y_max.append(lnk_limit) - - pci_limit /= 1000000.0 - if (pci_limit < threshold and - (pci_limit < lnk_limit * 0.95 or lnk_limit > lnk_limit * 1.05)): - traces.append(plgo.Scatter( - x=x_vals, - y=[pci_limit, ] * len(x_vals), - name=f"PCIe: {pci_limit:.2f}Mpps", - showlegend=False, - mode=u"lines", - line=dict( - dash=u"dot", - color=COLORS[-3], - width=1), - hoverinfo=u"none" - )) - annotations.append(dict( - x=1, - y=pci_limit, - xref=u"x", - yref=u"y", - xanchor=u"left", - yanchor=u"bottom", - text=f"PCIe: {pci_limit:.2f}Mpps", - font=dict( - size=14, - color=COLORS[-3], - ), - align=u"left", - showarrow=False - )) - y_max.append(pci_limit) + if u"-gbps" not in plot_title and u"-cps-" not in plot_title: + nic_limit /= 1e6 + lnk_limit /= 1e6 + pci_limit /= 1e6 + min_limit = min((nic_limit, lnk_limit, pci_limit)) + if nic_limit == min_limit: + traces.append(plgo.Scatter( + x=x_vals, + y=[nic_limit, ] * len(x_vals), + name=f"NIC: {nic_limit:.2f}Mpps", + showlegend=False, + mode=u"lines", + line=dict( + dash=u"dot", + color=COLORS[-1], + width=1), + hoverinfo=u"none" + )) + annotations.append(dict( + x=1, + y=nic_limit, + xref=u"x", + yref=u"y", + xanchor=u"left", + yanchor=u"bottom", + text=f"NIC: {nic_limit:.2f}Mpps", + font=dict( + size=14, + color=COLORS[-1], + ), + align=u"left", + showarrow=False + )) + y_max.append(nic_limit) + elif lnk_limit == min_limit: + traces.append(plgo.Scatter( + x=x_vals, + y=[lnk_limit, ] * len(x_vals), + name=f"Link: {lnk_limit:.2f}Mpps", + showlegend=False, + mode=u"lines", + line=dict( + dash=u"dot", + color=COLORS[-1], + width=1), + hoverinfo=u"none" + )) + annotations.append(dict( + x=1, + y=lnk_limit, + xref=u"x", + yref=u"y", + xanchor=u"left", + yanchor=u"bottom", + text=f"Link: {lnk_limit:.2f}Mpps", + font=dict( + size=14, + color=COLORS[-1], + ), + align=u"left", + showarrow=False + )) + y_max.append(lnk_limit) + elif pci_limit == min_limit: + traces.append(plgo.Scatter( + x=x_vals, + y=[pci_limit, ] * len(x_vals), + name=f"PCIe: {pci_limit:.2f}Mpps", + showlegend=False, + mode=u"lines", + line=dict( + dash=u"dot", + color=COLORS[-1], + width=1), + hoverinfo=u"none" + )) + annotations.append(dict( + x=1, + y=pci_limit, + xref=u"x", + yref=u"y", + xanchor=u"left", + yanchor=u"bottom", + text=f"PCIe: {pci_limit:.2f}Mpps", + font=dict( + size=14, + color=COLORS[-1], + ), + align=u"left", + showarrow=False + )) + y_max.append(pci_limit) # Perfect and measured: cidx = 0 @@ -898,7 +960,7 @@ def plot_tsa_name(plot, input_data): if isinstance(val[u"val"][idx], float): htext += ( f"No. of Runs: {val[u'count'][idx]}
" - f"Mean: {val[u'val'][idx]:.2f}Mpps
" + f"Mean: {val[u'val'][idx]:.2f}{h_unit}
" ) if isinstance(val[u"diff"][idx], float): htext += f"Diff: {round(val[u'diff'][idx]):.0f}%
" @@ -1071,7 +1133,7 @@ def plot_nf_heatmap(plot, input_data): regex_cn = re.compile(r'^(\d*)R(\d*)C$') regex_test_name = re.compile(r'^.*-(\d+ch|\d+pl)-' r'(\d+mif|\d+vh)-' - r'(\d+vm\d+t|\d+dcr\d+t).*$') + r'(\d+vm\d+t|\d+dcr\d+t|\d+dcr\d+c).*$') vals = dict() # Transform the data @@ -1370,15 +1432,12 @@ def plot_nf_heatmap(plot, input_data): plpl = plgo.Figure(data=traces, layout=layout) # Export Plot - logging.info( - f" Writing file {plot[u'output-file']}" - f"{plot[u'output-file-type']}." - ) + logging.info(f" Writing file {plot[u'output-file']}.html") ploff.plot( plpl, show_link=False, auto_open=False, - filename=f"{plot[u'output-file']}{plot[u'output-file-type']}" + filename=f"{plot[u'output-file']}.html" ) except PlotlyError as err: logging.error(