X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_plots.py;h=32f146bca84d5e412a1cc7ee6759496b1f0fd896;hp=f406539eaee8fc441a416f879fb375002ac80de6;hb=670a905fcf26395e2064aab79449fe582eec5853;hpb=32bbad707629a44d17a7e9958b6ec462ba77fe49 diff --git a/resources/tools/presentation/generator_plots.py b/resources/tools/presentation/generator_plots.py index f406539eae..32f146bca8 100644 --- a/resources/tools/presentation/generator_plots.py +++ b/resources/tools/presentation/generator_plots.py @@ -107,7 +107,7 @@ def plot_performance_box(plot, input_data): y_sorted = OrderedDict() y_tags_l = {s: [t.lower() for t in ts] for s, ts in y_tags.items()} for tag in order: - logging.info(tag) + logging.debug(tag) for suite, tags in y_tags_l.items(): if "not " in tag: tag = tag.split(" ")[-1] @@ -119,9 +119,9 @@ def plot_performance_box(plot, input_data): try: y_sorted[suite] = y_vals.pop(suite) y_tags_l.pop(suite) - logging.info(suite) + logging.debug(suite) except KeyError as err: - logging.error("Not found: {0}".format(err)) + logging.error("Not found: {0}".format(repr(err))) finally: break else: @@ -129,9 +129,11 @@ def plot_performance_box(plot, input_data): # Add None to the lists with missing data max_len = 0 + nr_of_samples = list() for val in y_sorted.values(): if len(val) > max_len: max_len = len(val) + nr_of_samples.append(len(val)) for key, val in y_sorted.items(): if len(val) < max_len: val.extend([None for _ in range(max_len - len(val))]) @@ -142,9 +144,23 @@ def plot_performance_box(plot, input_data): df.head() y_max = list() for i, col in enumerate(df.columns): - name = "{0}. {1}".format(i + 1, col.lower().replace('-ndrpdrdisc', ''). - replace('-ndrpdr', '')) - logging.info(name) + name = "{nr}. ({samples:02d} run{plural}) {name}".\ + format(nr=(i + 1), + samples=nr_of_samples[i], + plural='s' if nr_of_samples[i] > 1 else '', + name=col.lower().replace('-ndrpdr', '')) + if len(name) > 50: + name_lst = name.split('-') + name = "" + split_name = True + for segment in name_lst: + if (len(name) + len(segment) + 1) > 50 and split_name: + name += "
" + split_name = False + name += segment + '-' + name = name[:-1] + + logging.debug(name) traces.append(plgo.Box(x=[str(i + 1) + '.'] * len(df[col]), y=[y / 1000000 if y else None for y in df[col]], name=name, @@ -152,7 +168,7 @@ def plot_performance_box(plot, input_data): try: val_max = max(df[col]) except ValueError as err: - logging.error(err) + logging.error(repr(err)) continue if val_max: y_max.append(int(val_max / 1000000) + 1) @@ -175,7 +191,7 @@ def plot_performance_box(plot, input_data): plot["output-file-type"])) except PlotlyError as err: logging.error(" Finished with error: {}". - format(str(err).replace("\n", " "))) + format(repr(err).replace("\n", " "))) return @@ -204,6 +220,11 @@ def plot_latency_error_bars(plot, input_data): for job in data: for build in job: for test in build: + try: + logging.debug("test['latency']: {0}\n". + format(test["latency"])) + except ValueError as err: + logging.warning(repr(err)) if y_tmp_vals.get(test["parent"], None) is None: y_tmp_vals[test["parent"]] = [ list(), # direction1, min @@ -221,6 +242,8 @@ def plot_latency_error_bars(plot, input_data): elif "-ndr" in plot_title.lower(): ttype = "NDR" else: + logging.warning("Invalid test type: {0}". + format(test["type"])) continue y_tmp_vals[test["parent"]][0].append( test["latency"][ttype]["direction1"]["min"]) @@ -235,9 +258,12 @@ def plot_latency_error_bars(plot, input_data): y_tmp_vals[test["parent"]][5].append( test["latency"][ttype]["direction2"]["max"]) else: + logging.warning("Invalid test type: {0}". + format(test["type"])) continue - except (KeyError, TypeError): - pass + except (KeyError, TypeError) as err: + logging.warning(repr(err)) + logging.debug("y_tmp_vals: {0}\n".format(y_tmp_vals)) # Sort the tests order = plot.get("sort", None) @@ -245,7 +271,7 @@ def plot_latency_error_bars(plot, input_data): y_sorted = OrderedDict() y_tags_l = {s: [t.lower() for t in ts] for s, ts in y_tags.items()} for tag in order: - logging.info(tag) + logging.debug(tag) for suite, tags in y_tags_l.items(): if "not " in tag: tag = tag.split(" ")[-1] @@ -257,44 +283,65 @@ def plot_latency_error_bars(plot, input_data): try: y_sorted[suite] = y_tmp_vals.pop(suite) y_tags_l.pop(suite) - logging.info(suite) + logging.debug(suite) except KeyError as err: - logging.error("Not found: {0}".format(err)) + logging.error("Not found: {0}".format(repr(err))) finally: break else: y_sorted = y_tmp_vals + logging.debug("y_sorted: {0}\n".format(y_sorted)) x_vals = list() y_vals = list() y_mins = list() y_maxs = list() + nr_of_samples = list() for key, val in y_sorted.items(): - key = "-".join(key.split("-")[1:-1]) - x_vals.append(key) # dir 1 + name = "-".join(key.split("-")[1:-1]) + if len(name) > 50: + name_lst = name.split('-') + name = "" + split_name = True + for segment in name_lst: + if (len(name) + len(segment) + 1) > 50 and split_name: + name += "
" + split_name = False + name += segment + '-' + name = name[:-1] + x_vals.append(name) # dir 1 y_vals.append(mean(val[1]) if val[1] else None) y_mins.append(mean(val[0]) if val[0] else None) y_maxs.append(mean(val[2]) if val[2] else None) - x_vals.append(key) # dir 2 + nr_of_samples.append(len(val[1]) if val[1] else 0) + x_vals.append(name) # dir 2 y_vals.append(mean(val[4]) if val[4] else None) y_mins.append(mean(val[3]) if val[3] else None) y_maxs.append(mean(val[5]) if val[5] else None) + nr_of_samples.append(len(val[3]) if val[3] else 0) + logging.debug("x_vals :{0}\n".format(x_vals)) + logging.debug("y_vals :{0}\n".format(y_vals)) + logging.debug("y_mins :{0}\n".format(y_mins)) + logging.debug("y_maxs :{0}\n".format(y_maxs)) + logging.debug("nr_of_samples :{0}\n".format(nr_of_samples)) traces = list() annotations = list() for idx in range(len(x_vals)): if not bool(int(idx % 2)): - direction = "West - East" + direction = "West-East" else: - direction = "East - West" - hovertext = ("Test: {test}
" + direction = "East-West" + hovertext = ("No. of Runs: {nr}
" + "Test: {test}
" "Direction: {dir}
".format(test=x_vals[idx], - dir=direction)) + dir=direction, + nr=nr_of_samples[idx])) if isinstance(y_maxs[idx], float): hovertext += "Max: {max:.2f}uSec
".format(max=y_maxs[idx]) if isinstance(y_vals[idx], float): - hovertext += "Avg: {avg:.2f}uSec
".format(avg=y_vals[idx]) + hovertext += "Mean: {avg:.2f}uSec
".format(avg=y_vals[idx]) if isinstance(y_mins[idx], float): hovertext += "Min: {min:.2f}uSec".format(min=y_mins[idx]) @@ -306,6 +353,9 @@ def plot_latency_error_bars(plot, input_data): arrayminus = [y_vals[idx] - y_mins[idx], ] else: arrayminus = [None, ] + logging.debug("y_vals[{1}] :{0}\n".format(y_vals[idx], idx)) + logging.debug("array :{0}\n".format(array)) + logging.debug("arrayminus :{0}\n".format(arrayminus)) traces.append(plgo.Scatter( x=[idx, ], y=[y_vals[idx], ], @@ -423,9 +473,11 @@ def plot_throughput_speedup_analysis(plot, input_data): for test_name, test_vals in y_vals.items(): for key, test_val in test_vals.items(): if test_val: - y_vals[test_name][key] = sum(test_val) / len(test_val) - if key == "1": - y_1c_max[test_name] = max(test_val) / 1000000.0 + avg_val = sum(test_val) / len(test_val) + y_vals[test_name][key] = (avg_val, len(test_val)) + ideal = avg_val / (int(key) * 1000000.0) + if test_name not in y_1c_max or ideal > y_1c_max[test_name]: + y_1c_max[test_name] = ideal vals = dict() y_max = list() @@ -433,38 +485,57 @@ def plot_throughput_speedup_analysis(plot, input_data): lnk_limit = 0 pci_limit = plot["limits"]["pci"]["pci-g3-x8"] for test_name, test_vals in y_vals.items(): - if test_vals["1"]: - name = "-".join(test_name.split('-')[1:-1]) - - vals[name] = dict() - y_val_1 = test_vals["1"] / 1000000.0 - y_val_2 = test_vals["2"] / 1000000.0 if test_vals["2"] else None - y_val_4 = test_vals["4"] / 1000000.0 if test_vals["4"] else None - - vals[name]["val"] = [y_val_1, y_val_2, y_val_4] - vals[name]["rel"] = [1.0, None, None] - vals[name]["ideal"] = [y_1c_max[test_name], - y_1c_max[test_name] * 2, - y_1c_max[test_name] * 4] - vals[name]["diff"] = \ - [(y_val_1 - y_1c_max[test_name]) * 100 / y_val_1, None, None] - - try: - val_max = max(max(vals[name]["val"], vals[name]["ideal"])) - except ValueError as err: - logging.error(err) - continue - if val_max: - y_max.append(int((val_max / 10) + 1) * 10) - - if y_val_2: - vals[name]["rel"][1] = round(y_val_2 / y_val_1, 2) - vals[name]["diff"][1] = \ - (y_val_2 - vals[name]["ideal"][1]) * 100 / y_val_2 - if y_val_4: - vals[name]["rel"][2] = round(y_val_4 / y_val_1, 2) - vals[name]["diff"][2] = \ - (y_val_4 - vals[name]["ideal"][2]) * 100 / y_val_4 + try: + if test_vals["1"][1]: + name = "-".join(test_name.split('-')[1:-1]) + if len(name) > 50: + name_lst = name.split('-') + name = "" + split_name = True + for segment in name_lst: + if (len(name) + len(segment) + 1) > 50 and split_name: + name += "
" + split_name = False + name += segment + '-' + name = name[:-1] + + vals[name] = dict() + y_val_1 = test_vals["1"][0] / 1000000.0 + y_val_2 = test_vals["2"][0] / 1000000.0 if test_vals["2"][0] \ + else None + y_val_4 = test_vals["4"][0] / 1000000.0 if test_vals["4"][0] \ + else None + + vals[name]["val"] = [y_val_1, y_val_2, y_val_4] + vals[name]["rel"] = [1.0, None, None] + vals[name]["ideal"] = [y_1c_max[test_name], + y_1c_max[test_name] * 2, + y_1c_max[test_name] * 4] + vals[name]["diff"] = [(y_val_1 - y_1c_max[test_name]) * 100 / + y_val_1, None, None] + vals[name]["count"] = [test_vals["1"][1], + test_vals["2"][1], + test_vals["4"][1]] + + try: + val_max = max(max(vals[name]["val"], vals[name]["ideal"])) + except ValueError as err: + logging.error(err) + continue + if val_max: + y_max.append(int((val_max / 10) + 1) * 10) + + if y_val_2: + vals[name]["rel"][1] = round(y_val_2 / y_val_1, 2) + vals[name]["diff"][1] = \ + (y_val_2 - vals[name]["ideal"][1]) * 100 / y_val_2 + if y_val_4: + vals[name]["rel"][2] = round(y_val_4 / y_val_1, 2) + vals[name]["diff"][2] = \ + (y_val_4 - vals[name]["ideal"][2]) * 100 / y_val_4 + except IndexError as err: + logging.warning("No data for '{0}'".format(test_name)) + logging.warning(repr(err)) # Limits: if "x520" in test_name: @@ -475,6 +546,8 @@ def plot_throughput_speedup_analysis(plot, input_data): limit = plot["limits"]["nic"]["xxv710"] elif "xl710" in test_name: limit = plot["limits"]["nic"]["xl710"] + elif "x553" in test_name: + limit = plot["limits"]["nic"]["x553"] else: limit = 0 if limit > nic_limit: @@ -620,45 +693,51 @@ def plot_throughput_speedup_analysis(plot, input_data): cidx = 0 for name, val in y_sorted.iteritems(): hovertext = list() - for idx in range(len(val["val"])): - htext = "" - if isinstance(val["val"][idx], float): - htext += "value: {0:.2f}Mpps
".format(val["val"][idx]) - if isinstance(val["diff"][idx], float): - htext += "diff: {0:.0f}%
".format(round(val["diff"][idx])) - if isinstance(val["rel"][idx], float): - htext += "speedup: {0:.2f}".format(val["rel"][idx]) - hovertext.append(htext) - traces.append(plgo.Scatter(x=x_vals, - y=val["val"], - name=name, - legendgroup=name, - mode="lines+markers", - line=dict( - color=COLORS[cidx], - width=2), - marker=dict( - symbol="circle", - size=10 - ), - text=hovertext, - hoverinfo="text+name" - )) - traces.append(plgo.Scatter(x=x_vals, - y=val["ideal"], - name="{0} perfect".format(name), - legendgroup=name, - showlegend=False, - mode="lines", - line=dict( - color=COLORS[cidx], - width=2, - dash="dash"), - text=["perfect: {0:.2f}Mpps".format(y) - for y in val["ideal"]], - hoverinfo="text" - )) - cidx += 1 + try: + for idx in range(len(val["val"])): + htext = "" + if isinstance(val["val"][idx], float): + htext += "No. of Runs: {1}
" \ + "Mean: {0:.2f}Mpps
".format(val["val"][idx], + val["count"][idx]) + if isinstance(val["diff"][idx], float): + htext += "Diff: {0:.0f}%
".format(round(val["diff"][idx])) + if isinstance(val["rel"][idx], float): + htext += "Speedup: {0:.2f}".format(val["rel"][idx]) + hovertext.append(htext) + traces.append(plgo.Scatter(x=x_vals, + y=val["val"], + name=name, + legendgroup=name, + mode="lines+markers", + line=dict( + color=COLORS[cidx], + width=2), + marker=dict( + symbol="circle", + size=10 + ), + text=hovertext, + hoverinfo="text+name" + )) + traces.append(plgo.Scatter(x=x_vals, + y=val["ideal"], + name="{0} perfect".format(name), + legendgroup=name, + showlegend=False, + mode="lines", + line=dict( + color=COLORS[cidx], + width=2, + dash="dash"), + text=["Perfect: {0:.2f}Mpps".format(y) + for y in val["ideal"]], + hoverinfo="text" + )) + cidx += 1 + except (IndexError, ValueError, KeyError) as err: + logging.warning("No data for '{0}'".format(name)) + logging.warning(repr(err)) try: # Create plot @@ -714,9 +793,11 @@ def plot_http_server_performance_box(plot, input_data): # Add None to the lists with missing data max_len = 0 + nr_of_samples = list() for val in y_vals.values(): if len(val) > max_len: max_len = len(val) + nr_of_samples.append(len(val)) for key, val in y_vals.items(): if len(val) < max_len: val.extend([None for _ in range(max_len - len(val))]) @@ -726,8 +807,22 @@ def plot_http_server_performance_box(plot, input_data): df = pd.DataFrame(y_vals) df.head() for i, col in enumerate(df.columns): - name = "{0}. {1}".format(i + 1, col.lower().replace('-cps', ''). - replace('-rps', '')) + name = "{nr}. ({samples:02d} run{plural}) {name}".\ + format(nr=(i + 1), + samples=nr_of_samples[i], + plural='s' if nr_of_samples[i] > 1 else '', + name=col.lower().replace('-ndrpdr', '')) + if len(name) > 50: + name_lst = name.split('-') + name = "" + split_name = True + for segment in name_lst: + if (len(name) + len(segment) + 1) > 50 and split_name: + name += "
" + split_name = False + name += segment + '-' + name = name[:-1] + traces.append(plgo.Box(x=[str(i + 1) + '.'] * len(df[col]), y=df[col], name=name,