X-Git-Url: https://gerrit.fd.io/r/gitweb?p=csit.git;a=blobdiff_plain;f=resources%2Ftools%2Fpresentation%2Fgenerator_tables.py;h=a995711bcb54c08fa2cfb10770cf006f71ce80b7;hp=f2eec7e28cbba6bdf8578e22221ed3a2fbf13483;hb=c98f749024b1f42d0065a16ac1ee904a4c9ca704;hpb=3fc6ce9f08f0a1c1293dfc032fdddfe5e3f7eb16

diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index f2eec7e28c..a995711bcb 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -17,6 +17,7 @@
 
 import logging
 import csv
+import math
 import re
 
 from collections import OrderedDict
@@ -28,6 +29,7 @@
 import plotly.graph_objects as go
 import plotly.offline as ploff
 import pandas as pd
+import prettytable
 
 from numpy import nan, isnan
 from yaml import load, FullLoader, YAMLError
@@ -58,7 +60,8 @@ def generate_tables(spec, data):
         u"table_failed_tests_html": table_failed_tests_html,
         u"table_oper_data_html": table_oper_data_html,
         u"table_comparison": table_comparison,
-        u"table_weekly_comparison": table_weekly_comparison
+        u"table_weekly_comparison": table_weekly_comparison,
+        u"table_job_spec_duration": table_job_spec_duration
     }
 
     logging.info(u"Generating the tables ...")
@@ -75,6 +78,96 @@ def generate_tables(spec, data):
     logging.info(u"Done.")
 
 
+def table_job_spec_duration(table, input_data):
+    """Generate the table(s) with algorithm: table_job_spec_duration
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    _ = input_data
+
+    logging.info(f"  Generating the table {table.get(u'title', u'')} ...")
+
+    jb_type = table.get(u"jb-type", None)
+
+    tbl_lst = list()
+    if jb_type == u"iterative":
+        for line in table.get(u"lines", tuple()):
+            tbl_itm = {
+                u"name": line.get(u"job-spec", u""),
+                u"data": list()
+            }
+            for job, builds in line.get(u"data-set", dict()).items():
+                for build_nr in builds:
+                    try:
+                        minutes = input_data.metadata(
+                            job, str(build_nr)
+                        )[u"elapsedtime"] // 60000
+                    except (KeyError, IndexError, ValueError, AttributeError):
+                        continue
+                    tbl_itm[u"data"].append(minutes)
+            tbl_itm[u"mean"] = mean(tbl_itm[u"data"])
+            tbl_itm[u"stdev"] = stdev(tbl_itm[u"data"])
+            tbl_lst.append(tbl_itm)
+    elif jb_type == u"coverage":
+        job = table.get(u"data", None)
+        if not job:
+            return
+        for line in table.get(u"lines", tuple()):
+            try:
+                tbl_itm = {
+                    u"name": line.get(u"job-spec", u""),
+                    u"mean": input_data.metadata(
+                        list(job.keys())[0], str(line[u"build"])
+                    )[u"elapsedtime"] // 60000,
+                    u"stdev": float(u"nan")
+                }
+                tbl_itm[u"data"] = [tbl_itm[u"mean"], ]
+            except (KeyError, IndexError, ValueError, AttributeError):
+                continue
+            tbl_lst.append(tbl_itm)
+    else:
+        logging.warning(f"Wrong type of job-spec: {jb_type}. Skipping.")
+        return
+
+    for line in tbl_lst:
+        line[u"mean"] = \
+            f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}"
+        if math.isnan(line[u"stdev"]):
+            line[u"stdev"] = u""
+        else:
+            line[u"stdev"] = \
+                f"{int(line[u'stdev'] // 60):02d}:{int(line[u'stdev'] % 60):02d}"
+
+    if not tbl_lst:
+        return
+
+    rows = list()
+    for itm in tbl_lst:
+        rows.append([
+            itm[u"name"],
+            f"{len(itm[u'data'])}",
+            f"{itm[u'mean']} +- {itm[u'stdev']}"
+            if itm[u"stdev"] != u"" else f"{itm[u'mean']}"
+        ])
+
+    txt_table = prettytable.PrettyTable(
+        [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"]
+    )
+    for row in rows:
+        txt_table.add_row(row)
+    txt_table.align = u"r"
+    txt_table.align[u"Job Specification"] = u"l"
+
+    file_name = f"{table.get(u'output-file', u'')}.txt"
+    with open(file_name, u"wt", encoding='utf-8') as txt_file:
+        txt_file.write(str(txt_table))
+
+
 def table_oper_data_html(table, input_data):
     """Generate the table(s) with algorithm: html_table_oper_data
     specified in the specification file.
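Note (illustration, not part of the diff): the new table_job_spec_duration
algorithm above converts Jenkins "elapsedtime" metadata (milliseconds) into
whole minutes and then renders minutes as HH:MM. A minimal standalone sketch
of that arithmetic, with a made-up elapsed time:

    elapsedtime = 9_540_000              # hypothetical value, in milliseconds
    minutes = elapsedtime // 60000       # -> 159 minutes
    hh_mm = f"{int(minutes // 60):02d}:{int(minutes % 60):02d}"
    print(hh_mm)                         # -> "02:39"
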
Skipping.") + return + + for line in tbl_lst: + line[u"mean"] = \ + f"{int(line[u'mean'] // 60):02d}:{int(line[u'mean'] % 60):02d}" + if math.isnan(line[u"stdev"]): + line[u"stdev"] = u"" + else: + line[u"stdev"] = \ + f"{int(line[u'stdev'] //60):02d}:{int(line[u'stdev'] % 60):02d}" + + if not tbl_lst: + return + + rows = list() + for itm in tbl_lst: + rows.append([ + itm[u"name"], + f"{len(itm[u'data'])}", + f"{itm[u'mean']} +- {itm[u'stdev']}" + if itm[u"stdev"] != u"" else f"{itm[u'mean']}" + ]) + + txt_table = prettytable.PrettyTable( + [u"Job Specification", u"Nr of Runs", u"Duration [HH:MM]"] + ) + for row in rows: + txt_table.add_row(row) + txt_table.align = u"r" + txt_table.align[u"Job Specification"] = u"l" + + file_name = f"{table.get(u'output-file', u'')}.txt" + with open(file_name, u"wt", encoding='utf-8') as txt_file: + txt_file.write(str(txt_table)) + + def table_oper_data_html(table, input_data): """Generate the table(s) with algorithm: html_table_oper_data specified in the specification file. @@ -93,7 +186,7 @@ def table_oper_data_html(table, input_data): ) data = input_data.filter_data( table, - params=[u"name", u"parent", u"show-run", u"type"], + params=[u"name", u"parent", u"telemetry-show-run", u"type"], continue_on_error=True ) if data.empty: @@ -146,7 +239,8 @@ def table_oper_data_html(table, input_data): ) thead.text = u"\t" - if tst_data.get(u"show-run", u"No Data") == u"No Data": + if tst_data.get(u"telemetry-show-run", None) is None or \ + isinstance(tst_data[u"telemetry-show-run"], str): trow = ET.SubElement( tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]) ) @@ -176,17 +270,56 @@ def table_oper_data_html(table, input_data): u"Average Vector Size" ) - for dut_data in tst_data[u"show-run"].values(): + for dut_data in tst_data[u"telemetry-show-run"].values(): trow = ET.SubElement( tbl, u"tr", attrib=dict(bgcolor=colors[u"header"]) ) tcol = ET.SubElement( trow, u"td", attrib=dict(align=u"left", colspan=u"6") ) - if dut_data.get(u"threads", None) is None: + if dut_data.get(u"runtime", None) is None: tcol.text = u"No Data" continue + runtime = dict() + for item in dut_data[u"runtime"].get(u"data", tuple()): + tid = int(item[u"labels"][u"thread_id"]) + if runtime.get(tid, None) is None: + runtime[tid] = dict() + gnode = item[u"labels"][u"graph_node"] + if runtime[tid].get(gnode, None) is None: + runtime[tid][gnode] = dict() + try: + runtime[tid][gnode][item[u"name"]] = float(item[u"value"]) + except ValueError: + runtime[tid][gnode][item[u"name"]] = item[u"value"] + + threads = dict({idx: list() for idx in range(len(runtime))}) + for idx, run_data in runtime.items(): + for gnode, gdata in run_data.items(): + if gdata[u"vectors"] > 0: + clocks = gdata[u"clocks"] / gdata[u"vectors"] + elif gdata[u"calls"] > 0: + clocks = gdata[u"clocks"] / gdata[u"calls"] + elif gdata[u"suspends"] > 0: + clocks = gdata[u"clocks"] / gdata[u"suspends"] + else: + clocks = 0.0 + if gdata[u"calls"] > 0: + vectors_call = gdata[u"vectors"] / gdata[u"calls"] + else: + vectors_call = 0.0 + if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \ + int(gdata[u"suspends"]): + threads[idx].append([ + gnode, + int(gdata[u"calls"]), + int(gdata[u"vectors"]), + int(gdata[u"suspends"]), + clocks, + vectors_call + ]) + bold = ET.SubElement(tcol, u"b") bold.text = ( f"Host IP: {dut_data.get(u'host', '')}, " @@ -200,7 +333,7 @@ def table_oper_data_html(table, input_data): ) thead.text = u"\t" - for thread_nr, thread in dut_data[u"threads"].items(): + for thread_nr, thread in threads.items(): trow = 
@@ -200,7 +333,7 @@ def table_oper_data_html(table, input_data):
             )
             thead.text = u"\t"
 
-            for thread_nr, thread in dut_data[u"threads"].items():
+            for thread_nr, thread in threads.items():
                 trow = ET.SubElement(
                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                 )
@@ -323,7 +456,8 @@ def table_merged_details(table, input_data):
         suite_name = suite[u"name"]
         table_lst = list()
         for test in data.keys():
-            if data[test][u"parent"] not in suite_name:
+            if data[test][u"status"] != u"PASS" or \
+                    data[test][u"parent"] not in suite_name:
                 continue
             row_lst = list()
             for column in table[u"columns"]:
@@ -348,13 +482,14 @@ def table_merged_details(table, input_data):
                         # Temporary solution: remove NDR results from message:
                         if bool(table.get(u'remove-ndr', False)):
                             try:
-                                col_data = col_data.split(u" |br| ", 1)[1]
+                                col_data = col_data.split(u"\n", 1)[1]
                             except IndexError:
                                 pass
+                        col_data = col_data.replace(u'\n', u' |br| ').\
+                            replace(u'\r', u'').replace(u'"', u"'")
                         col_data = f" |prein| {col_data} |preout| "
-                    elif column[u"data"].split(u" ")[1] in \
-                            (u"conf-history", u"show-run"):
-                        col_data = col_data.replace(u" |br| ", u"", 1)
+                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
+                        col_data = col_data.replace(u'\n', u' |br| ')
                         col_data = f" |prein| {col_data[:-5]} |preout| "
                     row_lst.append(f'"{col_data}"')
                 except KeyError:
@@ -386,12 +521,7 @@ def _tpc_modify_test_name(test_name, ignore_nic=False):
     :rtype: str
     """
     test_name_mod = test_name.\
-        replace(u"-ndrpdrdisc", u""). \
         replace(u"-ndrpdr", u"").\
-        replace(u"-pdrdisc", u""). \
-        replace(u"-ndrdisc", u"").\
-        replace(u"-pdr", u""). \
-        replace(u"-ndr", u""). \
         replace(u"1t1c", u"1c").\
         replace(u"2t1c", u"1c"). \
         replace(u"2t2c", u"2c").\
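Note (illustration, not part of the diff): with the obsolete -ndrdisc,
-pdrdisc, -ndr and -pdr suffixes removed in the hunk above, only -ndrpdr and
the thread:core tags remain to normalize. An illustrative transformation on
a hypothetical test name:

    name = u"64b-2t1c-eth-l2xcbase-ndrpdr"
    name = name.replace(u"-ndrpdr", u"").replace(u"2t1c", u"1c")
    # -> u"64b-1c-eth-l2xcbase"
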
@@ -425,7 +555,7 @@ def _tpc_insert_data(target, src, include_tests):
     """Insert src data to the target structure.
 
     :param target: Target structure where the data is placed.
-    :param src: Source data to be placed into the target stucture.
+    :param src: Source data to be placed into the target structure.
     :param include_tests: Which results will be included (MRR, NDR, PDR).
     :type target: list
     :type src: dict
@@ -439,6 +569,13 @@ def _tpc_insert_data(target, src, include_tests):
             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
         elif include_tests == u"NDR":
             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
+        elif u"latency" in include_tests:
+            keys = include_tests.split(u"-")
+            if len(keys) == 4:
+                lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
+                target[u"data"].append(
+                    float(u"nan") if lat == -1 else lat * 1e6
+                )
     except (KeyError, TypeError):
         pass
 
@@ -879,7 +1016,11 @@ def table_perf_trending_dash(table, input_data):
         if len(data_t) < 2:
             continue
 
-        classification_lst, avgs, _ = classify_anomalies(data_t)
+        try:
+            classification_lst, avgs, _ = classify_anomalies(data_t)
+        except ValueError as err:
+            logging.info(f"{err} Skipping")
+            return
 
         win_size = min(len(data_t), table[u"window"])
         long_win_size = min(len(data_t), table[u"long-trend-window"])
@@ -970,6 +1111,8 @@ def _generate_url(testbed, test_name):
         nic = u"x553"
     elif u"cx556" in test_name or u"cx556a" in test_name:
         nic = u"cx556a"
+    elif u"ena" in test_name:
+        nic = u"nitro50g"
     else:
         nic = u""
 
@@ -990,27 +1133,30 @@ def _generate_url(testbed, test_name):
 
     if u"1t1c" in test_name or \
         (u"-1c-" in test_name and
-         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
         cores = u"1t1c"
     elif u"2t2c" in test_name or \
         (u"-2c-" in test_name and
-         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
         cores = u"2t2c"
    elif u"4t4c" in test_name or \
        (u"-4c-" in test_name and
-         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
         cores = u"4t4c"
     elif u"2t1c" in test_name or \
         (u"-1c-" in test_name and
-         testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+         testbed in
+         (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
         cores = u"2t1c"
     elif u"4t2c" in test_name or \
         (u"-2c-" in test_name and
-         testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+         testbed in
+         (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
         cores = u"4t2c"
     elif u"8t4c" in test_name or \
         (u"-4c-" in test_name and
-         testbed in (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2")):
+         testbed in
+         (u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2", u"2n-aws", u"3n-aws")):
         cores = u"8t4c"
     else:
         cores = u""
@@ -1025,6 +1171,8 @@ def _generate_url(testbed, test_name):
         driver = u"rdma"
     elif u"dnv" in testbed or u"tsh" in testbed:
         driver = u"ixgbe"
+    elif u"ena" in test_name:
+        driver = u"ena"
     else:
         driver = u"dpdk"
 
@@ -1058,6 +1206,10 @@ def _generate_url(testbed, test_name):
         bsf = u"udp-pps"
     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
         bsf = u"tcp-pps"
+    elif u"-tput" in test_name and u"ethip4udp" in test_name:
+        bsf = u"udp-tput"
+    elif u"-tput" in test_name and u"ethip4tcp" in test_name:
+        bsf = u"tcp-tput"
     elif u"udpsrcscale" in test_name:
         bsf = u"features-udp"
     elif u"iacl" in test_name:
@@ -1097,6 +1249,8 @@ def _generate_url(testbed, test_name):
             domain += u"-cps"
         elif u"-pps" in test_name:
             domain += u"-pps"
+        elif u"-tput" in test_name:
+            domain += u"-tput"
     elif u"testpmd" in test_name or u"l3fwd" in test_name:
         domain = u"dpdk"
     elif u"memif" in test_name:
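Note (illustration, not part of the diff): 2n-aws and 3n-aws join the SMT
testbeds in the hunks above, so a "-2c-" test name on an AWS testbed now
resolves to 4t2c. A minimal sketch (the test name is hypothetical):

    testbed, test_name = u"2n-aws", u"64b-2c-ethip4-ip4base-ndrpdr"
    if u"-2c-" in test_name and testbed in (
            u"2n-skx", u"3n-skx", u"2n-clx", u"2n-zn2",
            u"2n-aws", u"3n-aws"):
        cores = u"4t2c"            # two physical cores, two threads each
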
u"ip4base" in test_name or u"ip4scale" in test_name: domain = u"ip4" elif u"ip6base" in test_name or u"ip6scale" in test_name: @@ -1184,6 +1340,9 @@ def table_perf_trending_dash_html(table, input_data): try: with open(table[u"input-file"], u'rt') as csv_file: csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"')) + except FileNotFoundError as err: + logging.warning(f"{err}") + return except KeyError: logging.warning(u"The input file is not defined.") return @@ -1244,8 +1403,8 @@ def table_perf_trending_dash_html(table, input_data): u"a", attrib=dict( href=f"{lnk_dir}" - f"{_generate_url(table.get(u'testbed', ''), item)}" - f"{lnk_sufix}" + f"{_generate_url(table.get(u'testbed', ''), item)}" + f"{lnk_sufix}" ) ) ref.text = item @@ -1295,6 +1454,8 @@ def table_last_failed_tests(table, input_data): build = str(build) try: version = input_data.metadata(job, build).get(u"version", u"") + duration = \ + input_data.metadata(job, build).get(u"elapsedtime", u"") except KeyError: logging.error(f"Data for {job}: {build} is not present.") return @@ -1312,16 +1473,21 @@ def table_last_failed_tests(table, input_data): if not groups: continue nic = groups.group(0) - failed_tests.append(f"{nic}-{tst_data[u'name']}") - tbl_list.append(str(passed)) - tbl_list.append(str(failed)) + msg = tst_data[u'msg'].replace(u"\n", u"") + msg = re.sub(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', + 'xxx.xxx.xxx.xxx', msg) + msg = msg.split(u'Also teardown failed')[0] + failed_tests.append(f"{nic}-{tst_data[u'name']}###{msg}") + tbl_list.append(passed) + tbl_list.append(failed) + tbl_list.append(duration) tbl_list.extend(failed_tests) file_name = f"{table[u'output-file']}{table[u'output-file-ext']}" logging.info(f" Writing file: {file_name}") with open(file_name, u"wt") as file_handler: for test in tbl_list: - file_handler.write(test + u'\n') + file_handler.write(f"{test}\n") def table_failed_tests(table, input_data): @@ -1517,8 +1683,8 @@ def table_failed_tests_html(table, input_data): u"a", attrib=dict( href=f"{lnk_dir}" - f"{_generate_url(table.get(u'testbed', ''), item)}" - f"{lnk_sufix}" + f"{_generate_url(table.get(u'testbed', ''), item)}" + f"{lnk_sufix}" ) ) ref.text = item @@ -1567,7 +1733,14 @@ def table_comparison(table, input_data): tag = col.get(u"tag", None) data = input_data.filter_data( table, - params=[u"throughput", u"result", u"name", u"parent", u"tags"], + params=[ + u"throughput", + u"result", + u"latency", + u"name", + u"parent", + u"tags" + ], data=col[u"data-set"], continue_on_error=True ) @@ -1605,7 +1778,14 @@ def table_comparison(table, input_data): if replacement: rpl_data = input_data.filter_data( table, - params=[u"throughput", u"result", u"name", u"parent", u"tags"], + params=[ + u"throughput", + u"result", + u"latency", + u"name", + u"parent", + u"tags" + ], data=replacement, continue_on_error=True ) @@ -1639,7 +1819,8 @@ def table_comparison(table, input_data): include_tests=table[u"include-tests"] ) - if table[u"include-tests"] in (u"NDR", u"PDR"): + if table[u"include-tests"] in (u"NDR", u"PDR") or \ + u"latency" in table[u"include-tests"]: for tst_name, tst_data in col_data[u"data"].items(): if tst_data[u"data"]: tst_data[u"mean"] = mean(tst_data[u"data"]) @@ -1724,11 +1905,14 @@ def table_comparison(table, input_data): cmp_itm[u"mean"] is not None and \ ref_itm[u"stdev"] is not None and \ cmp_itm[u"stdev"] is not None: - delta, d_stdev = relative_change_stdev( - ref_itm[u"mean"], cmp_itm[u"mean"], - ref_itm[u"stdev"], cmp_itm[u"stdev"] - ) - if delta is None: + try: + delta, 
@@ -1724,11 +1905,14 @@ def table_comparison(table, input_data):
                         cmp_itm[u"mean"] is not None and \
                         ref_itm[u"stdev"] is not None and \
                         cmp_itm[u"stdev"] is not None:
-                    delta, d_stdev = relative_change_stdev(
-                        ref_itm[u"mean"], cmp_itm[u"mean"],
-                        ref_itm[u"stdev"], cmp_itm[u"stdev"]
-                    )
-                    if delta is None:
+                    try:
+                        delta, d_stdev = relative_change_stdev(
+                            ref_itm[u"mean"], cmp_itm[u"mean"],
+                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
+                        )
+                    except ZeroDivisionError:
+                        break
+                    if delta is None or math.isnan(delta):
                         break
                     new_row.append({
                         u"mean": delta * 1e6,
@@ -2055,7 +2239,7 @@ def table_weekly_comparison(table, in_data):
     # Reorganize header in txt table
     txt_table = list()
     with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
-        for line in file_handler:
+        for line in list(file_handler):
             txt_table.append(line)
         try:
            txt_table.insert(5, txt_table.pop(2))
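Note (illustration, not part of the diff): in the final hunk all lines are
read up front via list(); the pop/insert in the trailing context then
reorders the header rows of the finished text table. Illustrative effect on
a made-up six-line table:

    txt_table = [u"+--+", u"|h1|", u"|h2|", u"+--+", u"|r1|", u"|r2|"]
    txt_table.insert(5, txt_table.pop(2))
    # u"|h2|" moves from index 2 to index 5:
    # [u"+--+", u"|h1|", u"+--+", u"|r1|", u"|r2|", u"|h2|"]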