+ # Generate csv tables:
+ csv_file = f"{table[u'output-file']}.csv"
+ with open(csv_file, u"wt") as file_handler:
+ file_handler.write(header_str)
+ for test in tbl_lst:
+ file_handler.write(u";".join([str(item) for item in test]) + u"\n")
+
+ convert_csv_to_pretty_txt(
+ csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
+ )
+ with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
+ txt_file.write(legend)
+
+ # Generate html table:
+ _tpc_generate_html_table(
+ header,
+ tbl_lst,
+ table[u'output-file'],
+ legend=legend
+ )
+
+
def table_perf_trending_dash(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash
    specified in the specification file.

    Produces a CSV dashboard with the trend, short-term and long-term
    relative changes and the counts of regressions/progressions per test,
    plus a pretty-printed txt rendering of the same table.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Trend [Mpps]",
        u"Short-Term Change [%]",
        u"Long-Term Change [%]",
        u"Regressions [#]",
        u"Progressions [#]"
    ]
    header_str = u",".join(header) + u"\n"

    # Prepare data to the table:
    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            for tst_name, tst_data in data[job][str(build)].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tst_name not in tbl_dict:
                    # First occurrence: derive the displayed name from the
                    # NIC found in the parent suite name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    tbl_dict[tst_name][u"data"][str(build)] = \
                        tst_data[u"result"][u"receive-rate"]
                except (TypeError, KeyError):
                    pass  # No data in output.xml for this test

    tbl_lst = list()
    for tst_name in tbl_dict:
        data_t = tbl_dict[tst_name][u"data"]
        if len(data_t) < 2:
            # At least two samples are needed to compute a trend.
            continue

        classification_lst, avgs = classify_anomalies(data_t)

        win_size = min(len(data_t), table[u"window"])
        long_win_size = min(len(data_t), table[u"long-trend-window"])

        try:
            max_long_avg = max(
                x for x in avgs[-long_win_size:-win_size] if not isnan(x)
            )
        except ValueError:
            # Empty slice or all values NaN: no long-term reference.
            max_long_avg = nan
        last_avg = avgs[-1]
        avg_week_ago = avgs[max(-win_size, -len(avgs))]

        # Short-term change: last average vs the one win_size samples ago.
        if isnan(last_avg) or isnan(avg_week_ago) or avg_week_ago == 0.0:
            rel_change_last = nan
        else:
            rel_change_last = round(
                ((last_avg - avg_week_ago) / avg_week_ago) * 1e2, 2)

        # Long-term change: last average vs the long-window maximum.
        if isnan(max_long_avg) or isnan(last_avg) or max_long_avg == 0.0:
            rel_change_long = nan
        else:
            rel_change_long = round(
                ((last_avg - max_long_avg) / max_long_avg) * 1e2, 2)

        if classification_lst:
            # Skip tests with incomplete statistics. The former extra
            # check of (isnan(rel_change_last) and isnan(rel_change_long))
            # was dead code - it is fully subsumed by this condition.
            if isnan(last_avg) or isnan(rel_change_last) or \
                    isnan(rel_change_long):
                continue
            tbl_lst.append(
                [tbl_dict[tst_name][u"name"],
                 round(last_avg / 1e6, 2),
                 rel_change_last,
                 rel_change_long,
                 classification_lst[-win_size:].count(u"regression"),
                 classification_lst[-win_size:].count(u"progression")])

    # Sort by name first (stable tie-breaker), then by nr of regressions
    # (descending), nr of progressions (descending) and short-term change
    # (ascending). Equivalent to the former O(window^2) filter loops.
    tbl_lst.sort(key=lambda rel: rel[0])
    tbl_sorted = sorted(tbl_lst, key=lambda rel: (-rel[4], -rel[5], rel[2]))

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"

    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(header_str)
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
+
+
+def _generate_url(testbed, test_name):
+ """Generate URL to a trending plot from the name of the test case.
+
+ :param testbed: The testbed used for testing.
+ :param test_name: The name of the test case.
+ :type testbed: str
+ :type test_name: str
+ :returns: The URL to the plot with the trending data for the given test
+ case.
+ :rtype str
+ """
+
+ if u"x520" in test_name:
+ nic = u"x520"
+ elif u"x710" in test_name:
+ nic = u"x710"
+ elif u"xl710" in test_name:
+ nic = u"xl710"
+ elif u"xxv710" in test_name:
+ nic = u"xxv710"
+ elif u"vic1227" in test_name:
+ nic = u"vic1227"
+ elif u"vic1385" in test_name:
+ nic = u"vic1385"
+ elif u"x553" in test_name:
+ nic = u"x553"
+ elif u"cx556" in test_name or u"cx556a" in test_name:
+ nic = u"cx556a"
+ else:
+ nic = u""
+
+ if u"64b" in test_name:
+ frame_size = u"64b"
+ elif u"78b" in test_name:
+ frame_size = u"78b"
+ elif u"imix" in test_name:
+ frame_size = u"imix"
+ elif u"9000b" in test_name:
+ frame_size = u"9000b"
+ elif u"1518b" in test_name:
+ frame_size = u"1518b"
+ elif u"114b" in test_name:
+ frame_size = u"114b"
+ else:
+ frame_size = u""
+
+ if u"1t1c" in test_name or \
+ (u"-1c-" in test_name and
+ testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+ cores = u"1t1c"
+ elif u"2t2c" in test_name or \
+ (u"-2c-" in test_name and
+ testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+ cores = u"2t2c"
+ elif u"4t4c" in test_name or \
+ (u"-4c-" in test_name and
+ testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+ cores = u"4t4c"
+ elif u"2t1c" in test_name or \
+ (u"-1c-" in test_name and
+ testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
+ cores = u"2t1c"
+ elif u"4t2c" in test_name or \
+ (u"-2c-" in test_name and
+ testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
+ cores = u"4t2c"
+ elif u"8t4c" in test_name or \
+ (u"-4c-" in test_name and
+ testbed in (u"2n-skx", u"3n-skx", u"2n-clx")):
+ cores = u"8t4c"
+ else:
+ cores = u""
+
+ if u"testpmd" in test_name:
+ driver = u"testpmd"
+ elif u"l3fwd" in test_name:
+ driver = u"l3fwd"
+ elif u"avf" in test_name:
+ driver = u"avf"
+ elif u"rdma" in test_name:
+ driver = u"rdma"
+ elif u"dnv" in testbed or u"tsh" in testbed:
+ driver = u"ixgbe"
+ else:
+ driver = u"dpdk"
+
+ if u"acl" in test_name or \
+ u"macip" in test_name or \
+ u"nat" in test_name or \
+ u"policer" in test_name or \
+ u"cop" in test_name:
+ bsf = u"features"
+ elif u"scale" in test_name:
+ bsf = u"scale"
+ elif u"base" in test_name:
+ bsf = u"base"
+ else:
+ bsf = u"base"
+
+ if u"114b" in test_name and u"vhost" in test_name:
+ domain = u"vts"
+ elif u"testpmd" in test_name or u"l3fwd" in test_name:
+ domain = u"dpdk"
+ elif u"memif" in test_name:
+ domain = u"container_memif"
+ elif u"srv6" in test_name:
+ domain = u"srv6"
+ elif u"vhost" in test_name:
+ domain = u"vhost"
+ if u"vppl2xc" in test_name:
+ driver += u"-vpp"
+ else:
+ driver += u"-testpmd"
+ if u"lbvpplacp" in test_name:
+ bsf += u"-link-bonding"
+ elif u"ch" in test_name and u"vh" in test_name and u"vm" in test_name:
+ domain = u"nf_service_density_vnfc"
+ elif u"ch" in test_name and u"mif" in test_name and u"dcr" in test_name:
+ domain = u"nf_service_density_cnfc"
+ elif u"pl" in test_name and u"mif" in test_name and u"dcr" in test_name:
+ domain = u"nf_service_density_cnfp"
+ elif u"ipsec" in test_name:
+ domain = u"ipsec"
+ if u"sw" in test_name:
+ bsf += u"-sw"
+ elif u"hw" in test_name:
+ bsf += u"-hw"
+ elif u"ethip4vxlan" in test_name:
+ domain = u"ip4_tunnels"
+ elif u"ip4base" in test_name or u"ip4scale" in test_name:
+ domain = u"ip4"
+ elif u"ip6base" in test_name or u"ip6scale" in test_name:
+ domain = u"ip6"
+ elif u"l2xcbase" in test_name or \
+ u"l2xcscale" in test_name or \
+ u"l2bdbasemaclrn" in test_name or \
+ u"l2bdscale" in test_name or \
+ u"l2patch" in test_name:
+ domain = u"l2"
+ else:
+ domain = u""
+
+ file_name = u"-".join((domain, testbed, nic)) + u".html#"
+ anchor_name = u"-".join((frame_size, cores, bsf, driver))
+
+ return file_name + anchor_name
+
+
def table_perf_trending_dash_html(table, input_data):
    """Generate the table(s) with algorithm:
    table_perf_trending_dash_html specified in the specification
    file.

    Reads the CSV dashboard and renders it as an HTML table with rows
    colour-coded by regression/progression state and the test names
    linked to their trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: dict
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as src_file:
            rows = list(csv.reader(src_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The root <table> element:
    dashboard = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    hdr_row = ET.SubElement(dashboard, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = caption

    # Pairs of alternating background colours per row classification:
    colors = {
        u"regression": (
            u"#ffcccc",
            u"#ff9999"
        ),
        u"progression": (
            u"#c6ecc6",
            u"#9fdf9f"
        ),
        u"normal": (
            u"#e9f1fb",
            u"#d4e4f7"
        )
    }
    for row_nr, row in enumerate(rows[1:]):
        # Column 4 holds the nr of regressions, column 5 of progressions.
        if int(row[4]):
            state = u"regression"
        elif int(row[5]):
            state = u"progression"
        else:
            state = u"normal"
        body_row = ET.SubElement(
            dashboard, u"tr", attrib=dict(bgcolor=colors[state][row_nr % 2])
        )

        # Cells:
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # First column: test name linked to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f" Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(dashboard, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
+
+
def table_last_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_last_failed_tests
    specified in the specification file.

    For each job/build, writes the build id, version, counts of passed
    and failed tests and the names of the failed tests, one item per
    line.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )

    data = input_data.filter_data(table, continue_on_error=True)

    if data is None or data.empty:
        logging.warning(
            f" No data for the {table.get(u'type', u'')} "
            f"{table.get(u'title', u'')}."
        )
        return

    report_lines = list()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            try:
                version = input_data.metadata(job, build).get(u"version", u"")
            except KeyError:
                logging.error(f"Data for {job}: {build} is not present.")
                return
            report_lines.append(build)
            report_lines.append(version)
            failed_tests = list()
            passed = 0
            failed = 0
            for tst_data in data[job][build].values:
                if tst_data[u"status"] != u"FAIL":
                    passed += 1
                    continue
                failed += 1
                nic_match = re.search(REGEX_NIC, tst_data[u"parent"])
                if not nic_match:
                    continue
                failed_tests.append(
                    f"{nic_match.group(0)}-{tst_data[u'name']}"
                )
            report_lines.append(str(passed))
            report_lines.append(str(failed))
            report_lines.extend(failed_tests)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        for line in report_lines:
            file_handler.write(line + u'\n')
+
+
def table_failed_tests(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests
    specified in the specification file.

    Counts, per test, the failures within the configured time window and
    writes a CSV (and a pretty txt) table listing the failure count and
    the details of the most recent failure.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    # Transform the data
    logging.info(
        f" Creating the data set for the {table.get(u'type', u'')} "
        f"{table.get(u'title', u'')}."
    )
    data = input_data.filter_data(table, continue_on_error=True)

    # Prepare the header of the tables
    header = [
        u"Test Case",
        u"Failures [#]",
        u"Last Failure [Time]",
        u"Last Failure [VPP-Build-Id]",
        u"Last Failure [CSIT-Job-Build-Id]"
    ]

    # Generate the data for the table according to the model in the table
    # specification

    # Only builds generated within the last "window" days (default 7)
    # are taken into account.
    now = dt.utcnow()
    timeperiod = timedelta(int(table.get(u"window", 7)))

    tbl_dict = dict()
    for job, builds in table[u"data"].items():
        for build in builds:
            build = str(build)
            for tst_name, tst_data in data[job][build].items():
                if tst_name.lower() in table.get(u"ignore-list", list()):
                    continue
                if tbl_dict.get(tst_name, None) is None:
                    # First occurrence of this test: derive the displayed
                    # name from the NIC found in the parent suite name.
                    groups = re.search(REGEX_NIC, tst_data[u"parent"])
                    if not groups:
                        continue
                    nic = groups.group(0)
                    tbl_dict[tst_name] = {
                        u"name": f"{nic}-{tst_data[u'name']}",
                        u"data": OrderedDict()
                    }
                try:
                    generated = input_data.metadata(job, build).\
                        get(u"generated", u"")
                    if not generated:
                        continue
                    then = dt.strptime(generated, u"%Y%m%d %H:%M")
                    if (now - then) <= timeperiod:
                        # Store (status, timestamp, VPP version, build id)
                        # for each build inside the time window.
                        tbl_dict[tst_name][u"data"][build] = (
                            tst_data[u"status"],
                            generated,
                            input_data.metadata(job, build).get(u"version",
                                                                u""),
                            build
                        )
                except (TypeError, KeyError) as err:
                    logging.warning(f"tst_name: {tst_name} - err: {repr(err)}")

    max_fails = 0
    tbl_lst = list()
    for tst_data in tbl_dict.values():
        fails_nr = 0
        fails_last_date = u""
        fails_last_vpp = u""
        fails_last_csit = u""
        # Count the failures and keep the details of the last one seen;
        # builds are iterated in insertion (chronological) order.
        for val in tst_data[u"data"].values():
            if val[0] == u"FAIL":
                fails_nr += 1
                fails_last_date = val[1]
                fails_last_vpp = val[2]
                fails_last_csit = val[3]
        if fails_nr:
            max_fails = fails_nr if fails_nr > max_fails else max_fails
            tbl_lst.append(
                [
                    tst_data[u"name"],
                    fails_nr,
                    fails_last_date,
                    fails_last_vpp,
                    f"mrr-daily-build-{fails_last_csit}"
                ]
            )

    # Sort by last-failure timestamp (newest first), then group the rows
    # by the number of failures in descending order.
    tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
    tbl_sorted = list()
    for nrf in range(max_fails, -1, -1):
        tbl_fails = [item for item in tbl_lst if item[1] == nrf]
        tbl_sorted.extend(tbl_fails)

    file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
    logging.info(f" Writing file: {file_name}")
    with open(file_name, u"wt") as file_handler:
        file_handler.write(u",".join(header) + u"\n")
        for test in tbl_sorted:
            file_handler.write(u",".join([str(item) for item in test]) + u'\n')

    logging.info(f" Writing file: {table[u'output-file']}.txt")
    convert_csv_to_pretty_txt(file_name, f"{table[u'output-file']}.txt")
+
+
def table_failed_tests_html(table, input_data):
    """Generate the table(s) with algorithm: table_failed_tests_html
    specified in the specification file.

    Reads the CSV produced by table_failed_tests and renders it as an
    HTML table with the test names linked to their trending plots.

    :param table: Table to generate.
    :param input_data: Data to process.
    :type table: pandas.Series
    :type input_data: InputData
    """

    _ = input_data

    if not table.get(u"testbed", None):
        logging.error(
            f"The testbed is not defined for the table "
            f"{table.get(u'title', u'')}."
        )
        return

    logging.info(f" Generating the table {table.get(u'title', u'')} ...")

    try:
        with open(table[u"input-file"], u'rt') as src_file:
            rows = list(csv.reader(src_file, delimiter=u',', quotechar=u'"'))
    except KeyError:
        logging.warning(u"The input file is not defined.")
        return
    except csv.Error as err:
        logging.warning(
            f"Not possible to process the file {table[u'input-file']}.\n"
            f"{repr(err)}"
        )
        return

    # The root <table> element:
    failed_tests = ET.Element(u"table", attrib=dict(width=u"100%", border=u'0'))

    # Header row:
    hdr_row = ET.SubElement(failed_tests, u"tr", attrib=dict(bgcolor=u"#7eade7"))
    for col_nr, caption in enumerate(rows[0]):
        cell = ET.SubElement(
            hdr_row, u"th",
            attrib=dict(align=u"left" if col_nr == 0 else u"center")
        )
        cell.text = caption

    # Data rows with alternating background colours:
    colors = (u"#e9f1fb", u"#d4e4f7")
    for row_nr, row in enumerate(rows[1:]):
        body_row = ET.SubElement(
            failed_tests, u"tr", attrib=dict(bgcolor=colors[row_nr % 2])
        )

        # Cells:
        for col_nr, value in enumerate(row):
            cell = ET.SubElement(
                body_row,
                u"td",
                attrib=dict(align=u"left" if col_nr == 0 else u"center")
            )
            if col_nr == 0:
                # First column: test name linked to its trending plot.
                link = ET.SubElement(
                    cell,
                    u"a",
                    attrib=dict(
                        href=f"../trending/"
                        f"{_generate_url(table.get(u'testbed', ''), value)}"
                    )
                )
                link.text = value
            else:
                cell.text = value
    try:
        with open(table[u"output-file"], u'w') as html_file:
            logging.info(f" Writing file: {table[u'output-file']}")
            html_file.write(u".. raw:: html\n\n\t")
            html_file.write(str(ET.tostring(failed_tests, encoding=u"unicode")))
            html_file.write(u"\n\t<p><br><br></p>\n")
    except KeyError:
        logging.warning(u"The output file is not defined.")
        return
+
+
+def table_comparison(table, input_data):
+ """Generate the table(s) with algorithm: table_comparison
+ specified in the specification file.
+
+ :param table: Table to generate.
+ :param input_data: Data to process.
+ :type table: pandas.Series
+ :type input_data: InputData
+ """
+ logging.info(f" Generating the table {table.get(u'title', u'')} ...")
+
+ # Transform the data
+ logging.info(
+ f" Creating the data set for the {table.get(u'type', u'')} "
+ f"{table.get(u'title', u'')}."
+ )
+
+ columns = table.get(u"columns", None)
+ if not columns:
+ logging.error(
+ f"No columns specified for {table.get(u'title', u'')}. Skipping."
+ )
+ return
+
+ cols = list()
+ for idx, col in enumerate(columns):
+ if col.get(u"data", None) is None:
+ logging.warning(f"No data for column {col.get(u'title', u'')}")
+ continue
+ data = input_data.filter_data(
+ table,
+ params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+ data=col[u"data"],
+ continue_on_error=True
+ )
+ col_data = {
+ u"title": col.get(u"title", f"Column{idx}"),
+ u"data": dict()
+ }
+ for builds in data.values:
+ for build in builds:
+ for tst_name, tst_data in build.items():
+ tst_name_mod = \
+ _tpc_modify_test_name(tst_name).replace(u"2n1l-", u"")
+ if col_data[u"data"].get(tst_name_mod, None) is None:
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
+ if u"across testbeds" in table[u"title"].lower() or \
+ u"across topologies" in table[u"title"].lower():
+ name = _tpc_modify_displayed_test_name(name)
+ col_data[u"data"][tst_name_mod] = {
+ u"name": name,
+ u"replace": True,
+ u"data": list(),
+ u"mean": None,
+ u"stdev": None
+ }
+ _tpc_insert_data(
+ target=col_data[u"data"][tst_name_mod][u"data"],
+ src=tst_data,
+ include_tests=table[u"include-tests"]
+ )
+
+ replacement = col.get(u"data-replacement", None)
+ if replacement:
+ rpl_data = input_data.filter_data(
+ table,
+ params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+ data=replacement,
+ continue_on_error=True
+ )
+ for builds in rpl_data.values:
+ for build in builds:
+ for tst_name, tst_data in build.items():
+ tst_name_mod = \
+ _tpc_modify_test_name(tst_name).\
+ replace(u"2n1l-", u"")
+ if col_data[u"data"].get(tst_name_mod, None) is None:
+ name = tst_data[u'name'].rsplit(u'-', 1)[0]
+ if u"across testbeds" in table[u"title"].lower() \
+ or u"across topologies" in \
+ table[u"title"].lower():
+ name = _tpc_modify_displayed_test_name(name)
+ col_data[u"data"][tst_name_mod] = {
+ u"name": name,
+ u"replace": False,
+ u"data": list(),
+ u"mean": None,
+ u"stdev": None
+ }
+ if col_data[u"data"][tst_name_mod][u"replace"]:
+ col_data[u"data"][tst_name_mod][u"replace"] = False
+ col_data[u"data"][tst_name_mod][u"data"] = list()
+ _tpc_insert_data(
+ target=col_data[u"data"][tst_name_mod][u"data"],
+ src=tst_data,
+ include_tests=table[u"include-tests"]
+ )
+
+ if table[u"include-tests"] in (u"NDR", u"PDR"):
+ for tst_name, tst_data in col_data[u"data"].items():
+ if tst_data[u"data"]:
+ tst_data[u"mean"] = mean(tst_data[u"data"])
+ tst_data[u"stdev"] = stdev(tst_data[u"data"])
+ elif table[u"include-tests"] in (u"MRR", ):
+ for tst_name, tst_data in col_data[u"data"].items():
+ if tst_data[u"data"]:
+ tst_data[u"mean"] = tst_data[u"data"][0]
+ tst_data[u"stdev"] = tst_data[u"data"][0]
+
+ cols.append(col_data)
+
+ tbl_dict = dict()
+ for col in cols:
+ for tst_name, tst_data in col[u"data"].items():
+ if tbl_dict.get(tst_name, None) is None:
+ tbl_dict[tst_name] = {
+ "name": tst_data[u"name"]
+ }
+ tbl_dict[tst_name][col[u"title"]] = {
+ u"mean": tst_data[u"mean"],
+ u"stdev": tst_data[u"stdev"]
+ }
+
+ tbl_lst = list()
+ for tst_data in tbl_dict.values():
+ row = [tst_data[u"name"], ]
+ for col in cols:
+ row.append(tst_data.get(col[u"title"], None))
+ tbl_lst.append(row)
+
+ comparisons = table.get(u"comparisons", None)
+ if comparisons and isinstance(comparisons, list):
+ for idx, comp in enumerate(comparisons):
+ try:
+ col_ref = int(comp[u"reference"])
+ col_cmp = int(comp[u"compare"])
+ except KeyError:
+ logging.warning(u"Comparison: No references defined! Skipping.")
+ comparisons.pop(idx)
+ continue
+ if not (0 < col_ref <= len(cols) and
+ 0 < col_cmp <= len(cols)) or \
+ col_ref == col_cmp:
+ logging.warning(f"Wrong values of reference={col_ref} "
+ f"and/or compare={col_cmp}. Skipping.")
+ comparisons.pop(idx)
+ continue
+
+ tbl_cmp_lst = list()
+ if comparisons:
+ for row in tbl_lst:
+ new_row = deepcopy(row)
+ add_to_tbl = False
+ for comp in comparisons:
+ ref_itm = row[int(comp[u"reference"])]
+ if ref_itm is None and \
+ comp.get(u"reference-alt", None) is not None:
+ ref_itm = row[int(comp[u"reference-alt"])]
+ cmp_itm = row[int(comp[u"compare"])]
+ if ref_itm is not None and cmp_itm is not None and \
+ ref_itm[u"mean"] is not None and \
+ cmp_itm[u"mean"] is not None and \
+ ref_itm[u"stdev"] is not None and \
+ cmp_itm[u"stdev"] is not None:
+ delta, d_stdev = relative_change_stdev(
+ ref_itm[u"mean"], cmp_itm[u"mean"],
+ ref_itm[u"stdev"], cmp_itm[u"stdev"]
+ )
+ new_row.append(
+ {
+ u"mean": delta * 1e6,
+ u"stdev": d_stdev * 1e6
+ }
+ )
+ add_to_tbl = True