Report: latency comparison
diff --git a/resources/tools/presentation/generator_tables.py b/resources/tools/presentation/generator_tables.py
index 0d8b272..351250a 100644
--- a/resources/tools/presentation/generator_tables.py
+++ b/resources/tools/presentation/generator_tables.py
@@ -17,6 +17,7 @@
 
 import logging
 import csv
+import math
 import re
 
 from collections import OrderedDict
@@ -24,6 +25,7 @@ from xml.etree import ElementTree as ET
 from datetime import datetime as dt
 from datetime import timedelta
 from copy import deepcopy
+from json import loads
 
 import plotly.graph_objects as go
 import plotly.offline as ploff
@@ -93,7 +95,7 @@ def table_oper_data_html(table, input_data):
     )
     data = input_data.filter_data(
         table,
-        params=[u"name", u"parent", u"show-run", u"type"],
+        params=[u"name", u"parent", u"telemetry-show-run", u"type"],
         continue_on_error=True
     )
     if data.empty:
@@ -146,7 +148,8 @@ def table_oper_data_html(table, input_data):
         )
         thead.text = u"\t"
 
-        if tst_data.get(u"show-run", u"No Data") == u"No Data":
+        if tst_data.get(u"telemetry-show-run", None) is None or \
+                isinstance(tst_data[u"telemetry-show-run"], str):
             trow = ET.SubElement(
                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
             )
@@ -176,17 +179,56 @@ def table_oper_data_html(table, input_data):
             u"Average Vector Size"
         )
 
-        for dut_data in tst_data[u"show-run"].values():
+        for dut_data in tst_data[u"telemetry-show-run"].values():
             trow = ET.SubElement(
                 tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
             )
             tcol = ET.SubElement(
                 trow, u"td", attrib=dict(align=u"left", colspan=u"6")
             )
-            if dut_data.get(u"threads", None) is None:
+            if dut_data.get(u"runtime", None) is None:
                 tcol.text = u"No Data"
                 continue
 
+            runtime = dict()
+            for item in dut_data[u"runtime"].get(u"data", tuple()):
+                tid = int(item[u"labels"][u"thread_id"])
+                if runtime.get(tid, None) is None:
+                    runtime[tid] = dict()
+                gnode = item[u"labels"][u"graph_node"]
+                if runtime[tid].get(gnode, None) is None:
+                    runtime[tid][gnode] = dict()
+                try:
+                    runtime[tid][gnode][item[u"name"]] = float(item[u"value"])
+                except ValueError:
+                    runtime[tid][gnode][item[u"name"]] = item[u"value"]
+
+            threads = dict({idx: list() for idx in range(len(runtime))})
+            for idx, run_data in runtime.items():
+                for gnode, gdata in run_data.items():
+                    if gdata[u"vectors"] > 0:
+                        clocks = gdata[u"clocks"] / gdata[u"vectors"]
+                    elif gdata[u"calls"] > 0:
+                        clocks = gdata[u"clocks"] / gdata[u"calls"]
+                    elif gdata[u"suspends"] > 0:
+                        clocks = gdata[u"clocks"] / gdata[u"suspends"]
+                    else:
+                        clocks = 0.0
+                    if gdata[u"calls"] > 0:
+                        vectors_call = gdata[u"vectors"] / gdata[u"calls"]
+                    else:
+                        vectors_call = 0.0
+                    if int(gdata[u"calls"]) + int(gdata[u"vectors"]) + \
+                            int(gdata[u"suspends"]):
+                        threads[idx].append([
+                            gnode,
+                            int(gdata[u"calls"]),
+                            int(gdata[u"vectors"]),
+                            int(gdata[u"suspends"]),
+                            clocks,
+                            vectors_call
+                        ])
+
             bold = ET.SubElement(tcol, u"b")
             bold.text = (
                 f"Host IP: {dut_data.get(u'host', '')}, "
@@ -200,7 +242,7 @@ def table_oper_data_html(table, input_data):
             )
             thead.text = u"\t"
 
-            for thread_nr, thread in dut_data[u"threads"].items():
+            for thread_nr, thread in threads.items():
                 trow = ET.SubElement(
                     tbl, u"tr", attrib=dict(bgcolor=colors[u"header"])
                 )
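
Note (not part of the diff): the hunks above replace the pre-parsed "show-run" threads structure with raw "telemetry-show-run" counters, so the table code now regroups the flat counter list into per-thread, per-graph-node rows itself. A minimal standalone sketch of that regrouping, run against a hypothetical telemetry record (only the label and counter names are taken from the hunk; the sample values are invented):

    # Hypothetical telemetry counters for one DUT; thread_id/graph_node labels
    # and the calls/vectors/suspends/clocks names mirror the hunk above.
    runtime_data = {
        u"data": [
            {u"name": u"calls", u"value": u"200",
             u"labels": {u"thread_id": u"0", u"graph_node": u"ip4-lookup"}},
            {u"name": u"vectors", u"value": u"51200",
             u"labels": {u"thread_id": u"0", u"graph_node": u"ip4-lookup"}},
            {u"name": u"suspends", u"value": u"0",
             u"labels": {u"thread_id": u"0", u"graph_node": u"ip4-lookup"}},
            {u"name": u"clocks", u"value": u"1024000",
             u"labels": {u"thread_id": u"0", u"graph_node": u"ip4-lookup"}},
        ]
    }

    # Regroup the flat list into runtime[thread_id][graph_node][counter_name].
    runtime = dict()
    for item in runtime_data[u"data"]:
        tid = int(item[u"labels"][u"thread_id"])
        gnode = item[u"labels"][u"graph_node"]
        runtime.setdefault(tid, dict()).setdefault(gnode, dict())[item[u"name"]] = \
            float(item[u"value"])

    # Derive the table columns: clocks are normalised by vectors, calls or
    # suspends (first non-zero counter), vectors by calls.
    for tid, nodes in sorted(runtime.items()):
        for gnode, gdata in nodes.items():
            divisor = gdata[u"vectors"] or gdata[u"calls"] or gdata[u"suspends"]
            clocks = gdata[u"clocks"] / divisor if divisor else 0.0
            vectors_call = (
                gdata[u"vectors"] / gdata[u"calls"] if gdata[u"calls"] else 0.0
            )
            print(tid, gnode, int(gdata[u"calls"]), int(gdata[u"vectors"]),
                  int(gdata[u"suspends"]), clocks, vectors_call)
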
@@ -323,7 +365,8 @@ def table_merged_details(table, input_data):
         suite_name = suite[u"name"]
         table_lst = list()
         for test in data.keys():
-            if data[test][u"parent"] not in suite_name:
+            if data[test][u"status"] != u"PASS" or \
+                    data[test][u"parent"] not in suite_name:
                 continue
             row_lst = list()
             for column in table[u"columns"]:
@@ -348,13 +391,14 @@ def table_merged_details(table, input_data):
                         # Temporary solution: remove NDR results from message:
                         if bool(table.get(u'remove-ndr', False)):
                             try:
-                                col_data = col_data.split(u" |br| ", 1)[1]
+                                col_data = col_data.split(u"\n", 1)[1]
                             except IndexError:
                                 pass
+                        col_data = col_data.replace(u'\n', u' |br| ').\
+                            replace(u'\r', u'').replace(u'"', u"'")
                         col_data = f" |prein| {col_data} |preout| "
-                    elif column[u"data"].split(u" ")[1] in \
-                            (u"conf-history", u"show-run"):
-                        col_data = col_data.replace(u" |br| ", u"", 1)
+                    elif column[u"data"].split(u" ")[1] in (u"conf-history", ):
+                        col_data = col_data.replace(u'\n', u' |br| ')
                         col_data = f" |prein| {col_data[:-5]} |preout| "
                     row_lst.append(f'"{col_data}"')
                 except KeyError:
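
Note (not part of the diff): the test messages now arrive with plain newlines instead of " |br| " separators, so the detail table strips the NDR line on "\n" and then re-inserts the markers itself. A small sketch of that text transformation on a hypothetical message:

    # Hypothetical test message; real content comes from the test results.
    col_data = u"NDR_LOWER: 10.0 Mpps\nPDR_LOWER: 9.5 Mpps\nLatency: ..."

    # Drop the NDR line (the 'remove-ndr' table option), restore the ' |br| '
    # separators the rST tables expect, and neutralise quotes so the value
    # survives the CSV quoting.
    col_data = col_data.split(u"\n", 1)[1]
    col_data = col_data.replace(u'\n', u' |br| ').\
        replace(u'\r', u'').replace(u'"', u"'")
    col_data = f" |prein| {col_data} |preout| "
    print(f'"{col_data}"')
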
@@ -386,12 +430,7 @@ def _tpc_modify_test_name(test_name, ignore_nic=False):
     :rtype: str
     """
     test_name_mod = test_name.\
-        replace(u"-ndrpdrdisc", u""). \
         replace(u"-ndrpdr", u"").\
-        replace(u"-pdrdisc", u""). \
-        replace(u"-ndrdisc", u"").\
-        replace(u"-pdr", u""). \
-        replace(u"-ndr", u""). \
         replace(u"1t1c", u"1c").\
         replace(u"2t1c", u"1c"). \
         replace(u"2t2c", u"2c").\
@@ -425,7 +464,7 @@ def _tpc_insert_data(target, src, include_tests):
     """Insert src data to the target structure.
 
     :param target: Target structure where the data is placed.
-    :param src: Source data to be placed into the target stucture.
+    :param src: Source data to be placed into the target structure.
     :param include_tests: Which results will be included (MRR, NDR, PDR).
     :type target: list
     :type src: dict
@@ -439,6 +478,13 @@ def _tpc_insert_data(target, src, include_tests):
             target[u"data"].append(src[u"throughput"][u"PDR"][u"LOWER"])
         elif include_tests == u"NDR":
             target[u"data"].append(src[u"throughput"][u"NDR"][u"LOWER"])
+        elif u"latency" in include_tests:
+            keys = include_tests.split(u"-")
+            if len(keys) == 4:
+                lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
+                target[u"data"].append(
+                    float(u"nan") if lat == -1 else lat * 1e6
+                )
     except (KeyError, TypeError):
         pass
 
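
Note (not part of the diff): the new branch is what enables the latency comparison tables: "include-tests" can now carry a dash-separated path into the per-test latency dictionary. A standalone sketch with a hypothetical result structure and key spelling (only the "split on '-' and index four levels deep" mechanics come from the hunk; the real key names are defined by the specification files):

    # Hypothetical per-test result and include-tests value.
    src = {u"latency": {u"pdr": {u"direction1": {u"avg": 25.0}}}}
    include_tests = u"latency-pdr-direction1-avg"

    target = {u"data": list()}
    if u"latency" in include_tests:
        keys = include_tests.split(u"-")
        if len(keys) == 4:
            lat = src[keys[0]][keys[1]][keys[2]][keys[3]]
            # -1 marks a missing measurement; valid values are scaled by 1e6
            # exactly as the hunk above does.
            target[u"data"].append(float(u"nan") if lat == -1 else lat * 1e6)

    print(target)  # {'data': [25000000.0]}
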
@@ -879,7 +925,11 @@ def table_perf_trending_dash(table, input_data):
         if len(data_t) < 2:
             continue
 
-        classification_lst, avgs, _ = classify_anomalies(data_t)
+        try:
+            classification_lst, avgs, _ = classify_anomalies(data_t)
+        except ValueError as err:
+            logging.info(f"{err} Skipping")
+            return
 
         win_size = min(len(data_t), table[u"window"])
         long_win_size = min(len(data_t), table[u"long-trend-window"])
@@ -990,15 +1040,15 @@ def _generate_url(testbed, test_name):
 
     if u"1t1c" in test_name or \
         (u"-1c-" in test_name and
-         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+         testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
         cores = u"1t1c"
     elif u"2t2c" in test_name or \
          (u"-2c-" in test_name and
-          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
         cores = u"2t2c"
     elif u"4t4c" in test_name or \
          (u"-4c-" in test_name and
-          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv")):
+          testbed in (u"3n-hsw", u"3n-tsh", u"2n-dnv", u"3n-dnv", u"2n-tx2")):
         cores = u"4t4c"
     elif u"2t1c" in test_name or \
          (u"-1c-" in test_name and
@@ -1031,7 +1081,7 @@ def _generate_url(testbed, test_name):
     if u"macip-iacl1s" in test_name:
         bsf = u"features-macip-iacl1"
     elif u"macip-iacl10s" in test_name:
-        bsf = u"features-macip-iacl01"
+        bsf = u"features-macip-iacl10"
     elif u"macip-iacl50s" in test_name:
         bsf = u"features-macip-iacl50"
     elif u"iacl1s" in test_name:
@@ -1058,6 +1108,10 @@ def _generate_url(testbed, test_name):
         bsf = u"udp-pps"
     elif u"-pps" in test_name and u"ethip4tcp" in test_name:
         bsf = u"tcp-pps"
+    elif u"-tput" in test_name and u"ethip4udp" in test_name:
+        bsf = u"udp-tput"
+    elif u"-tput" in test_name and u"ethip4tcp" in test_name:
+        bsf = u"tcp-tput"
     elif u"udpsrcscale" in test_name:
         bsf = u"features-udp"
     elif u"iacl" in test_name:
@@ -1097,6 +1151,8 @@ def _generate_url(testbed, test_name):
             domain += u"-cps"
         elif u"-pps" in test_name:
             domain += u"-pps"
+        elif u"-tput" in test_name:
+            domain += u"-tput"
     elif u"testpmd" in test_name or u"l3fwd" in test_name:
         domain = u"dpdk"
     elif u"memif" in test_name:
@@ -1125,6 +1181,8 @@ def _generate_url(testbed, test_name):
             bsf += u"-hw"
     elif u"ethip4vxlan" in test_name:
         domain = u"ip4_tunnels"
+    elif u"ethip4udpgeneve" in test_name:
+        domain = u"ip4_tunnels"
     elif u"ip4base" in test_name or u"ip4scale" in test_name:
         domain = u"ip4"
     elif u"ip6base" in test_name or u"ip6scale" in test_name:
@@ -1184,6 +1242,9 @@ def table_perf_trending_dash_html(table, input_data):
     try:
         with open(table[u"input-file"], u'rt') as csv_file:
             csv_lst = list(csv.reader(csv_file, delimiter=u',', quotechar=u'"'))
+    except FileNotFoundError as err:
+        logging.warning(f"{err}")
+        return
     except KeyError:
         logging.warning(u"The input file is not defined.")
         return
@@ -1244,8 +1305,8 @@ def table_perf_trending_dash_html(table, input_data):
                     u"a",
                     attrib=dict(
                         href=f"{lnk_dir}"
-                             f"{_generate_url(table.get(u'testbed', ''), item)}"
-                             f"{lnk_sufix}"
+                        f"{_generate_url(table.get(u'testbed', ''), item)}"
+                        f"{lnk_sufix}"
                     )
                 )
                 ref.text = item
@@ -1295,6 +1356,8 @@ def table_last_failed_tests(table, input_data):
             build = str(build)
             try:
                 version = input_data.metadata(job, build).get(u"version", u"")
+                duration = \
+                    input_data.metadata(job, build).get(u"elapsedtime", u"")
             except KeyError:
                 logging.error(f"Data for {job}: {build} is not present.")
                 return
@@ -1313,15 +1376,16 @@ def table_last_failed_tests(table, input_data):
                     continue
                 nic = groups.group(0)
                 failed_tests.append(f"{nic}-{tst_data[u'name']}")
-            tbl_list.append(str(passed))
-            tbl_list.append(str(failed))
+            tbl_list.append(passed)
+            tbl_list.append(failed)
+            tbl_list.append(duration)
             tbl_list.extend(failed_tests)
 
     file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
     logging.info(f"    Writing file: {file_name}")
     with open(file_name, u"wt") as file_handler:
         for test in tbl_list:
-            file_handler.write(test + u'\n')
+            file_handler.write(f"{test}\n")
 
 
 def table_failed_tests(table, input_data):
@@ -1517,8 +1581,8 @@ def table_failed_tests_html(table, input_data):
                     u"a",
                     attrib=dict(
                         href=f"{lnk_dir}"
-                             f"{_generate_url(table.get(u'testbed', ''), item)}"
-                             f"{lnk_sufix}"
+                        f"{_generate_url(table.get(u'testbed', ''), item)}"
+                        f"{lnk_sufix}"
                     )
                 )
                 ref.text = item
@@ -1567,7 +1631,14 @@ def table_comparison(table, input_data):
         tag = col.get(u"tag", None)
         data = input_data.filter_data(
             table,
-            params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+            params=[
+                u"throughput",
+                u"result",
+                u"latency",
+                u"name",
+                u"parent",
+                u"tags"
+            ],
             data=col[u"data-set"],
             continue_on_error=True
         )
@@ -1605,7 +1676,14 @@ def table_comparison(table, input_data):
         if replacement:
             rpl_data = input_data.filter_data(
                 table,
-                params=[u"throughput", u"result", u"name", u"parent", u"tags"],
+                params=[
+                    u"throughput",
+                    u"result",
+                    u"latency",
+                    u"name",
+                    u"parent",
+                    u"tags"
+                ],
                 data=replacement,
                 continue_on_error=True
             )
@@ -1639,7 +1717,8 @@ def table_comparison(table, input_data):
                             include_tests=table[u"include-tests"]
                         )
 
-        if table[u"include-tests"] in (u"NDR", u"PDR"):
+        if table[u"include-tests"] in (u"NDR", u"PDR") or \
+                u"latency" in table[u"include-tests"]:
             for tst_name, tst_data in col_data[u"data"].items():
                 if tst_data[u"data"]:
                     tst_data[u"mean"] = mean(tst_data[u"data"])
@@ -1724,11 +1803,14 @@ def table_comparison(table, input_data):
                         cmp_itm[u"mean"] is not None and \
                         ref_itm[u"stdev"] is not None and \
                         cmp_itm[u"stdev"] is not None:
-                    delta, d_stdev = relative_change_stdev(
-                        ref_itm[u"mean"], cmp_itm[u"mean"],
-                        ref_itm[u"stdev"], cmp_itm[u"stdev"]
-                    )
-                    if delta is None:
+                    try:
+                        delta, d_stdev = relative_change_stdev(
+                            ref_itm[u"mean"], cmp_itm[u"mean"],
+                            ref_itm[u"stdev"], cmp_itm[u"stdev"]
+                        )
+                    except ZeroDivisionError:
+                        break
+                    if delta is None or math.isnan(delta):
                         break
                     new_row.append({
                         u"mean": delta * 1e6,
@@ -2055,7 +2137,7 @@ def table_weekly_comparison(table, in_data):
     # Reorganize header in txt table
     txt_table = list()
     with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
-        for line in file_handler:
+        for line in list(file_handler):
             txt_table.append(line)
     try:
         txt_table.insert(5, txt_table.pop(2))