CSIT-1500: Add comparison table for SOAK vs NDRPDR
[csit.git] / resources/tools/presentation/generator_tables.py
index 7590daa..1a15605 100644
@@ -1,4 +1,4 @@
-# Copyright (c) 2018 Cisco and/or its affiliates.
+# Copyright (c) 2019 Cisco and/or its affiliates.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at:
@@ -96,7 +96,7 @@ def table_details(table, input_data):
                     try:
                         col_data = str(data[job][build][test][column["data"].
                                        split(" ")[1]]).replace('"', '""')
-                        if column["data"].split(" ")[1] in ("vat-history",
+                        if column["data"].split(" ")[1] in ("conf-history",
                                                             "show-run"):
                             col_data = replace(col_data, " |br| ", "",
                                                maxreplace=1)
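
Note on the trimming step above: replace(col_data, " |br| ", "", maxreplace=1) drops only the first line-break marker so the rendered cell does not start with an empty line. A minimal sketch, assuming the project's "replace" helper behaves like str.replace with a replacement count (the cell content below is hypothetical):

    # Strip only the first " |br| " marker from a cell.
    col_data = " |br| show version |br| show interface"
    col_data = col_data.replace(" |br| ", "", 1)
    # -> "show version |br| show interface"; later markers are kept.
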
@@ -161,7 +161,9 @@ def table_merged_details(table, input_data):
                     try:
                         col_data = str(data[test][column["data"].
                                        split(" ")[1]]).replace('"', '""')
-                        if column["data"].split(" ")[1] in ("vat-history",
+                        col_data = replace(col_data, "No Data",
+                                           "Not Captured     ")
+                        if column["data"].split(" ")[1] in ("conf-history",
                                                             "show-run"):
                             col_data = replace(col_data, " |br| ", "",
                                                maxreplace=1)
@@ -169,7 +171,7 @@ def table_merged_details(table, input_data):
                                 format(col_data[:-5])
                         row_lst.append('"{0}"'.format(col_data))
                     except KeyError:
-                        row_lst.append("No data")
+                        row_lst.append('"Not captured"')
                 table_lst.append(row_lst)
 
         # Write the data to file
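
The merged-details table now writes missing columns as "Not captured" (quoted) instead of "No data", and keeps the manual CSV quoting convention: embedded quotes are doubled and the whole cell is wrapped in quotes. A minimal sketch of that convention, using a hypothetical csv_cell helper for the quoting and the missing-key case only:

    # Quote a cell for the generated CSV: double embedded quotes,
    # wrap in quotes, and render missing data as "Not captured".
    def csv_cell(value):
        if value is None:
            return '"Not captured"'
        return '"{0}"'.format(str(value).replace('"', '""'))

    # csv_cell('show version "21.01"') -> '"show version ""21.01"""'
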
@@ -245,9 +247,10 @@ def table_performance_comparison(table, input_data):
                 if "across topologies" in table["title"].lower():
                     tst_name_mod = tst_name_mod.replace("2n1l-", "")
                 if tbl_dict.get(tst_name_mod, None) is None:
-                    name = "{0}-{1}".format(tst_data["parent"].split("-")[0],
-                                            "-".join(tst_data["name"].
-                                                     split("-")[:-1]))
+                    groups = re.search(REGEX_NIC, tst_data["parent"])
+                    nic = groups.group(0) if groups else ""
+                    name = "{0}-{1}".format(nic, "-".join(tst_data["name"].
+                                                          split("-")[:-1]))
                     if "across testbeds" in table["title"].lower() or \
                             "across topologies" in table["title"].lower():
                         name = name.\
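
The row name is now prefixed with the NIC code extracted from the parent suite name via REGEX_NIC, rather than with the parent's first dash-separated token. A minimal sketch of that extraction, with a hypothetical pattern and test name; the real REGEX_NIC is defined elsewhere in this module and may differ:

    import re

    # Hypothetical stand-in for REGEX_NIC, matching codes such as
    # "x520", "xl710", "xxv710".
    REGEX_NIC_SKETCH = re.compile(r"x[lx]?v?\d+")
    parent = "10ge2p1x520-ethip4-ip4base-ndrpdr"   # hypothetical suite name
    groups = re.search(REGEX_NIC_SKETCH, parent)
    nic = groups.group(0) if groups else ""
    # nic == "x520"; an empty string keeps the row usable when the
    # parent name carries no recognizable NIC code.
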
@@ -412,6 +415,225 @@ def table_performance_comparison(table, input_data):
     convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
 
 
+def table_nics_comparison(table, input_data):
+    """Generate the table(s) with algorithm: table_nics_comparison
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    logging.info("  Generating the table {0} ...".
+                 format(table.get("title", "")))
+
+    # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
+    data = input_data.filter_data(table, continue_on_error=True)
+
+    # Prepare the header of the tables
+    try:
+        header = ["Test case", ]
+
+        if table["include-tests"] == "MRR":
+            hdr_param = "Receive Rate"
+        else:
+            hdr_param = "Throughput"
+
+        header.extend(
+            ["{0} {1} [Mpps]".format(table["reference"]["title"], hdr_param),
+             "{0} Stdev [Mpps]".format(table["reference"]["title"]),
+             "{0} {1} [Mpps]".format(table["compare"]["title"], hdr_param),
+             "{0} Stdev [Mpps]".format(table["compare"]["title"]),
+             "Delta [%]"])
+        header_str = ",".join(header) + "\n"
+    except (AttributeError, KeyError) as err:
+        logging.error("The model is invalid, missing parameter: {0}".
+                      format(err))
+        return
+
+    # Prepare data for the table:
+    tbl_dict = dict()
+    for job, builds in table["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                tst_name_mod = tst_name.replace("-ndrpdrdisc", "").\
+                    replace("-ndrpdr", "").replace("-pdrdisc", "").\
+                    replace("-ndrdisc", "").replace("-pdr", "").\
+                    replace("-ndr", "").\
+                    replace("1t1c", "1c").replace("2t1c", "1c").\
+                    replace("2t2c", "2c").replace("4t2c", "2c").\
+                    replace("4t4c", "4c").replace("8t4c", "4c")
+                tst_name_mod = re.sub(REGEX_NIC, "", tst_name_mod)
+                if tbl_dict.get(tst_name_mod, None) is None:
+                    name = "-".join(tst_data["name"].split("-")[:-1])
+                    tbl_dict[tst_name_mod] = {"name": name,
+                                              "ref-data": list(),
+                                              "cmp-data": list()}
+                try:
+                    if table["include-tests"] == "MRR":
+                        result = tst_data["result"]["receive-rate"].avg
+                    elif table["include-tests"] == "PDR":
+                        result = tst_data["throughput"]["PDR"]["LOWER"]
+                    elif table["include-tests"] == "NDR":
+                        result = tst_data["throughput"]["NDR"]["LOWER"]
+                    else:
+                        result = None
+
+                    if result:
+                        if table["reference"]["nic"] in tst_data["tags"]:
+                            tbl_dict[tst_name_mod]["ref-data"].append(result)
+                        elif table["compare"]["nic"] in tst_data["tags"]:
+                            tbl_dict[tst_name_mod]["cmp-data"].append(result)
+                except (TypeError, KeyError) as err:
+                    logging.debug("No data for {0}".format(tst_name))
+                    logging.debug(repr(err))
+                    # No data in output.xml for this test
+
+    tbl_lst = list()
+    for tst_name in tbl_dict.keys():
+        item = [tbl_dict[tst_name]["name"], ]
+        data_t = tbl_dict[tst_name]["ref-data"]
+        if data_t:
+            item.append(round(mean(data_t) / 1000000, 2))
+            item.append(round(stdev(data_t) / 1000000, 2))
+        else:
+            item.extend([None, None])
+        data_t = tbl_dict[tst_name]["cmp-data"]
+        if data_t:
+            item.append(round(mean(data_t) / 1000000, 2))
+            item.append(round(stdev(data_t) / 1000000, 2))
+        else:
+            item.extend([None, None])
+        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
+            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
+        if len(item) == len(header):
+            tbl_lst.append(item)
+
+    # Sort the table according to the relative change
+    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+
+    # Generate csv tables:
+    csv_file = "{0}.csv".format(table["output-file"])
+    with open(csv_file, "w") as file_handler:
+        file_handler.write(header_str)
+        for test in tbl_lst:
+            file_handler.write(",".join([str(item) for item in test]) + "\n")
+
+    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
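
table_nics_comparison reduces each test's reference-NIC and compare-NIC samples to mean and standard deviation in Mpps and appends a relative delta. A minimal sketch of that row aggregation: the stdlib statistics module stands in for the project's mean/stdev helpers, and relative_change_sketch is an assumed definition (percent change from the reference value), not the real utils helper.

    from statistics import mean, stdev

    def relative_change_sketch(ref, cmp):
        return float(cmp - ref) / ref * 100

    ref_samples = [14.2e6, 14.5e6, 14.4e6]   # hypothetical pps results
    cmp_samples = [18.1e6, 18.3e6]           # hypothetical pps results

    row = ["64b-1c-ethip4-ip4base",          # hypothetical test name
           round(mean(ref_samples) / 1e6, 2),
           round(stdev(ref_samples) / 1e6, 2),
           round(mean(cmp_samples) / 1e6, 2),
           round(stdev(cmp_samples) / 1e6, 2),
           int(relative_change_sketch(mean(ref_samples), mean(cmp_samples)))]

Rows missing either side are padded with None and filtered out by the len(item) == len(header) check before sorting.
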
+
+
+def table_soak_vs_ndr(table, input_data):
+    """Generate the table(s) with algorithm: table_soak_vs_ndr
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    logging.info("  Generating the table {0} ...".
+                 format(table.get("title", "")))
+
+    # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
+    data = input_data.filter_data(table, continue_on_error=True)
+
+    # Prepare the header of the table
+    try:
+        header = [
+            "Test case",
+            "{0} Throughput [Mpps]".format(table["reference"]["title"]),
+            "{0} Stdev [Mpps]".format(table["reference"]["title"]),
+            "{0} Throughput [Mpps]".format(table["compare"]["title"]),
+            "{0} Stdev [Mpps]".format(table["compare"]["title"]),
+            "Delta [%]"]
+        header_str = ",".join(header) + "\n"
+    except (AttributeError, KeyError) as err:
+        logging.error("The model is invalid, missing parameter: {0}".
+                      format(err))
+        return
+
+    # Create a list of available SOAK test results:
+    tbl_dict = dict()
+    for job, builds in table["compare"]["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                if tst_data["type"] == "SOAK":
+                    tst_name_mod = tst_name.replace("-soak", "")
+                    if tbl_dict.get(tst_name_mod, None) is None:
+                        tbl_dict[tst_name_mod] = {
+                            "name": tst_name_mod,
+                            "ref-data": list(),
+                            "cmp-data": list()
+                        }
+                    try:
+                        tbl_dict[tst_name_mod]["cmp-data"].append(
+                            tst_data["throughput"]["LOWER"])
+                    except (KeyError, TypeError):
+                        pass
+    tests_lst = tbl_dict.keys()
+
+    # Add corresponding NDR test results:
+    for job, builds in table["reference"]["data"].items():
+        for build in builds:
+            for tst_name, tst_data in data[job][str(build)].iteritems():
+                tst_name_mod = tst_name.replace("-ndrpdr", "").\
+                    replace("-mrr", "")
+                if tst_name_mod in tests_lst:
+                    try:
+                        if tst_data["type"] in ("NDRPDR", "MRR", "BMRR"):
+                            if table["include-tests"] == "MRR":
+                                result = tst_data["result"]["receive-rate"].avg
+                            elif table["include-tests"] == "PDR":
+                                result = tst_data["throughput"]["PDR"]["LOWER"]
+                            elif table["include-tests"] == "NDR":
+                                result = tst_data["throughput"]["NDR"]["LOWER"]
+                            else:
+                                result = None
+                            if result is not None:
+                                tbl_dict[tst_name_mod]["ref-data"].append(
+                                    result)
+                    except (KeyError, TypeError):
+                        continue
+
+    tbl_lst = list()
+    for tst_name in tbl_dict.keys():
+        item = [tbl_dict[tst_name]["name"], ]
+        data_t = tbl_dict[tst_name]["ref-data"]
+        if data_t:
+            item.append(round(mean(data_t) / 1000000, 2))
+            item.append(round(stdev(data_t) / 1000000, 2))
+        else:
+            item.extend([None, None])
+        data_t = tbl_dict[tst_name]["cmp-data"]
+        if data_t:
+            item.append(round(mean(data_t) / 1000000, 2))
+            item.append(round(stdev(data_t) / 1000000, 2))
+        else:
+            item.extend([None, None])
+        if item[-4] is not None and item[-2] is not None and item[-4] != 0:
+            item.append(int(relative_change(float(item[-4]), float(item[-2]))))
+        if len(item) == len(header):
+            tbl_lst.append(item)
+
+    # Sort the table according to the relative change
+    tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+
+    # Generate csv tables:
+    csv_file = "{0}.csv".format(table["output-file"])
+    with open(csv_file, "w") as file_handler:
+        file_handler.write(header_str)
+        for test in tbl_lst:
+            file_handler.write(",".join([str(item) for item in test]) + "\n")
+
+    convert_csv_to_pretty_txt(csv_file, "{0}.txt".format(table["output-file"]))
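
table_soak_vs_ndr pairs SOAK and NDRPDR runs of the same test by normalizing the test-name suffix to a common key. A minimal sketch with hypothetical test names:

    # Both names reduce to the same dictionary key, so the SOAK result
    # lands in "cmp-data" and the NDR result in "ref-data" of one row.
    soak_name = "64b-2t1c-ethip4-ip4base-soak"
    ndr_name = "64b-2t1c-ethip4-ip4base-ndrpdr"

    soak_key = soak_name.replace("-soak", "")
    ndr_key = ndr_name.replace("-ndrpdr", "").replace("-mrr", "")
    assert soak_key == ndr_key == "64b-2t1c-ethip4-ip4base"
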
+
+
 def table_performance_trending_dashboard(table, input_data):
     """Generate the table(s) with algorithm:
     table_performance_trending_dashboard
@@ -497,12 +719,15 @@ def table_performance_trending_dashboard(table, input_data):
         if classification_lst:
             if isnan(rel_change_last) and isnan(rel_change_long):
                 continue
+            if (isnan(last_avg) or
+                isnan(rel_change_last) or
+                isnan(rel_change_long)):
+                continue
             tbl_lst.append(
                 [tbl_dict[tst_name]["name"],
-                 '-' if isnan(last_avg) else
                  round(last_avg / 1000000, 2),
-                 '-' if isnan(rel_change_last) else rel_change_last,
-                 '-' if isnan(rel_change_long) else rel_change_long,
+                 rel_change_last,
+                 rel_change_long,
                  classification_lst[-win_size:].count("regression"),
                  classification_lst[-win_size:].count("progression")])
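
The dashboard now skips tests with any undefined statistic instead of rendering "-" placeholders in the row. A minimal sketch of that guard, with hypothetical (name, last_avg, rel_change_last, rel_change_long) tuples:

    from math import isnan

    rows = [("test-a", 12.3, 1.5, -0.2),
            ("test-b", float("nan"), 0.4, 0.1)]
    kept = [r for r in rows if not any(isnan(v) for v in r[1:])]
    # kept contains only "test-a"; "test-b" is dropped entirely.
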
 
@@ -569,9 +794,9 @@ def _generate_url(base, testbed, test_name):
             file_name = "vm_vhost_l2"
             if "114b" in test_name:
                 feature = ""
-            elif "l2xcbase" in test_name:
+            elif "l2xcbase" in test_name and "x520" in test_name:
                 feature = "-base-l2xc"
-            elif "l2bdbasemaclrn" in test_name:
+            elif "l2bdbasemaclrn" in test_name and "x520" in test_name:
                 feature = "-base-l2bd"
             else:
                 feature = "-base"
@@ -625,6 +850,10 @@ def _generate_url(base, testbed, test_name):
         nic = "xl710-"
     elif "xxv710" in test_name:
         nic = "xxv710-"
+    elif "vic1227" in test_name:
+        nic = "vic1227-"
+    elif "vic1385" in test_name:
+        nic = "vic1385-"
     else:
         nic = ""
     anchor += nic
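
The anchor now also recognizes Cisco VIC 1227 and VIC 1385 NICs. As a design note, the growing elif chain could be expressed as a lookup over known NIC codes; a sketch of that alternative (not the module's code), listing only the codes visible in this diff:

    KNOWN_NICS = ("x520", "xl710", "xxv710", "vic1227", "vic1385")

    def nic_prefix(test_name):
        # Return the anchor prefix of the first known NIC code found
        # in the test name, or "" when none matches.
        for code in KNOWN_NICS:
            if code in test_name:
                return code + "-"
        return ""

    # nic_prefix("10ge2p1vic1227-ethip4-ip4base") -> "vic1227-"
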
@@ -740,6 +969,57 @@ def table_performance_trending_dashboard_html(table, input_data):
         return
 
 
+def table_last_failed_tests(table, input_data):
+    """Generate the table(s) with algorithm: table_last_failed_tests
+    specified in the specification file.
+
+    :param table: Table to generate.
+    :param input_data: Data to process.
+    :type table: pandas.Series
+    :type input_data: InputData
+    """
+
+    logging.info("  Generating the table {0} ...".
+                 format(table.get("title", "")))
+
+    # Transform the data
+    logging.info("    Creating the data set for the {0} '{1}'.".
+                 format(table.get("type", ""), table.get("title", "")))
+    data = input_data.filter_data(table, continue_on_error=True)
+
+    if data is None or data.empty:
+        logging.warn("    No data for the {0} '{1}'.".
+                     format(table.get("type", ""), table.get("title", "")))
+        return
+
+    tbl_list = list()
+    for job, builds in table["data"].items():
+        for build in builds:
+            build = str(build)
+            try:
+                version = input_data.metadata(job, build).get("version", "")
+            except KeyError:
+                logging.error("Data for {job}: {build} is not present.".
+                              format(job=job, build=build))
+                return
+            tbl_list.append(build)
+            tbl_list.append(version)
+            for tst_name, tst_data in data[job][build].iteritems():
+                if tst_data["status"] != "FAIL":
+                    continue
+                groups = re.search(REGEX_NIC, tst_data["parent"])
+                if not groups:
+                    continue
+                nic = groups.group(0)
+                tbl_list.append("{0}-{1}".format(nic, tst_data["name"]))
+
+    file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
+    logging.info("    Writing file: '{0}'".format(file_name))
+    with open(file_name, "w") as file_handler:
+        for test in tbl_list:
+            file_handler.write(test + '\n')
+
+
 def table_failed_tests(table, input_data):
     """Generate the table(s) with algorithm: table_failed_tests
     specified in the specification file.
@@ -798,9 +1078,11 @@ def table_failed_tests(table, input_data):
                             generated,
                             input_data.metadata(job, build).get("version", ""),
                             build)
-                except (TypeError, KeyError):
-                    pass  # No data in output.xml for this test
+                except (TypeError, KeyError) as err:
+                    logging.warning("tst_name: {} - err: {}".
+                                    format(tst_name, repr(err)))
 
+    max_fails = 0
     tbl_lst = list()
     for tst_data in tbl_dict.values():
         fails_nr = 0
@@ -811,6 +1093,7 @@ def table_failed_tests(table, input_data):
                 fails_last_vpp = val[2]
                 fails_last_csit = val[3]
         if fails_nr:
+            max_fails = fails_nr if fails_nr > max_fails else max_fails
             tbl_lst.append([tst_data["name"],
                             fails_nr,
                             fails_last_date,
@@ -819,7 +1102,7 @@ def table_failed_tests(table, input_data):
 
     tbl_lst.sort(key=lambda rel: rel[2], reverse=True)
     tbl_sorted = list()
-    for nrf in range(table["window"], -1, -1):
+    for nrf in range(max_fails, -1, -1):
         tbl_fails = [item for item in tbl_lst if item[1] == nrf]
         tbl_sorted.extend(tbl_fails)
     file_name = "{0}{1}".format(table["output-file"], table["output-file-ext"])
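
With the bucket bound taken from the observed max_fails rather than the configured window, tests that failed in more builds than the window value are no longer skipped by the bucketing loop. As a design note, the bucket loop (highest failure count first, each bucket already ordered by last-failure date) is equivalent to two stable sorts; a sketch with hypothetical [name, fails_nr, last_date] rows:

    tbl = [["t-a", 2, "20190301"], ["t-b", 5, "20190210"],
           ["t-c", 5, "20190228"]]
    tbl.sort(key=lambda row: row[2], reverse=True)   # newest failure first
    tbl.sort(key=lambda row: row[1], reverse=True)   # most failures first
    # -> t-c, t-b, t-a
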