path = u"_tmp/src/vpp_performance_tests/comparisons/"
else:
path = u"_tmp/src/dpdk_performance_tests/comparisons/"
+ logging.info(f" Writing the HTML file to {path}{file_name}.rst")
with open(f"{path}{file_name}.rst", u"wt") as rst_file:
rst_file.write(
u"\n"
f'</iframe>\n\n'
)
- # TODO: Use html (rst) list for legend and footnote
if legend:
- rst_file.write(legend[1:].replace(u"\n", u" |br| "))
+ try:
+ itm_lst = legend[1:-2].split(u"\n")
+ rst_file.write(
+ f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
+ )
+ except IndexError as err:
+ logging.error(f"Legend cannot be written to html file\n{err}")
if footnote:
- rst_file.write(footnote.replace(u"\n", u" |br| ")[1:])
+ try:
+ itm_lst = footnote[1:].split(u"\n")
+ rst_file.write(
+ f"{itm_lst[0]}\n\n- " + u'\n- '.join(itm_lst[1:]) + u"\n\n"
+ )
+ except IndexError as err:
+ logging.error(f"Footnote cannot be written to html file\n{err}")
def table_soak_vs_ndr(table, input_data):
tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
# Generate csv tables:
- csv_file = f"{table[u'output-file']}.csv"
- with open(csv_file, u"wt") as file_handler:
+ csv_file_name = f"{table[u'output-file']}.csv"
+ with open(csv_file_name, u"wt") as file_handler:
file_handler.write(header_str)
for test in tbl_lst:
file_handler.write(u";".join([str(item) for item in test]) + u"\n")
convert_csv_to_pretty_txt(
- csv_file, f"{table[u'output-file']}.txt", delimiter=u";"
+ csv_file_name, f"{table[u'output-file']}.txt", delimiter=u";"
)
- with open(f"{table[u'output-file']}.txt", u'a') as txt_file:
- txt_file.write(legend)
+ with open(f"{table[u'output-file']}.txt", u'a') as file_handler:
+ file_handler.write(legend)
# Generate html table:
_tpc_generate_html_table(
classification_lst[-win_size+1:].count(u"progression")])
tbl_lst.sort(key=lambda rel: rel[0])
+ tbl_lst.sort(key=lambda rel: rel[3])
+ tbl_lst.sort(key=lambda rel: rel[2])
tbl_sorted = list()
for nrr in range(table[u"window"], -1, -1):
tbl_reg = [item for item in tbl_lst if item[4] == nrr]
for nrp in range(table[u"window"], -1, -1):
tbl_out = [item for item in tbl_reg if item[5] == nrp]
- tbl_out.sort(key=lambda rel: rel[2])
tbl_sorted.extend(tbl_out)
file_name = f"{table[u'output-file']}{table[u'output-file-ext']}"
else:
driver = u"dpdk"
- if u"acl" in test_name or \
- u"macip" in test_name or \
- u"nat" in test_name or \
- u"policer" in test_name or \
- u"cop" in test_name:
+ if u"macip-iacl1s" in test_name:
+ bsf = u"features-macip-iacl1"
+ elif u"macip-iacl10s" in test_name:
+ bsf = u"features-macip-iacl10"
+ elif u"macip-iacl50s" in test_name:
+ bsf = u"features-macip-iacl50"
+ elif u"iacl1s" in test_name:
+ bsf = u"features-iacl1"
+ elif u"iacl10s" in test_name:
+ bsf = u"features-iacl10"
+ elif u"iacl50s" in test_name:
+ bsf = u"features-iacl50"
+ elif u"oacl1s" in test_name:
+ bsf = u"features-oacl1"
+ elif u"oacl10s" in test_name:
+ bsf = u"features-oacl10"
+ elif u"oacl50s" in test_name:
+ bsf = u"features-oacl50"
+ elif u"udpsrcscale" in test_name:
+ bsf = u"features-udp"
+ elif u"iacl" in test_name:
+ bsf = u"features"
+ elif u"policer" in test_name:
+ bsf = u"features"
+ elif u"cop" in test_name:
+ bsf = u"features"
+ elif u"nat" in test_name:
+ bsf = u"features"
+ elif u"macip" in test_name:
bsf = u"features"
elif u"scale" in test_name:
bsf = u"scale"
if not table.get(u"testbed", None):
logging.error(
f"The testbed is not defined for the table "
- f"{table.get(u'title', u'')}."
+ f"{table.get(u'title', u'')}. Skipping."
)
return
+ test_type = table.get(u"test-type", u"MRR")
+ if test_type not in (u"MRR", u"NDR", u"PDR"):
+ logging.error(
+ f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
+ f"Skipping."
+ )
+ return
+
+ if test_type in (u"NDR", u"PDR"):
+ lnk_dir = u"../ndrpdr_trending/"
+ lnk_sufix = f"-{test_type.lower()}"
+ else:
+ lnk_dir = u"../trending/"
+ lnk_sufix = u""
+
logging.info(f" Generating the table {table.get(u'title', u'')} ...")
try:
tdata,
u"a",
attrib=dict(
- href=f"../trending/"
+ href=f"{lnk_dir}"
f"{_generate_url(table.get(u'testbed', ''), item)}"
+ f"{lnk_sufix}"
)
)
ref.text = item
if not table.get(u"testbed", None):
logging.error(
f"The testbed is not defined for the table "
- f"{table.get(u'title', u'')}."
+ f"{table.get(u'title', u'')}. Skipping."
)
return
+ test_type = table.get(u"test-type", u"MRR")
+ if test_type not in (u"MRR", u"NDR", u"PDR", u"NDRPDR"):
+ logging.error(
+ f"Test type {table.get(u'test-type', u'MRR')} is not defined. "
+ f"Skipping."
+ )
+ return
+
+ if test_type in (u"NDRPDR", u"NDR", u"PDR"):
+ lnk_dir = u"../ndrpdr_trending/"
+ lnk_sufix = u"-pdr"
+ else:
+ lnk_dir = u"../trending/"
+ lnk_sufix = u""
+
logging.info(f" Generating the table {table.get(u'title', u'')} ...")
try:
attrib=dict(align=u"left" if c_idx == 0 else u"center")
)
# Name:
- if c_idx == 0:
+ if c_idx == 0 and table.get(u"add-links", True):
ref = ET.SubElement(
tdata,
u"a",
attrib=dict(
- href=f"../trending/"
+ href=f"{lnk_dir}"
f"{_generate_url(table.get(u'testbed', ''), item)}"
+ f"{lnk_sufix}"
)
)
ref.text = item
for line in tbl_cmp_lst:
row = [line[0], ]
for idx, itm in enumerate(line[1:]):
- if itm is None:
+ if itm is None or not isinstance(itm, dict) or\
+ itm.get(u'mean', None) is None or \
+ itm.get(u'stdev', None) is None:
row.append(u"NT")
row.append(u"NT")
else:
legend = u"\n" + u"\n".join(legend_lst) + u"\n"
footnote = u""
- for rca in rcas:
- footnote += f"\n{rca[u'title']}:\n"
- footnote += rca[u"data"].get(u"footnote", u"")
+ if rcas:
+ footnote += u"\nRCA:\n"
+ for rca in rcas:
+ footnote += rca[u"data"].get(u"footnote", u"")
- csv_file = f"{table[u'output-file']}-csv.csv"
- with open(csv_file, u"wt", encoding='utf-8') as file_handler:
+ csv_file_name = f"{table[u'output-file']}-csv.csv"
+ with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
file_handler.write(
u",".join([f'"{itm}"' for itm in header_csv]) + u"\n"
)
for line in tbl_cmp_lst:
row = [line[0], ]
for idx, itm in enumerate(line[1:]):
- if itm is None:
+ if itm is None or not isinstance(itm, dict) or \
+ itm.get(u'mean', None) is None or \
+ itm.get(u'stdev', None) is None:
new_itm = u"NT"
else:
if idx < len(cols):
header.extend([rca[u"title"] for rca in rcas])
# Generate csv tables:
- csv_file = f"{table[u'output-file']}.csv"
- with open(csv_file, u"wt", encoding='utf-8') as file_handler:
+ csv_file_name = f"{table[u'output-file']}.csv"
+ logging.info(f" Writing the file {csv_file_name}")
+ with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
file_handler.write(u";".join(header) + u"\n")
for test in tbl_final:
file_handler.write(u";".join([str(item) for item in test]) + u"\n")
# Generate txt table:
txt_file_name = f"{table[u'output-file']}.txt"
- convert_csv_to_pretty_txt(csv_file, txt_file_name, delimiter=u";")
+ logging.info(f" Writing the file {txt_file_name}")
+ convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u";")
- with open(txt_file_name, u'a', encoding='utf-8') as txt_file:
- txt_file.write(legend)
- txt_file.write(footnote)
+ with open(txt_file_name, u'a', encoding='utf-8') as file_handler:
+ file_handler.write(legend)
+ file_handler.write(footnote)
# Generate html table:
_tpc_generate_html_table(
ref_data = tst_data.get(idx_ref, None)
cmp_data = tst_data.get(idx_cmp, None)
if ref_data is None or cmp_data is None:
- cmp_dict[tst_name].append(float('nan'))
+ cmp_dict[tst_name].append(float(u'nan'))
else:
cmp_dict[tst_name].append(
relative_change(ref_data, cmp_data)
)
+ tbl_lst_none = list()
tbl_lst = list()
for tst_name, tst_data in tbl_dict.items():
itm_lst = [tst_data[u"name"], ]
for itm in cmp_dict[tst_name]
]
)
- tbl_lst.append(itm_lst)
+ if str(itm_lst[-1]) == u"nan" or itm_lst[-1] is None:
+ tbl_lst_none.append(itm_lst)
+ else:
+ tbl_lst.append(itm_lst)
+ tbl_lst_none.sort(key=lambda rel: rel[0], reverse=False)
tbl_lst.sort(key=lambda rel: rel[0], reverse=False)
- tbl_lst.sort(key=lambda rel: rel[-1], reverse=True)
+ tbl_lst.sort(key=lambda rel: rel[-1], reverse=False)
+ tbl_lst.extend(tbl_lst_none)
# Generate csv table:
- csv_file = f"{table[u'output-file']}.csv"
- with open(csv_file, u"wt", encoding='utf-8') as file_handler:
+ csv_file_name = f"{table[u'output-file']}.csv"
+ logging.info(f" Writing the file {csv_file_name}")
+ with open(csv_file_name, u"wt", encoding='utf-8') as file_handler:
for hdr in header:
file_handler.write(u",".join(hdr) + u"\n")
for test in tbl_lst:
]
) + u"\n")
- txt_file = f"{table[u'output-file']}.txt"
- convert_csv_to_pretty_txt(csv_file, txt_file, delimiter=u",")
+ txt_file_name = f"{table[u'output-file']}.txt"
+ logging.info(f" Writing the file {txt_file_name}")
+ convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u",")
# Reorganize header in txt table
txt_table = list()
- with open(txt_file, u"rt", encoding='utf-8') as file_handler:
+ with open(txt_file_name, u"rt", encoding='utf-8') as file_handler:
for line in file_handler:
txt_table.append(line)
try:
txt_table.insert(5, txt_table.pop(2))
- with open(txt_file, u"wt", encoding='utf-8') as file_handler:
+ with open(txt_file_name, u"wt", encoding='utf-8') as file_handler:
file_handler.writelines(txt_table)
except IndexError:
pass