# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generation of Continuous Performance Trending and Analysis.
"""

import re
import logging
import csv

from collections import OrderedDict
from datetime import datetime
from copy import deepcopy

import prettytable
import plotly.offline as ploff
import plotly.graph_objs as plgo
import plotly.exceptions as plerr

from pal_utils import archive_input_data, execute_command, classify_anomalies
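
# Each trending sample is classified by classify_anomalies() as "normal",
# "regression", "progression" or "outlier"; the classification drives both
# the anomaly markers in the charts and the final PASS/FAIL result.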

# Command to build the html format of the report
HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \
               u'-b html ' \
               u'-D version="{date}" ' \
               u'{working_dir} ' \
               u'{build_dir}/'

# .css file for the html format of the report
THEME_OVERRIDES = u"""/* override table width restrictions */
.wy-nav-content {
    max-width: 1200px !important;
}
.rst-content blockquote {
    display: inline-block;
}
.wy-menu-vertical li.current a {
    border-right: solid 1px #c9c9c9;
}
.wy-menu-vertical li.toctree-l2.current > a {
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
}
.wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
}
.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
    border-top-width: medium;
    border-bottom-width: medium;
    border-top-style: none;
    border-bottom-style: none;
    border-top-color: currentcolor;
    border-bottom-color: currentcolor;
    padding-left: 2em -4px;
}
"""


def generate_cpta(spec, data):
    """Generate all formats and versions of the Continuous Performance
    Trending and Analysis.

    :param spec: Specification read from the specification file.
    :param data: Full data set.
    :type spec: Specification
    :type data: InputData
    """

    logging.info(u"Generating the Continuous Performance Trending and Analysis "
                 u"...")

    ret_code = _generate_all_charts(spec, data)

    cmd = HTML_BUILDER.format(
        date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
        working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
        build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
    execute_command(cmd)

    with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
            css_file:
        css_file.write(THEME_OVERRIDES)

    with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
            css_file:
        css_file.write(THEME_OVERRIDES)

    if spec.environment.get(u"archive-inputs", False):
        archive_input_data(spec)

    logging.info(u"Done.")

    return ret_code


def _generate_trending_traces(in_data, job_name, build_info,
                              name=u"", color=u"", incl_tests=u"mrr"):
    """Generate the trending traces:
     - samples,
     - outliers, regress, progress
     - average of normal samples (trending line)

    :param in_data: Full data set.
    :param job_name: The name of job which generated the data.
    :param build_info: Information about the builds.
    :param name: Name of the plot.
    :param color: Name of the color for the plot.
    :param incl_tests: Included tests, accepted values: mrr, ndr, pdr, pdr-lat.
    :type in_data: OrderedDict
    :type job_name: str
    :type build_info: dict
    :type name: str
    :type color: str
    :type incl_tests: str
    :returns: Generated traces (list) and the evaluated result.
    :rtype: tuple(traces, result)
    """

    if incl_tests not in (u"mrr", u"ndr", u"pdr", u"pdr-lat"):
        return list(), None
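
    # The x-axis keys are build numbers; the y-axis values are either receive
    # rates (pps, scaled to Mpps for display) or PDR50 latencies in seconds.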
    data_x = list(in_data.keys())
    data_y_pps = list()
    data_y_mpps = list()
    data_y_stdev = list()
    if incl_tests == u"pdr-lat":
        for item in in_data.values():
            data_y_pps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
            data_y_stdev.append(float(u"nan"))
            data_y_mpps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
        multi = 1.0
    else:
        for item in in_data.values():
            data_y_pps.append(float(item[u"receive-rate"]))
            data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
            data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
        multi = 1e6
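
    # data_y_pps keeps the raw per-second values for anomaly classification;
    # data_y_mpps and data_y_stdev are scaled for display in the hover text.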
    hover_text = list()
    xaxis = list()
    for index, key in enumerate(data_x):
        str_key = str(key)
        date = build_info[job_name][str_key][0]
        hover_str = (u"date: {date}<br>"
                     u"{property} [Mpps]: <val><br>"
                     u"<stdev>"
                     u"{sut}-ref: {build}<br>"
                     u"csit-ref: {test}-{period}-build-{build_nr}<br>"
                     u"testbed: {testbed}")
        if incl_tests == u"mrr":
            hover_str = hover_str.replace(
                u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
            )
        else:
            hover_str = hover_str.replace(u"<stdev>", u"")
        if incl_tests == u"pdr-lat":
            hover_str = hover_str.replace(
                u"throughput [Mpps]", u"latency [s]"
            )
            hover_str = hover_str.replace(u"<val>", u"{value:.1e}")
        else:
            hover_str = hover_str.replace(u"<val>", u"{value:.3f}")
        if u"-cps" in name:
            # NOTE: This condition is a reconstructed assumption for
            # connection-rate (cps) tests; the replace below is original.
            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]").\
                replace(u"throughput", u"connection rate")
        if u"dpdk" in job_name:
            hover_text.append(hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"dpdk",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2]))
        elif u"vpp" in job_name:
            hover_text.append(hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"vpp",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2]))

        xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                              int(date[9:11]), int(date[12:])))
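
    # Build an ordered {datetime: pps} series and classify each sample; the
    # classification drives both the trend line and the anomaly markers.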
    data_pd = OrderedDict()
    for key, value in zip(xaxis, data_y_pps):
        data_pd[key] = value

    try:
        anomaly_classification, avgs_pps, stdevs_pps = \
            classify_anomalies(data_pd)
    except ValueError as err:
        logging.info(f"{err} Skipping")
        return list(), None

    avgs_mpps = [avg_pps / multi for avg_pps in avgs_pps]
    stdevs_mpps = [stdev_pps / multi for stdev_pps in stdevs_pps]

    anomalies = OrderedDict()
    anomalies_colors = list()
    anomalies_avgs = list()
    anomaly_color = {
        u"regression": 0.0,
        u"normal": 0.5,
        u"progression": 1.0
    }
    if anomaly_classification:
        for index, (key, value) in enumerate(data_pd.items()):
            if anomaly_classification[index] in (u"regression", u"progression"):
                anomalies[key] = value / multi
                anomalies_colors.append(
                    anomaly_color[anomaly_classification[index]])
                anomalies_avgs.append(avgs_mpps[index])
        anomalies_colors.extend([0.0, 0.5, 1.0])
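
    # The three extra colour values make sure the colorscale always spans all
    # classes, so the colorbar legend is rendered consistently.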

    # Trace showing the individual samples.
    trace_samples = plgo.Scatter(
        x=xaxis,
        y=data_y_mpps,
        mode=u"markers",
        name=name,
        marker={
            u"color": color,
            u"symbol": u"circle",
        },
        text=hover_text,
        hoverinfo=u"text+name"
    )
    traces = [trace_samples, ]

    trend_hover_text = list()
    for idx in range(len(data_x)):
        if incl_tests == u"pdr-lat":
            trend_hover_str = (
                f"trend [s]: {avgs_mpps[idx]:.1e}<br>"
            )
        else:
            trend_hover_str = (
                f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
                f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
            )
        trend_hover_text.append(trend_hover_str)

    # Trace showing the trend line (average of normal samples).
    trace_trend = plgo.Scatter(
        x=xaxis,
        y=avgs_mpps,
        mode=u"lines",
        line={
            u"shape": u"linear",
            u"width": 1,
            u"color": color,
        },
        showlegend=False,
        name=f"{name}-trend",
        text=trend_hover_text,
        hoverinfo=u"text+name"
    )
    traces.append(trace_trend)
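
    # Trace marking regressions and progressions with colour-coded circles.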
    if incl_tests == u"pdr-lat":
        # For latency, lower is better, so labels and colours are inverted.
        colorscale = [
            [0.00, u"green"], [0.33, u"green"],
            [0.33, u"white"], [0.66, u"white"],
            [0.66, u"red"], [1.00, u"red"]
        ]
        ticktext = [u"Progression", u"Normal", u"Regression"]
    else:
        colorscale = [
            [0.00, u"red"], [0.33, u"red"],
            [0.33, u"white"], [0.66, u"white"],
            [0.66, u"green"], [1.00, u"green"]
        ]
        ticktext = [u"Regression", u"Normal", u"Progression"]
    trace_anomalies = plgo.Scatter(
        x=list(anomalies.keys()),
        y=anomalies_avgs,
        mode=u"markers",
        hoverinfo=u"none",
        showlegend=False,
        name=f"{name}-anomalies",
        marker={
            u"symbol": u"circle-open",
            u"color": anomalies_colors,
            u"colorscale": colorscale,
            u"showscale": True,
            u"colorbar": {
                u"title": u"Circles Marking Data Classification",
                u"titleside": u"right",
                u"tickmode": u"array",
                u"tickvals": [0.167, 0.500, 0.833],
                u"ticktext": ticktext
            }
        }
    )
    traces.append(trace_anomalies)

    if anomaly_classification:
        return traces, anomaly_classification[-1]

    return traces, None


def _generate_all_charts(spec, input_data):
    """Generate all charts specified in the specification file.

    :param spec: Specification.
    :param input_data: Full data set.
    :type spec: Specification
    :type input_data: InputData
    """

    def _generate_chart(graph):
        """Generates the chart.

        :param graph: The graph to be generated.
        :type graph: dict
        :returns: List of dictionaries with the job name, csv tables with
            results and tests classification results.
        :rtype: list
        """

        logging.info(f" Generating the chart {graph.get(u'title', u'')} ...")

        job_name = list(graph[u"data"].keys())[0]

        logging.info(
            f" Creating the data set for the {graph.get(u'type', u'')} "
            f"{graph.get(u'title', u'')}."
        )

        data = input_data.filter_tests_by_name(
            graph,
            params=[u"type", u"result", u"throughput", u"latency", u"tags"],
            continue_on_error=True
        )

        if data is None or data.empty:
            logging.error(u"No data.")
            return list()

        return_lst = list()
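
        # One data set and chart is generated for every combination of test
        # type ("mrr", "ndr", "pdr") and core count requested by the graph.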
        for ttype in graph.get(u"test-type", (u"mrr", )):
            for core in graph.get(u"core", tuple()):
                csv_tbl = list()
                csv_tbl_lat_1 = list()
                csv_tbl_lat_2 = list()
                res = dict()
                chart_data = dict()
                chart_tags = dict()
                for item in graph.get(u"include", tuple()):
                    reg_ex = re.compile(str(item.format(core=core)).lower())
                    for job, job_data in data.items():
                        if job != job_name:
                            continue
                        for index, bld in job_data.items():
                            for test_id, test in bld.items():
                                if not re.match(reg_ex, str(test_id).lower()):
                                    continue
                                if chart_data.get(test_id, None) is None:
                                    chart_data[test_id] = OrderedDict()
                                try:
                                    if ttype == u"mrr":
                                        rate = \
                                            test[u"result"][u"receive-rate"]
                                        stdev = \
                                            test[u"result"][u"receive-stdev"]
                                    elif ttype == u"ndr":
                                        rate = \
                                            test["throughput"][u"NDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                    elif ttype == u"pdr":
                                        rate = \
                                            test["throughput"][u"PDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                        lat_1 = test[u"latency"][u"PDR50"]\
                                            [u"direction1"][u"avg"]
                                        lat_2 = test[u"latency"][u"PDR50"]\
                                            [u"direction2"][u"avg"]
                                    chart_data[test_id][int(index)] = {
                                        u"receive-rate": rate,
                                        u"receive-stdev": stdev
                                    }
                                    if ttype == u"pdr":
                                        chart_data[test_id][int(index)].update(
                                            {
                                                u"lat_1": lat_1,
                                                u"lat_2": lat_2
                                            }
                                        )
                                    chart_tags[test_id] = \
                                        test.get(u"tags", None)
                                except (KeyError, TypeError):
                                    pass

                # Add items to the csv table:
                for tst_name, tst_data in chart_data.items():
                    tst_lst = list()
                    tst_lst_lat_1 = list()
                    tst_lst_lat_2 = list()
                    for bld in builds_dict[job_name]:
                        itm = tst_data.get(int(bld), dict())
                        # CSIT-1180: Itm will be a list, compute stats.
                        try:
                            tst_lst.append(str(itm.get(u"receive-rate", u"")))
                            tst_lst_lat_1.append(
                                str(itm.get(u"lat_1", u""))
                            )
                            tst_lst_lat_2.append(
                                str(itm.get(u"lat_2", u""))
                            )
                        except AttributeError:
                            tst_lst.append(u"")
                            tst_lst_lat_1.append(u"")
                            tst_lst_lat_2.append(u"")
                    csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
                    csv_tbl_lat_1.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n"
                    )
                    csv_tbl_lat_2.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n"
                    )
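
                # Generate one trace set per test. When "groups" are defined
                # in the specification, per-group visibility lists are built
                # so a drop-down menu can show one group at a time.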
                traces = list()
                traces_lat = list()
                index = 0
                groups = graph.get(u"groups", None)
                visibility = list()
                if groups:
                    for group in groups:
                        visible = list()
                        for tag in group:
                            for tst_name, test_data in chart_data.items():
                                if not test_data:
                                    logging.warning(
                                        f"No data for the test {tst_name}"
                                    )
                                    continue
                                if tag not in chart_tags[tst_name]:
                                    continue
                                try:
                                    trace, rslt = _generate_trending_traces(
                                        test_data,
                                        job_name=job_name,
                                        build_info=build_info,
                                        name=u'-'.join(tst_name.split(u'.')[-1].
                                                       split(u'-')[2:-1]),
                                        # COLORS is assumed to be a module
                                        # level colour palette.
                                        color=COLORS[index],
                                        incl_tests=ttype
                                    )
                                except IndexError:
                                    logging.error(f"Out of colors: index: "
                                                  f"{index}, test: {tst_name}")
                                    index += 1
                                    continue
                                traces.extend(trace)
                                visible.extend(
                                    [True for _ in range(len(trace))]
                                )
                                res[tst_name] = rslt
                                index += 1
                        visibility.append(visible)
                else:
                    for tst_name, test_data in chart_data.items():
                        if not test_data:
                            logging.warning(f"No data for the test {tst_name}")
                            continue
                        try:
                            trace, rslt = _generate_trending_traces(
                                test_data,
                                job_name=job_name,
                                build_info=build_info,
                                name=u'-'.join(
                                    tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                                color=COLORS[index],
                                incl_tests=ttype
                            )
                            if ttype == u"pdr":
                                trace_lat, _ = _generate_trending_traces(
                                    test_data,
                                    job_name=job_name,
                                    build_info=build_info,
                                    name=u'-'.join(
                                        tst_name.split(u'.')[-1].split(
                                            u'-')[2:-1]),
                                    color=COLORS[index],
                                    incl_tests=u"pdr-lat"
                                )
                                traces_lat.extend(trace_lat)
                        except IndexError:
                            logging.error(
                                f"Out of colors: index: "
                                f"{index}, test: {tst_name}"
                            )
                            index += 1
                            continue
                        traces.extend(trace)
                        res[tst_name] = rslt
                        index += 1

                # Generate the chart:
                try:
                    layout = deepcopy(graph[u"layout"])
                except KeyError as err:
                    logging.error(u"Finished with error: No layout defined")
                    logging.error(repr(err))
                    return list()

                if groups:
                    show = list()
                    for i in range(len(visibility)):
                        visible = list()
                        for vis_idx, _ in enumerate(visibility):
                            for _ in range(len(visibility[vis_idx])):
                                visible.append(i == vis_idx)
                        show.append(visible)

                    buttons = list()
                    buttons.append(dict(
                        label=u"All",
                        method=u"update",
                        args=[{u"visible":
                                   [True for _ in range(len(show[0]))]}, ]
                    ))
                    for i in range(len(groups)):
                        try:
                            label = graph[u"group-names"][i]
                        except (IndexError, KeyError):
                            label = f"Group {i + 1}"
                        buttons.append(dict(
                            label=label,
                            method=u"update",
                            args=[{u"visible": show[i]}, ]
                        ))

                    layout[u"updatemenus"] = list([
                        dict(active=0, buttons=buttons)
                    ])

                name_file = (
                    f"{spec.cpta[u'output-file']}/"
                    f"{graph[u'output-file-name']}.html"
                )
                name_file = name_file.format(core=core, test_type=ttype)

                logging.info(f" Writing the file {name_file}")
                plpl = plgo.Figure(data=traces, layout=layout)
                try:
                    ploff.plot(
                        plpl,
                        show_link=False,
                        auto_open=False,
                        filename=name_file
                    )
                except plerr.PlotlyEmptyDataError:
                    logging.warning(u"No data for the plot. Skipped.")
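
                # For PDR charts, also write the companion latency chart fed
                # by the pdr-lat traces.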
                if ttype == u"pdr":
                    try:
                        layout = deepcopy(graph[u"layout"])
                        layout[u"yaxis"][u"title"] = u"Latency [s]"
                        layout[u"yaxis"][u"tickformat"] = u".3s"
                    except KeyError as err:
                        logging.error(u"Finished with error: No layout defined")
                        logging.error(repr(err))
                        return list()

                    name_file = (
                        f"{spec.cpta[u'output-file']}/"
                        f"{graph[u'output-file-name']}-lat.html"
                    )
                    name_file = name_file.format(core=core, test_type=ttype)

                    logging.info(f" Writing the file {name_file}")
                    plpl = plgo.Figure(data=traces_lat, layout=layout)
                    try:
                        ploff.plot(
                            plpl,
                            show_link=False,
                            auto_open=False,
                            filename=name_file
                        )
                    except plerr.PlotlyEmptyDataError:
                        logging.warning(u"No data for the plot. Skipped.")

                return_lst.append({
                    u"job_name": job_name,
                    u"csv_table": csv_tbl,
                    u"csv_lat_1": csv_tbl_lat_1,
                    u"csv_lat_2": csv_tbl_lat_2,
                    u"results": res
                })

        return return_lst

    builds_dict = dict()
    for job, builds in spec.input.items():
        if builds_dict.get(job, None) is None:
            builds_dict[job] = list()
        for build in builds:
            if build[u"status"] not in (u"failed", u"not found", u"removed",
                                        None):
                builds_dict[job].append(str(build[u"build"]))

    # Create "build ID": "date" dict:
    build_info = dict()
    tb_tbl = spec.environment.get(u"testbeds", None)
    for job_name, job_data in builds_dict.items():
        if build_info.get(job_name, None) is None:
            build_info[job_name] = OrderedDict()
        for build in job_data:
            testbed = u""
            tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            build_info[job_name][build] = (
                input_data.metadata(job_name, build).get(u"generated", u""),
                input_data.metadata(job_name, build).get(u"version", u""),
                testbed
            )
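
    # build_info: job name -> build number -> (date, version, testbed).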
    anomaly_classifications = dict()

    # Create the table headers:
    csv_tables = dict()
    csv_tables_l1 = dict()
    csv_tables_l2 = dict()
    for job_name in builds_dict:
        if csv_tables.get(job_name, None) is None:
            csv_tables[job_name] = list()
        if csv_tables_l1.get(job_name, None) is None:
            csv_tables_l1[job_name] = list()
        if csv_tables_l2.get(job_name, None) is None:
            csv_tables_l2[job_name] = list()
        header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        build_dates = [x[0] for x in build_info[job_name].values()]
        header = f"Build Date:,{u','.join(build_dates)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        versions = [x[1] for x in build_info[job_name].values()]
        header = f"Version:,{u','.join(versions)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        testbeds = [x[2] for x in build_info[job_name].values()]
        header = f"Test bed:,{u','.join(testbeds)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
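
    # Generate the charts; collect the partial CSV tables and per-test
    # classification results from every chart.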
    for chart in spec.cpta[u"plots"]:
        results = _generate_chart(chart)
        for result in results:
            csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
            csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
            csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])

            if anomaly_classifications.get(result[u"job_name"], None) is None:
                anomaly_classifications[result[u"job_name"]] = dict()
            anomaly_classifications[result[u"job_name"]].\
                update(result[u"results"])

    for job_name, csv_table in csv_tables.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)
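
        # Convert the CSV table into a human readable TXT table; numeric
        # values are scaled down to millions.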
        txt_table = None
        with open(f"{file_name}.csv", u"rt") as csv_file:
            csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    for idx, item in enumerate(row):
                        try:
                            row[idx] = str(round(float(item) / 1000000, 2))
                        except ValueError:
                            pass
                    try:
                        txt_table.add_row(row)
                    # PrettyTable raises Exception
                    except Exception as err:
                        logging.warning(
                            f"Error occurred while generating TXT table:\n{err}"
                        )
        if txt_table:
            txt_table.align[u"Build Number:"] = u"l"
            with open(f"{file_name}.txt", u"wt") as txt_file:
                txt_file.write(str(txt_table))

    for job_name, csv_table in csv_tables_l1.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)
    for job_name, csv_table in csv_tables_l2.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)
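
    # Write the lists of regressions and progressions per job and evaluate
    # the overall result: any regression (or outlier) means FAIL.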
    if anomaly_classifications:
        result = u"PASS"
        for job_name, job_data in anomaly_classifications.items():
            file_name = \
                f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"regression":
                        txt_file.write(test_name + u'\n')
                    if classification in (u"regression", u"outlier"):
                        result = u"FAIL"
            file_name = \
                f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"progression":
                        txt_file.write(test_name + u'\n')
    else:
        result = u"FAIL"

    logging.info(f"Partial results: {anomaly_classifications}")
    logging.info(f"Result: {result}")

    return result