# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generation of Continuous Performance Trending and Analysis.
"""
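
# High-level flow (informal sketch): generate_cpta() asks
# _generate_all_charts() to render every trending chart listed in the
# specification, then runs sphinx-build (HTML_BUILDER) to produce the HTML
# pages and writes THEME_OVERRIDES into the theme's CSS patch files.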

import re
import logging
import csv

from collections import OrderedDict
from datetime import datetime
from copy import deepcopy

import prettytable
import plotly.offline as ploff
import plotly.graph_objs as plgo
import plotly.exceptions as plerr

from pal_utils import archive_input_data, execute_command, classify_anomalies

# Command to build the HTML format of the report
HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \
    u'-D version="{date}" ' \
    u'{working_dir} ' \
    u'{build_dir}'

# .css file for the HTML format of the report
THEME_OVERRIDES = u"""/* override table width restrictions */
.wy-nav-content {
    max-width: 1200px !important;
}
.rst-content blockquote {
    display: inline-block;
}
.wy-menu-vertical li.current a {
    border-right: solid 1px #c9c9c9;
}
.wy-menu-vertical li.toctree-l2.current > a {
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
}
.wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
}
.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
    border-top-width: medium;
    border-bottom-width: medium;
    border-top-style: none;
    border-bottom-style: none;
    border-top-color: currentcolor;
    border-bottom-color: currentcolor;
    padding-left: 2em -4px;
}
"""


def generate_cpta(spec, data):
    """Generate all formats and versions of the Continuous Performance
    Trending and Analysis.

    :param spec: Specification read from the specification file.
    :param data: Full data set.
    :type spec: Specification
    :type data: InputData
    """
    logging.info(
        u"Generating the Continuous Performance Trending and Analysis."
    )

    ret_code = _generate_all_charts(spec, data)

    cmd = HTML_BUILDER.format(
        date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
        working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
        build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
    execute_command(cmd)

    with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
            css_file:
        css_file.write(THEME_OVERRIDES)

    with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
            css_file:
        css_file.write(THEME_OVERRIDES)

    if spec.environment.get(u"archive-inputs", False):
        archive_input_data(spec)

    logging.info(u"Done.")

    return ret_code


def _generate_trending_traces(in_data, job_name, build_info,
                              name=u"", color=u"", incl_tests=u"mrr"):
    """Generate the trending traces:
     - samples,
     - outliers, regress, progress
     - average of normal samples (trending line)

    :param in_data: Full data set.
    :param job_name: The name of the job which generated the data.
    :param build_info: Information about the builds.
    :param name: Name of the plot.
    :param color: Name of the color for the plot.
    :param incl_tests: Included tests, accepted values: mrr, ndr, pdr, pdr-lat.
    :type in_data: OrderedDict
    :type job_name: str
    :type build_info: dict
    :type name: str
    :type color: str
    :type incl_tests: str
    :returns: Generated traces (list) and the evaluated result.
    :rtype: tuple(traces, result)
    """
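    # in_data is expected to map build numbers to per-build results, e.g.
    # (illustrative values only):
    #     OrderedDict({
    #         1590: {u"receive-rate": 12345678.0, u"receive-stdev": 65432.1},
    #         1591: {u"receive-rate": 12411222.0, u"receive-stdev": 61111.0},
    #     })
    # For incl_tests == u"pdr-lat" the per-build dicts carry u"lat_1"/u"lat_2"
    # latency values instead.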
    if incl_tests not in (u"mrr", u"ndr", u"pdr", u"pdr-lat"):
        return list(), None

    data_x = list(in_data.keys())
    data_y_pps = list()
    data_y_mpps = list()
    data_y_stdev = list()
    if incl_tests == u"pdr-lat":
        for item in in_data.values():
            data_y_pps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
            data_y_stdev.append(float(u"nan"))
            data_y_mpps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
        multi = 1.0
    else:
        for item in in_data.values():
            data_y_pps.append(float(item[u"receive-rate"]))
            data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
            data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
        multi = 1e6
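
    # data_y_pps keeps the raw per-build values (pps, or seconds for the
    # latency variant) and feeds the anomaly classification below;
    # data_y_mpps holds the same series scaled for display in the hover text.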

    hover_text = list()
    xaxis = list()
    for index, key in enumerate(data_x):
        str_key = str(key)
        date = build_info[job_name][str_key][0]
        hover_str = (u"date: {date}<br>"
                     u"{property} [Mpps]: <val><br>"
                     u"<stdev>"
                     u"{sut}-ref: {build}<br>"
                     u"csit-ref: {test}-{period}-build-{build_nr}<br>"
                     u"testbed: {testbed}")
        if incl_tests == u"mrr":
            hover_str = hover_str.replace(
                u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
            )
        else:
            hover_str = hover_str.replace(u"<stdev>", u"")
        if incl_tests == u"pdr-lat":
            hover_str = hover_str.replace(u"<val>", u"{value:.1e}")
        else:
            hover_str = hover_str.replace(u"<val>", u"{value:.3f}")
        if u"-cps" in name:
            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]").\
                replace(u"throughput", u"connection rate")
        if u"dpdk" in job_name:
            hover_str = hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"dpdk",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2])
        elif u"vpp" in job_name:
            hover_str = hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"vpp",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2])
        if incl_tests == u"pdr-lat":
            hover_str = hover_str.replace(
                u"throughput [Mpps]", u"latency [s]"
            )
        hover_text.append(hover_str)
        xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                              int(date[9:11]), int(date[12:])))
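        # The u"generated" metadata is assumed to be formatted as
        # u"YYYYMMDD HH:MM", e.g. u"20210630 02:15" becomes
        # datetime(2021, 6, 30, 2, 15) via the slicing above.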

    data_pd = OrderedDict()
    for key, value in zip(xaxis, data_y_pps):
        data_pd[key] = value

    try:
        anomaly_classification, avgs_pps, stdevs_pps = \
            classify_anomalies(data_pd)
    except ValueError as err:
        logging.info(f"{err} Skipping")
        return list(), None

    avgs_mpps = [avg_pps / multi for avg_pps in avgs_pps]
    stdevs_mpps = [stdev_pps / multi for stdev_pps in stdevs_pps]
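
    # classify_anomalies() (from pal_utils) returns one classification per
    # sample, e.g. u"normal", u"regression", u"progression" or u"outlier",
    # together with the running trend average and stdev in the units of
    # data_pd; the two lists above rescale the trend for plotting.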

    anomalies = OrderedDict()
    anomalies_colors = list()
    anomalies_avgs = list()
    anomaly_color = {
        u"regression": 0.0,
        u"normal": 0.5,
        u"progression": 1.0
    }
    if anomaly_classification:
        for index, (key, value) in enumerate(data_pd.items()):
            if anomaly_classification[index] in (u"regression", u"progression"):
                anomalies[key] = value / multi
                anomalies_colors.append(
                    anomaly_color[anomaly_classification[index]])
                anomalies_avgs.append(avgs_mpps[index])
        anomalies_colors.extend([0.0, 0.5, 1.0])
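        # The extra 0.0/0.5/1.0 entries pin both ends and the middle of the
        # colorscale so the anomaly markers keep consistent colors even when
        # only one class of anomaly is present in the data.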

    trace_samples = plgo.Scatter(
        x=xaxis,
        y=data_y_mpps,
        mode=u"markers",
        marker={u"color": color, u"symbol": u"circle"},
        text=hover_text,
        hoverinfo=u"text+name"
    )
    traces = [trace_samples, ]

    trend_hover_text = list()
    for idx in range(len(data_x)):
        if incl_tests == u"pdr-lat":
            trend_hover_str = (
                f"trend [s]: {avgs_mpps[idx]:.1e}<br>"
            )
        else:
            trend_hover_str = (
                f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
                f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
            )
        trend_hover_text.append(trend_hover_str)

    trace_trend = plgo.Scatter(
        x=xaxis,
        y=avgs_mpps,
        mode=u"lines",
        text=trend_hover_text,
        hoverinfo=u"text+name"
    )
    traces.append(trace_trend)

    if incl_tests == u"pdr-lat":
        ticktext = [u"Progression", u"Normal", u"Regression"]
    else:
        ticktext = [u"Regression", u"Normal", u"Progression"]

    trace_anomalies = plgo.Scatter(
        x=list(anomalies.keys()),
        y=anomalies_avgs,
        mode=u"markers",
        name=f"{name}-anomalies",
        marker={
            u"symbol": u"circle-open",
            u"color": anomalies_colors,
            u"colorscale": colorscale,
            u"showscale": True,
            u"colorbar": {
                u"title": u"Circles Marking Data Classification",
                u"titleside": u"right",
                u"tickmode": u"array",
                u"tickvals": [0.167, 0.500, 0.833],
                u"ticktext": ticktext,
            }
        }
    )
    traces.append(trace_anomalies)
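
    # Each test contributes three traces: the raw samples, the trend line and
    # open-circle markers highlighting the classified regressions/progressions.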

    if anomaly_classification:
        return traces, anomaly_classification[-1]

    return traces, None


def _generate_all_charts(spec, input_data):
    """Generate all charts specified in the specification file.

    :param spec: Specification.
    :param input_data: Full data set.
    :type spec: Specification
    :type input_data: InputData
    """

    def _generate_chart(graph):
        """Generate the chart.

        :param graph: The graph to be generated.
        :type graph: dict
        :returns: Dictionary with the job name, csv table with results and
            list of tests classification results.
        """
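        # Each returned item carries u"job_name", u"csv_table", u"csv_lat_1",
        # u"csv_lat_2" and u"results" (test name -> classification).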
        logging.info(f" Generating the chart {graph.get(u'title', u'')} ...")

        job_name = list(graph[u"data"].keys())[0]

        logging.info(
            f" Creating the data set for the {graph.get(u'type', u'')} "
            f"{graph.get(u'title', u'')}."
        )

        data = input_data.filter_tests_by_name(
            graph,
            params=[u"type", u"result", u"throughput", u"latency", u"tags"],
            continue_on_error=True
        )

        if data is None or data.empty:
            logging.error(u"No data.")
            return list()

        for ttype in graph.get(u"test-type", (u"mrr", )):
            for core in graph.get(u"core", tuple()):
                csv_tbl = list()
                csv_tbl_lat_1 = list()
                csv_tbl_lat_2 = list()
                chart_data = dict()
                chart_tags = dict()
                for item in graph.get(u"include", tuple()):
                    reg_ex = re.compile(str(item.format(core=core)).lower())
                    for job, job_data in data.items():
                        for index, bld in job_data.items():
                            for test_id, test in bld.items():
                                if not re.match(reg_ex, str(test_id).lower()):
                                    continue
                                if chart_data.get(test_id, None) is None:
                                    chart_data[test_id] = OrderedDict()
                                try:
                                    if ttype == u"mrr":
                                        rate = test[u"result"][u"receive-rate"]
                                        stdev = \
                                            test[u"result"][u"receive-stdev"]
                                    elif ttype == u"ndr":
                                        rate = \
                                            test["throughput"][u"NDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                    elif ttype == u"pdr":
                                        rate = \
                                            test["throughput"][u"PDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                        lat_1 = test[u"latency"][u"PDR50"]\
                                            [u"direction1"][u"avg"]
                                        lat_2 = test[u"latency"][u"PDR50"]\
                                            [u"direction2"][u"avg"]
                                    else:
                                        continue
                                    chart_data[test_id][int(index)] = {
                                        u"receive-rate": rate,
                                        u"receive-stdev": stdev
                                    }
                                    if ttype == u"pdr":
                                        chart_data[test_id][int(index)].update(
                                            {u"lat_1": lat_1, u"lat_2": lat_2}
                                        )
                                    chart_tags[test_id] = \
                                        test.get(u"tags", None)
                                except (KeyError, TypeError):
                                    pass

                # Add items to the csv table:
                for tst_name, tst_data in chart_data.items():
                    tst_lst = list()
                    tst_lst_lat_1 = list()
                    tst_lst_lat_2 = list()
                    for bld in builds_dict[job_name]:
                        itm = tst_data.get(int(bld), dict())
                        # CSIT-1180: Itm will be list, compute stats.
                        try:
                            tst_lst.append(str(itm.get(u"receive-rate", u"")))
                            tst_lst_lat_1.append(
                                str(itm.get(u"lat_1", u""))
                            )
                            tst_lst_lat_2.append(
                                str(itm.get(u"lat_2", u""))
                            )
                        except AttributeError:
                            tst_lst.append(u"")
                            tst_lst_lat_1.append(u"")
                            tst_lst_lat_2.append(u"")
                    csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
                    csv_tbl_lat_1.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n"
                    )
                    csv_tbl_lat_2.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n"
                    )
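
                # Each appended CSV row has the form
                # u"<test name>,<value for build 1>,<value for build 2>,...",
                # one column per entry in builds_dict[job_name].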

                groups = graph.get(u"groups", None)
                if groups:
                    for tst_name, test_data in chart_data.items():
                        if not test_data:
                            logging.warning(f"No data for the test {tst_name}")
                            continue
                        if tag not in chart_tags[tst_name]:
                            continue
                        try:
                            trace, rslt = _generate_trending_traces(
                                test_data,
                                job_name=job_name,
                                build_info=build_info,
                                name=u'-'.join(tst_name.split(u'.')[-1].
                                               split(u'-')[2:-1]),
                                incl_tests=ttype
                            )
                        except IndexError:
                            logging.error(f"Out of colors: index: "
                                          f"{index}, test: {tst_name}")
                            continue
                        visible.extend([True for _ in range(len(trace))])
                        visibility.append(visible)
                else:
                    for tst_name, test_data in chart_data.items():
                        if not test_data:
                            logging.warning(f"No data for the test {tst_name}")
                            continue
                        try:
                            trace, rslt = _generate_trending_traces(
                                test_data,
                                job_name=job_name,
                                build_info=build_info,
                                name=u'-'.join(
                                    tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                                incl_tests=ttype
                            )
                            trace_lat, _ = _generate_trending_traces(
                                test_data,
                                job_name=job_name,
                                build_info=build_info,
                                name=u'-'.join(
                                    tst_name.split(u'.')[-1].split(
                                        u'-')[2:-1]),
                                incl_tests=u"pdr-lat"
                            )
                            traces_lat.extend(trace_lat)
                        except IndexError:
                            logging.error(
                                f"Out of colors: index: "
                                f"{index}, test: {tst_name}"
                            )

                # Generate the chart:
                try:
                    layout = deepcopy(graph[u"layout"])
                except KeyError as err:
                    logging.error(u"Finished with error: No layout defined")
                    logging.error(repr(err))
                    return list()

                show = list()
                for i in range(len(visibility)):
                    visible = list()
                    for vis_idx, _ in enumerate(visibility):
                        for _ in range(len(visibility[vis_idx])):
                            visible.append(i == vis_idx)
                    show.append(visible)
                buttons = list()
                buttons.append(dict(
                    label=u"All",
                    method=u"update",
                    args=[{u"visible":
                           [True for _ in range(len(show[0]))]}, ]
                ))
                for i in range(len(groups)):
                    try:
                        label = graph[u"group-names"][i]
                    except (IndexError, KeyError):
                        label = f"Group {i + 1}"
                    buttons.append(dict(
                        label=label,
                        method=u"update",
                        args=[{u"visible": show[i]}, ]
                    ))
                layout[u"updatemenus"] = list([dict(buttons=buttons)])

                name_file = (
                    f"{spec.cpta[u'output-file']}/"
                    f"{graph[u'output-file-name']}.html"
                )
                name_file = name_file.format(core=core, test_type=ttype)

                logging.info(f" Writing the file {name_file}")
                plpl = plgo.Figure(data=traces, layout=layout)
                try:
                    ploff.plot(plpl, filename=name_file, auto_open=False)
                except plerr.PlotlyEmptyDataError:
                    logging.warning(u"No data for the plot. Skipped.")

                try:
                    layout = deepcopy(graph[u"layout"])
                    layout[u"yaxis"][u"title"] = u"Latency [s]"
                    layout[u"yaxis"][u"tickformat"] = u".3s"
                except KeyError as err:
                    logging.error(u"Finished with error: No layout defined")
                    logging.error(repr(err))
                name_file = (
                    f"{spec.cpta[u'output-file']}/"
                    f"{graph[u'output-file-name']}-lat.html"
                )
                name_file = name_file.format(core=core, test_type=ttype)

                logging.info(f" Writing the file {name_file}")
                plpl = plgo.Figure(data=traces_lat, layout=layout)
                try:
                    ploff.plot(plpl, filename=name_file, auto_open=False)
                except plerr.PlotlyEmptyDataError:
                    logging.warning(u"No data for the plot. Skipped.")

                res.append({
                    u"job_name": job_name,
                    u"csv_table": csv_tbl,
                    u"csv_lat_1": csv_tbl_lat_1,
                    u"csv_lat_2": csv_tbl_lat_2,
                    u"results": results
                })

        return res

    builds_dict = dict()
    for job, builds in spec.input.items():
        if builds_dict.get(job, None) is None:
            builds_dict[job] = list()
        for build in builds:
            if build[u"status"] not in (u"failed", u"not found", u"removed",
                                        None):
                builds_dict[job].append(str(build[u"build"]))
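
    # builds_dict maps a job name to the list of build numbers (as strings)
    # with usable data; failed, not found and removed builds are skipped.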

    # Create "build ID": "date" dict:
    build_info = dict()
    tb_tbl = spec.environment.get(u"testbeds", None)
    for job_name, job_data in builds_dict.items():
        if build_info.get(job_name, None) is None:
            build_info[job_name] = OrderedDict()
        for build in job_data:
            testbed = u""
            tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            build_info[job_name][build] = (
                input_data.metadata(job_name, build).get(u"generated", u""),
                input_data.metadata(job_name, build).get(u"version", u""),
                testbed
            )
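
    # build_info[job][build] is a 3-tuple used later as:
    # [0] generation date, [1] version string, [2] testbed name.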

    anomaly_classifications = dict()

    # Create the table header:
    csv_tables = dict()
    csv_tables_l1 = dict()
    csv_tables_l2 = dict()
    for job_name in builds_dict:
        if csv_tables.get(job_name, None) is None:
            csv_tables[job_name] = list()
        if csv_tables_l1.get(job_name, None) is None:
            csv_tables_l1[job_name] = list()
        if csv_tables_l2.get(job_name, None) is None:
            csv_tables_l2[job_name] = list()
        header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        build_dates = [x[0] for x in build_info[job_name].values()]
        header = f"Build Date:,{u','.join(build_dates)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        versions = [x[1] for x in build_info[job_name].values()]
        header = f"Version:,{u','.join(versions)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        testbed = [x[2] for x in build_info[job_name].values()]
        header = f"Test bed:,{u','.join(testbed)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
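
    # The resulting header block of each per-job CSV looks like this
    # (illustrative values only):
    #     Build Number:,1201,1202,1203
    #     Build Date:,20210628 02:15,20210629 02:15,20210630 02:15
    #     Version:,21.06-release,21.06-release,21.06-release
    #     Test bed:,testbed1,testbed2,testbed2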

    for chart in spec.cpta[u"plots"]:
        results = _generate_chart(chart)
        if not results:
            continue

        for result in results:
            csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
            csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
            csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])

            if anomaly_classifications.get(result[u"job_name"], None) is None:
                anomaly_classifications[result[u"job_name"]] = dict()
            anomaly_classifications[result[u"job_name"]].\
                update(result[u"results"])

    for job_name, csv_table in csv_tables.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)

        txt_table = None
        with open(f"{file_name}.csv", u"rt") as csv_file:
            csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    for idx, item in enumerate(row):
                        try:
                            row[idx] = str(round(float(item) / 1000000, 2))
                        except ValueError:
                            pass
                    try:
                        txt_table.add_row(row)
                    # PrettyTable raises Exception
                    except Exception as err:
                        logging.warning(
                            f"Error occurred while generating TXT table:\n{err}"
                        )
        txt_table.align[u"Build Number:"] = u"l"
        with open(f"{file_name}.txt", u"wt") as txt_file:
            txt_file.write(str(txt_table))

    for job_name, csv_table in csv_tables_l1.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)

    for job_name, csv_table in csv_tables_l2.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)

    if anomaly_classifications:
        result = u"PASS"
        for job_name, job_data in anomaly_classifications.items():
            file_name = \
                f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"regression":
                        txt_file.write(test_name + u'\n')
                    if classification in (u"regression", u"outlier"):
                        result = u"FAIL"
            file_name = \
                f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"progression":
                        txt_file.write(test_name + u'\n')
    else:
        result = u"FAIL"

    logging.info(f"Partial results: {anomaly_classifications}")
    logging.info(f"Result: {result}")

    return result