1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
21 from collections import OrderedDict
22 from datetime import datetime
23 from copy import deepcopy
26 import plotly.offline as ploff
27 import plotly.graph_objs as plgo
28 import plotly.exceptions as plerr
30 from pal_utils import archive_input_data, execute_command, classify_anomalies
33 # Command to build the html format of the report
# NOTE(review): the sphinx-build command template below is later
# .format()-ed with 'date', 'working_dir' and 'build_dir' keys (see
# generate_cpta); its remaining continuation lines are not visible in
# this fragment.
34 HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \
37 u'-D version="{date}" ' \
41 # .css file for the html format of the report
# The THEME_OVERRIDES stylesheet is written verbatim into the css patch
# file(s) by generate_cpta to override the readthedocs theme defaults
# (table width, menu borders, etc.).  The closing triple-quote of this
# string is outside the visible fragment.
42 THEME_OVERRIDES = u"""/* override table width restrictions */
44 max-width: 1200px !important;
46 .rst-content blockquote {
52 display: inline-block;
60 .wy-menu-vertical li.current a {
62 border-right: solid 1px #c9c9c9;
65 .wy-menu-vertical li.toctree-l2.current > a {
69 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
74 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
79 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
86 border-top-width: medium;
87 border-bottom-width: medium;
88 border-top-style: none;
89 border-bottom-style: none;
90 border-top-color: currentcolor;
91 border-bottom-color: currentcolor;
92 padding-left: 2em -4px;
# NOTE(review): interior lines of this function are missing from the
# fragment (the original file's line numbers are fused into the text and
# skip); comments below annotate only what the visible code shows.
123 def generate_cpta(spec, data):
124 """Generate all formats and versions of the Continuous Performance Trending
127 :param spec: Specification read from the specification file.
128 :param data: Full data set.
129 :type spec: Specification
130 :type data: InputData
133 logging.info(u"Generating the Continuous Performance Trending and Analysis "
# Build all charts first; the sphinx html build (cmd below) then renders
# the pages that embed them.
136 ret_code = _generate_all_charts(spec, data)
# Compose the sphinx-build command from the HTML_BUILDER template,
# stamping the report version with the current UTC timestamp.
138 cmd = HTML_BUILDER.format(
139 date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
140 working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
141 build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
# Overwrite both theme css patch files with THEME_OVERRIDES so the
# generated html uses the customized layout.
144 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
146 css_file.write(THEME_OVERRIDES)
148 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
150 css_file.write(THEME_OVERRIDES)
# Optionally archive the input data; controlled by the specification's
# "archive-inputs" flag (defaults to off).
152 if spec.environment.get(u"archive-inputs", False):
153 archive_input_data(spec)
155 logging.info(u"Done.")
# NOTE(review): this function is shown with interior lines missing; the
# added comments describe only the visible statements.
160 def _generate_trending_traces(in_data, job_name, build_info,
161 name=u"", color=u"", incl_tests=u"mrr"):
162 """Generate the trending traces:
164 - outliers, regress, progress
165 - average of normal samples (trending line)
167 :param in_data: Full data set.
168 :param job_name: The name of job which generated the data.
169 :param build_info: Information about the builds.
170 :param name: Name of the plot
171 :param color: Name of the color for the plot.
172 :param incl_tests: Included tests, accepted values: mrr, ndr, pdr
173 :type in_data: OrderedDict
175 :type build_info: dict
178 :type incl_tests: str
179 :returns: Generated traces (list) and the evaluated result.
180 :rtype: tuple(traces, result)
# Guard: only mrr/ndr/pdr test types are supported.
183 if incl_tests not in (u"mrr", u"ndr", u"pdr"):
# Split the ordered input into x values (build numbers) and y series in
# pps / Mpps, plus the stdev series scaled to Mpps.
186 data_x = list(in_data.keys())
189 data_y_stdev = list()
190 for item in in_data.values():
191 data_y_pps.append(float(item[u"receive-rate"]))
192 data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
193 data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
# Build per-point hover strings.  The "<stdev>" placeholder is replaced
# with a real stdev line only for mrr; "[Mpps]" becomes "[Mcps]" for
# connection-rate tests (branch condition elided in this fragment).
197 for index, key in enumerate(data_x):
199 date = build_info[job_name][str_key][0]
200 hover_str = (u"date: {date}<br>"
201 u"{property} [Mpps]: {value:.3f}<br>"
203 u"{sut}-ref: {build}<br>"
204 u"csit-ref: {test}-{period}-build-{build_nr}<br>"
205 u"testbed: {testbed}")
206 if incl_tests == u"mrr":
207 hover_str = hover_str.replace(
208 u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
211 hover_str = hover_str.replace(u"<stdev>", u"")
213 hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]")
# dpdk vs vpp jobs format the hover text with slightly different
# build/test reference fields.
214 if u"dpdk" in job_name:
215 hover_text.append(hover_str.format(
217 property=u"average" if incl_tests == u"mrr" else u"throughput",
218 value=data_y_mpps[index],
220 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
224 testbed=build_info[job_name][str_key][2]))
225 elif u"vpp" in job_name:
226 hover_str = hover_str.format(
228 property=u"average" if incl_tests == u"mrr" else u"throughput",
229 value=data_y_mpps[index],
231 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
233 period=u"daily" if incl_tests == u"mrr" else u"weekly",
235 testbed=build_info[job_name][str_key][2])
237 hover_str = hover_str.replace(u"throughput", u"connection rate")
238 hover_text.append(hover_str)
# Parse the build's "generated" date string (YYYYMMDD HH:MM-like layout,
# judging by the slicing) into a datetime for the x axis.
240 xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
241 int(date[9:11]), int(date[12:])))
# Pair dates with pps samples and classify anomalies; on ValueError the
# classification is skipped (logged at info level).
243 data_pd = OrderedDict()
244 for key, value in zip(xaxis, data_y_pps):
248 anomaly_classification, avgs_pps, stdevs_pps = \
249 classify_anomalies(data_pd)
250 except ValueError as err:
251 logging.info(f"{err} Skipping")
# Trend averages / stdevs converted from pps to Mpps for display.
253 avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
254 stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps]
256 anomalies = OrderedDict()
257 anomalies_colors = list()
258 anomalies_avgs = list()
# Collect only regression/progression points (normal samples are not
# marked); the trailing extend keeps the colorscale anchors present.
264 if anomaly_classification:
265 for index, (key, value) in enumerate(data_pd.items()):
266 if anomaly_classification[index] in (u"regression", u"progression"):
267 anomalies[key] = value / 1e6
268 anomalies_colors.append(
269 anomaly_color[anomaly_classification[index]])
270 anomalies_avgs.append(avgs_mpps[index])
271 anomalies_colors.extend([0.0, 0.5, 1.0])
# Trace 1: raw samples (most Scatter kwargs elided in this fragment).
275 trace_samples = plgo.Scatter(
288 u"symbol": u"circle",
291 hoverinfo=u"text+name"
293 traces = [trace_samples, ]
# Trace 2: trend line with its own hover text (trend avg + stdev).
295 trend_hover_text = list()
296 for idx in range(len(data_x)):
298 f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
299 f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
301 trend_hover_text.append(trend_hover_str)
303 trace_trend = plgo.Scatter(
315 text=trend_hover_text,
316 hoverinfo=u"text+name"
318 traces.append(trace_trend)
# Trace 3: anomaly markers, colored by classification with a colorbar
# legend (Regression / Normal / Progression).
320 trace_anomalies = plgo.Scatter(
321 x=list(anomalies.keys()),
327 name=f"{name}-anomalies",
330 u"symbol": u"circle-open",
331 u"color": anomalies_colors,
347 u"title": u"Circles Marking Data Classification",
348 u"titleside": u"right",
352 u"tickmode": u"array",
353 u"tickvals": [0.167, 0.500, 0.833],
354 u"ticktext": [u"Regression", u"Normal", u"Progression"],
362 traces.append(trace_anomalies)
# Result is the classification of the most recent sample.
364 if anomaly_classification:
365 return traces, anomaly_classification[-1]
# NOTE(review): this function (and its nested helper) is shown with many
# interior lines missing; the added comments describe only the visible
# statements and hedge where control flow is elided.
370 def _generate_all_charts(spec, input_data):
371 """Generate all charts specified in the specification file.
373 :param spec: Specification.
374 :param input_data: Full data set.
375 :type spec: Specification
376 :type input_data: InputData
# Nested helper: builds one chart (all its traces and html file) and
# returns per-job csv rows plus test classification results.
379 def _generate_chart(graph):
380 """Generates the chart.
382 :param graph: The graph to be generated
384 :returns: Dictionary with the job name, csv table with results and
385 list of tests classification results.
389 logging.info(f" Generating the chart {graph.get(u'title', u'')} ...")
# The chart's data spec maps a single job name to its builds.
391 job_name = list(graph[u"data"].keys())[0]
395 f" Creating the data set for the {graph.get(u'type', u'')} "
396 f"{graph.get(u'title', u'')}."
399 data = input_data.filter_tests_by_name(
401 params=[u"type", u"result", u"throughput", u"latency", u"tags"],
402 continue_on_error=True
405 if data is None or data.empty:
406 logging.error(u"No data.")
# One chart variant per (test-type, core) combination.
411 for ttype in graph.get(u"test-type", (u"mrr", )):
412 for core in graph.get(u"core", tuple()):
414 csv_tbl_lat_1 = list()
415 csv_tbl_lat_2 = list()
# Select tests matching the "include" regex patterns (the {core}
# placeholder is substituted before compiling).
419 for item in graph.get(u"include", tuple()):
420 reg_ex = re.compile(str(item.format(core=core)).lower())
421 for job, job_data in data.items():
424 for index, bld in job_data.items():
425 for test_id, test in bld.items():
426 if not re.match(reg_ex, str(test_id).lower()):
428 if chart_data.get(test_id, None) is None:
429 chart_data[test_id] = OrderedDict()
# Per test type, pick rate/stdev: mrr uses measured receive
# rate+stdev; ndr/pdr use the LOWER throughput bound with NaN
# stdev; pdr additionally records PDR50 latency per direction.
434 rate = test[u"result"][u"receive-rate"]
436 test[u"result"][u"receive-stdev"]
437 elif ttype == u"ndr":
439 test["throughput"][u"NDR"][u"LOWER"]
440 stdev = float(u"nan")
441 elif ttype == u"pdr":
443 test["throughput"][u"PDR"][u"LOWER"]
444 stdev = float(u"nan")
445 lat_1 = test[u"latency"][u"PDR50"]\
446 [u"direction1"][u"avg"]
447 lat_2 = test[u"latency"][u"PDR50"]\
448 [u"direction2"][u"avg"]
451 chart_data[test_id][int(index)] = {
452 u"receive-rate": rate,
453 u"receive-stdev": stdev
456 chart_data[test_id][int(index)].update(
# Tags are kept per test so group filtering can use them below.
462 chart_tags[test_id] = \
463 test.get(u"tags", None)
464 except (KeyError, TypeError):
467 # Add items to the csv table:
# One csv row per test; missing builds contribute empty cells
# (AttributeError path when itm is not a dict).
468 for tst_name, tst_data in chart_data.items():
470 tst_lst_lat_1 = list()
471 tst_lst_lat_2 = list()
472 for bld in builds_dict[job_name]:
473 itm = tst_data.get(int(bld), dict())
474 # CSIT-1180: Itm will be list, compute stats.
476 tst_lst.append(str(itm.get(u"receive-rate", u"")))
477 tst_lst_lat_1.append(str(itm.get(u"lat_1", u"")))
478 tst_lst_lat_2.append(str(itm.get(u"lat_2", u"")))
479 except AttributeError:
481 tst_lst_lat_1.append(u"")
482 tst_lst_lat_2.append(u"")
483 csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
484 csv_tbl_lat_1.append(
485 f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n"
487 csv_tbl_lat_2.append(
488 f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n"
# Trace generation: grouped charts (tag-filtered) vs flat charts.
494 groups = graph.get(u"groups", None)
501 for tst_name, test_data in chart_data.items():
504 f"No data for the test {tst_name}"
507 if tag not in chart_tags[tst_name]:
510 trace, rslt = _generate_trending_traces(
513 build_info=build_info,
514 name=u'-'.join(tst_name.split(u'.')[-1].
520 logging.error(f"Out of colors: index: "
521 f"{index}, test: {tst_name}")
# Visibility mask: all traces of this test visible together.
526 [True for _ in range(len(trace))]
531 visibility.append(visible)
# Ungrouped variant of the same trace generation.
533 for tst_name, test_data in chart_data.items():
535 logging.warning(f"No data for the test {tst_name}")
538 trace, rslt = _generate_trending_traces(
541 build_info=build_info,
543 tst_name.split(u'.')[-1].split(u'-')[2:-1]),
549 f"Out of colors: index: "
550 f"{index}, test: {tst_name}"
559 # Generate the chart:
561 layout = deepcopy(graph[u"layout"])
562 except KeyError as err:
563 logging.error(u"Finished with error: No layout defined")
564 logging.error(repr(err))
# Build the per-group "show" masks driving the updatemenus buttons:
# group i shows only the traces belonging to group i.
568 for i in range(len(visibility)):
570 for vis_idx, _ in enumerate(visibility):
571 for _ in range(len(visibility[vis_idx])):
572 visible.append(i == vis_idx)
580 [True for _ in range(len(show[0]))]}, ]
582 for i in range(len(groups)):
584 label = graph[u"group-names"][i]
585 except (IndexError, KeyError):
586 label = f"Group {i + 1}"
590 args=[{u"visible": show[i]}, ]
593 layout[u"updatemenus"] = list([
# Write the chart out as a standalone html file.
607 f"{spec.cpta[u'output-file']}/"
608 f"{graph[u'output-file-name']}.html"
610 name_file = name_file.format(core=core, test_type=ttype)
612 logging.info(f" Writing the file {name_file}")
613 plpl = plgo.Figure(data=traces, layout=layout)
621 except plerr.PlotlyEmptyDataError:
622 logging.warning(u"No data for the plot. Skipped.")
# Helper's return payload (per test-type/core variant).
626 u"job_name": job_name,
627 u"csv_table": csv_tbl,
628 u"csv_lat_1": csv_tbl_lat_1,
629 u"csv_lat_2": csv_tbl_lat_2,
# --- main body of _generate_all_charts ---
# Collect usable builds per job, skipping failed/missing ones.
637 for job, builds in spec.input.items():
638 if builds_dict.get(job, None) is None:
639 builds_dict[job] = list()
641 if build[u"status"] not in (u"failed", u"not found", u"removed",
643 builds_dict[job].append(str(build[u"build"]))
645 # Create "build ID": "date" dict:
# build_info[job][build] = (generated-date, version, testbed).
647 tb_tbl = spec.environment.get(u"testbeds", None)
648 for job_name, job_data in builds_dict.items():
649 if build_info.get(job_name, None) is None:
650 build_info[job_name] = OrderedDict()
651 for build in job_data:
653 tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
655 testbed = tb_tbl.get(tb_ip, u"")
656 build_info[job_name][build] = (
657 input_data.metadata(job_name, build).get(u"generated", u""),
658 input_data.metadata(job_name, build).get(u"version", u""),
662 anomaly_classifications = dict()
664 # Create the table header:
# Three parallel csv tables per job: trending + two latency tables,
# all sharing the same four header rows.
666 csv_tables_l1 = dict()
667 csv_tables_l2 = dict()
668 for job_name in builds_dict:
669 if csv_tables.get(job_name, None) is None:
670 csv_tables[job_name] = list()
671 if csv_tables_l1.get(job_name, None) is None:
672 csv_tables_l1[job_name] = list()
673 if csv_tables_l2.get(job_name, None) is None:
674 csv_tables_l2[job_name] = list()
675 header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
676 csv_tables[job_name].append(header)
677 csv_tables_l1[job_name].append(header)
678 csv_tables_l2[job_name].append(header)
679 build_dates = [x[0] for x in build_info[job_name].values()]
680 header = f"Build Date:,{u','.join(build_dates)}\n"
681 csv_tables[job_name].append(header)
682 csv_tables_l1[job_name].append(header)
683 csv_tables_l2[job_name].append(header)
684 versions = [x[1] for x in build_info[job_name].values()]
685 header = f"Version:,{u','.join(versions)}\n"
686 csv_tables[job_name].append(header)
687 csv_tables_l1[job_name].append(header)
688 csv_tables_l2[job_name].append(header)
689 testbed = [x[2] for x in build_info[job_name].values()]
690 header = f"Test bed:,{u','.join(testbed)}\n"
691 csv_tables[job_name].append(header)
692 csv_tables_l1[job_name].append(header)
693 csv_tables_l2[job_name].append(header)
# Generate every plot from the spec and merge its csv rows and
# anomaly classifications into the per-job accumulators.
695 for chart in spec.cpta[u"plots"]:
696 results = _generate_chart(chart)
700 for result in results:
701 csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
702 csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
703 csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])
705 if anomaly_classifications.get(result[u"job_name"], None) is None:
706 anomaly_classifications[result[u"job_name"]] = dict()
707 anomaly_classifications[result[u"job_name"]].\
708 update(result[u"results"])
# Write the trending csv per job, then re-read it to render a txt
# table (values scaled from pps to Mpps, 2 decimal places).
711 for job_name, csv_table in csv_tables.items():
712 file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
713 with open(f"{file_name}.csv", u"wt") as file_handler:
714 file_handler.writelines(csv_table)
717 with open(f"{file_name}.csv", u"rt") as csv_file:
718 csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
720 for row in csv_content:
721 if txt_table is None:
722 txt_table = prettytable.PrettyTable(row)
725 for idx, item in enumerate(row):
727 row[idx] = str(round(float(item) / 1000000, 2))
731 txt_table.add_row(row)
732 # PrettyTable raises Exception
733 except Exception as err:
735 f"Error occurred while generating TXT table:\n{err}"
738 txt_table.align[u"Build Number:"] = u"l"
739 with open(f"{file_name}.txt", u"wt") as txt_file:
740 txt_file.write(str(txt_table))
# Latency csv tables (PDR50, direction 1 and direction 2).
742 for job_name, csv_table in csv_tables_l1.items():
743 file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
744 with open(f"{file_name}.csv", u"wt") as file_handler:
745 file_handler.writelines(csv_table)
746 for job_name, csv_table in csv_tables_l2.items():
747 file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
748 with open(f"{file_name}.csv", u"wt") as file_handler:
749 file_handler.writelines(csv_table)
# Emit per-job regression/progression test-name lists; regressions
# (and outliers — presumably for the overall return code) are also
# counted in elided lines.
752 if anomaly_classifications:
754 for job_name, job_data in anomaly_classifications.items():
756 f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
757 with open(file_name, u'w') as txt_file:
758 for test_name, classification in job_data.items():
759 if classification == u"regression":
760 txt_file.write(test_name + u'\n')
761 if classification in (u"regression", u"outlier"):
764 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
765 with open(file_name, u'w') as txt_file:
766 for test_name, classification in job_data.items():
767 if classification == u"progression":
768 txt_file.write(test_name + u'\n')
772 logging.info(f"Partial results: {anomaly_classifications}")
773 logging.info(f"Result: {result}")