1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
21 from collections import OrderedDict
22 from datetime import datetime
23 from copy import deepcopy
26 import plotly.offline as ploff
27 import plotly.graph_objs as plgo
28 import plotly.exceptions as plerr
30 from pal_utils import archive_input_data, execute_command, classify_anomalies
# Command to build the html format of the report.
# Placeholders {date}, {working_dir}, {build_dir} are filled in by
# generate_cpta() below.
# NOTE(review): this excerpt elides several continuation lines of the
# command string and of the CSS block below.
HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \
    u'-D version="{date}" ' \

# .css file for the html format of the report; written verbatim into the
# CSS patch files by generate_cpta() to widen tables in the generated HTML.
THEME_OVERRIDES = u"""/* override table width restrictions */
max-width: 1200px !important;
.rst-content blockquote {
display: inline-block;
.wy-menu-vertical li.current a {
border-right: solid 1px #c9c9c9;
.wy-menu-vertical li.toctree-l2.current > a {
.wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
.wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
border-top-width: medium;
border-bottom-width: medium;
border-top-style: none;
border-bottom-style: none;
border-top-color: currentcolor;
border-bottom-color: currentcolor;
padding-left: 2em -4px;
# NOTE(review): elided excerpt — lines between the visible statements
# (docstring terminator, string continuations, the step that executes
# ``cmd``) are missing; comments below annotate only what is visible.
def generate_cpta(spec, data):
    """Generate all formats and versions of the Continuous Performance Trending
    :param spec: Specification read from the specification file.
    :param data: Full data set.
    :type spec: Specification
    :type data: InputData
    logging.info(u"Generating the Continuous Performance Trending and Analysis "
    # Generate all charts first; presumably ret_code is folded into the
    # value returned by this function (return not visible here — confirm).
    ret_code = _generate_all_charts(spec, data)
    # Assemble the sphinx-build command from the spec's environment paths,
    # stamping the generated report with the current UTC date.
    cmd = HTML_BUILDER.format(
        date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
        working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
        build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
    # Write the CSS theme overrides into both patch files so the generated
    # HTML gets the widened-table styling from THEME_OVERRIDES.
    with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
        css_file.write(THEME_OVERRIDES)
    with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
        css_file.write(THEME_OVERRIDES)
    # Archive the input data only when the specification asks for it.
    if spec.environment.get(u"archive-inputs", False):
        archive_input_data(spec)
    logging.info(u"Done.")
# NOTE(review): elided excerpt — several lines inside this function are
# missing (docstring terminator, some initialisations, ``else:`` lines,
# closing parentheses).  Comments annotate only the visible logic.
def _generate_trending_traces(in_data, job_name, build_info,
                              name=u"", color=u"", incl_tests=u"mrr"):
    """Generate the trending traces:
    - outliers, regress, progress
    - average of normal samples (trending line)
    :param in_data: Full data set.
    :param job_name: The name of job which generated the data.
    :param build_info: Information about the builds.
    :param name: Name of the plot
    :param color: Name of the color for the plot.
    :param incl_tests: Included tests, accepted values: mrr, ndr, pdr
    :type in_data: OrderedDict
    :type build_info: dict
    :type incl_tests: str
    :returns: Generated traces (list) and the evaluated result.
    :rtype: tuple(traces, result)
    # Guard: only mrr/ndr/pdr test types are supported (the early-exit body
    # is not visible in this excerpt).
    if incl_tests not in (u"mrr", u"ndr", u"pdr"):
    # Split the ordered samples into x-axis keys and y-axis series:
    # raw pps, pps scaled to Mpps, and stdev scaled to Mpps.
    data_x = list(in_data.keys())
    data_y_stdev = list()
    for item in in_data.values():
        data_y_pps.append(float(item[u"receive-rate"]))
        data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
        data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
    # Build per-sample hover text and x-axis timestamps from build info.
    for index, key in enumerate(data_x):
        date = build_info[job_name][str_key][0]
        hover_str = (u"date: {date}<br>"
                     u"{property} [Mpps]: {value:.3f}<br>"
                     u"{sut}-ref: {build}<br>"
                     u"csit-ref: {test}-{period}-build-{build_nr}<br>"
                     u"testbed: {testbed}")
        # For mrr the stdev placeholder is expanded; otherwise it is
        # removed, and for cps tests the unit label is switched to Mcps.
        if incl_tests == u"mrr":
            hover_str = hover_str.replace(
                u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
            hover_str = hover_str.replace(u"<stdev>", u"")
            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]")
        if u"dpdk" in job_name:
            hover_text.append(hover_str.format(
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                testbed=build_info[job_name][str_key][2]))
        elif u"vpp" in job_name:
            hover_str = hover_str.format(
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                testbed=build_info[job_name][str_key][2])
            # NOTE(review): presumably applied only for connection-rate
            # tests — the guarding condition is not visible here; confirm.
            hover_str = hover_str.replace(u"throughput", u"connection rate")
            hover_text.append(hover_str)
        # ``date`` is parsed positionally (YYYYMMDD ... HHMM) into datetime.
        xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                              int(date[9:11]), int(date[12:])))
    # Map timestamps to raw pps values for anomaly classification.
    data_pd = OrderedDict()
    for key, value in zip(xaxis, data_y_pps):
    # Classify each sample and scale the returned trend averages and
    # stdevs from pps down to Mpps for plotting.
    anomaly_classification, avgs_pps, stdevs_pps = classify_anomalies(data_pd)
    avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
    stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps]
    # Collect only regressions/progressions for the anomaly markers.
    anomalies = OrderedDict()
    anomalies_colors = list()
    anomalies_avgs = list()
    if anomaly_classification:
        for index, (key, value) in enumerate(data_pd.items()):
            if anomaly_classification[index] in (u"regression", u"progression"):
                anomalies[key] = value / 1e6
                anomalies_colors.append(
                    anomaly_color[anomaly_classification[index]])
                anomalies_avgs.append(avgs_mpps[index])
        # Append the colorscale anchor values so the colorbar renders all
        # three classes even when only some occur in the data.
        anomalies_colors.extend([0.0, 0.5, 1.0])
    # Scatter trace with the raw samples.
    trace_samples = plgo.Scatter(
            u"symbol": u"circle",
        hoverinfo=u"text+name"
    traces = [trace_samples, ]
    # Hover text for the trend line: trend average and stdev in Mpps.
    trend_hover_text = list()
    for idx in range(len(data_x)):
            f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
            f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
        trend_hover_text.append(trend_hover_str)
    # Scatter trace with the trending line.
    trace_trend = plgo.Scatter(
        text=trend_hover_text,
        hoverinfo=u"text+name"
    traces.append(trace_trend)
    # Scatter trace marking anomalies with open circles plus a colorbar
    # legend mapping marker color to its classification.
    trace_anomalies = plgo.Scatter(
        x=list(anomalies.keys()),
        name=f"{name}-anomalies",
            u"symbol": u"circle-open",
            u"color": anomalies_colors,
                u"title": u"Circles Marking Data Classification",
                u"titleside": u"right",
                u"tickmode": u"array",
                u"tickvals": [0.167, 0.500, 0.833],
                u"ticktext": [u"Regression", u"Normal", u"Progression"],
    traces.append(trace_anomalies)
    # The overall result is the classification of the newest sample.
    if anomaly_classification:
        return traces, anomaly_classification[-1]
# NOTE(review): elided excerpt — many lines of this function are missing
# (try/else headers, loop bodies, closing brackets, initialisations), and
# it may continue past the last visible line.  Comments annotate only the
# visible logic.
def _generate_all_charts(spec, input_data):
    """Generate all charts specified in the specification file.
    :param spec: Specification.
    :param input_data: Full data set.
    :type spec: Specification
    :type input_data: InputData
    def _generate_chart(graph):
        """Generates the chart.
        :param graph: The graph to be generated
        :returns: Dictionary with the job name, csv table with results and
        list of tests classification results.
        logging.info(f" Generating the chart {graph.get(u'title', u'')} ...")
        # The chart is driven by the first (only) job named in its spec.
        job_name = list(graph[u"data"].keys())[0]
        logging.info(
            f" Creating the data set for the {graph.get(u'type', u'')} "
            f"{graph.get(u'title', u'')}."
        # Filter the full data set down to the tests this chart needs.
        data = input_data.filter_tests_by_name(
            params=[u"type", u"result", u"throughput", u"tags"],
            continue_on_error=True
        if data is None or data.empty:
            logging.error(u"No data.")
        # One pass per test type (mrr/ndr/pdr) and per core count.
        for ttype in graph.get(u"test-type", (u"mrr", )):
            for core in graph.get(u"core", tuple()):
                # Collect receive-rate (and stdev, NaN for ndr/pdr) per
                # test id and build index for every matching test.
                for item in graph.get(u"include", tuple()):
                    reg_ex = re.compile(str(item.format(core=core)).lower())
                    for job, job_data in data.items():
                        for index, bld in job_data.items():
                            for test_id, test in bld.items():
                                if not re.match(reg_ex, str(test_id).lower()):
                                if chart_data.get(test_id, None) is None:
                                    chart_data[test_id] = OrderedDict()
                                    rate = test[u"result"][u"receive-rate"]
                                        test[u"result"][u"receive-stdev"]
                                elif ttype == u"ndr":
                                        test["throughput"][u"NDR"][u"LOWER"]
                                    stdev = float(u"nan")
                                elif ttype == u"pdr":
                                        test["throughput"][u"PDR"][u"LOWER"]
                                    stdev = float(u"nan")
                                chart_data[test_id][int(index)] = {
                                    u"receive-rate": rate,
                                    u"receive-stdev": stdev
                                chart_tags[test_id] = \
                                    test.get(u"tags", None)
                                except (KeyError, TypeError):
                # Add items to the csv table:
                # One csv row per test, one column per build of this job.
                for tst_name, tst_data in chart_data.items():
                    for bld in builds_dict[job_name]:
                        itm = tst_data.get(int(bld), dict())
                        # CSIT-1180: Itm will be list, compute stats.
                            tst_lst.append(str(itm.get(u"receive-rate", u"")))
                        except AttributeError:
                    csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
                # Generate traces either per tag group (with visibility
                # masks for update-menu buttons) or for all tests at once.
                groups = graph.get(u"groups", None)
                        for tst_name, test_data in chart_data.items():
                                f"No data for the test {tst_name}"
                            if tag not in chart_tags[tst_name]:
                            trace, rslt = _generate_trending_traces(
                                build_info=build_info,
                                name=u'-'.join(tst_name.split(u'.')[-1].
                            logging.error(f"Out of colors: index: "
                                          f"{index}, test: {tst_name}")
                            [True for _ in range(len(trace))]
                        visibility.append(visible)
                    for tst_name, test_data in chart_data.items():
                            logging.warning(f"No data for the test {tst_name}")
                        trace, rslt = _generate_trending_traces(
                            build_info=build_info,
                            tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                                f"Out of colors: index: "
                                f"{index}, test: {tst_name}"
                # Generate the chart:
                # Every plot must define its layout in the specification.
                    layout = deepcopy(graph[u"layout"])
                except KeyError as err:
                    logging.error(u"Finished with error: No layout defined")
                    logging.error(repr(err))
                # With groups: one visibility mask per group, so buttons
                # can show exactly one group's traces at a time.
                for i in range(len(visibility)):
                    for vis_idx, _ in enumerate(visibility):
                        for _ in range(len(visibility[vis_idx])):
                            visible.append(i == vis_idx)
                            [True for _ in range(len(show[0]))]}, ]
                # One button per group, labelled from the spec when given.
                for i in range(len(groups)):
                        label = graph[u"group-names"][i]
                    except (IndexError, KeyError):
                        label = f"Group {i + 1}"
                        args=[{u"visible": show[i]}, ]
                layout[u"updatemenus"] = list([
                    f"{spec.cpta[u'output-file']}/"
                    f"{graph[u'output-file-name']}.html"
                # Fill the {core}/{test_type} placeholders in the name.
                name_file = name_file.format(core=core, test_type=ttype)
                logging.info(f" Writing the file {name_file}")
                plpl = plgo.Figure(data=traces, layout=layout)
                except plerr.PlotlyEmptyDataError:
                    logging.warning(u"No data for the plot. Skipped.")
        # Return per-job csv rows and classification results to the caller.
            u"job_name": job_name,
            u"csv_table": csv_tbl,
    # Collect, per job, the builds which did not fail / disappear.
    for job, builds in spec.input.items():
        if builds_dict.get(job, None) is None:
            builds_dict[job] = list()
            if build[u"status"] not in (u"failed", u"not found", u"removed",
                builds_dict[job].append(str(build[u"build"]))
    # Create "build ID": "date" dict:
    # Per build remember (generated date, version, testbed name).
    tb_tbl = spec.environment.get(u"testbeds", None)
    for job_name, job_data in builds_dict.items():
        if build_info.get(job_name, None) is None:
            build_info[job_name] = OrderedDict()
        for build in job_data:
            tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
            testbed = tb_tbl.get(tb_ip, u"")
            build_info[job_name][build] = (
                input_data.metadata(job_name, build).get(u"generated", u""),
                input_data.metadata(job_name, build).get(u"version", u""),
    anomaly_classifications = dict()
    # Create the table header:
    # Three header rows per job: build numbers, build dates, versions.
    for job_name in builds_dict:
        if csv_tables.get(job_name, None) is None:
            csv_tables[job_name] = list()
        header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
        csv_tables[job_name].append(header)
        build_dates = [x[0] for x in build_info[job_name].values()]
        header = f"Build Date:,{u','.join(build_dates)}\n"
        csv_tables[job_name].append(header)
        versions = [x[1] for x in build_info[job_name].values()]
        header = f"Version:,{u','.join(versions)}\n"
        csv_tables[job_name].append(header)
    # Generate each chart, then merge its csv rows and classification
    # results into the per-job accumulators.
    for chart in spec.cpta[u"plots"]:
        results = _generate_chart(chart)
    for result in results:
        csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
        if anomaly_classifications.get(result[u"job_name"], None) is None:
            anomaly_classifications[result[u"job_name"]] = dict()
        anomaly_classifications[result[u"job_name"]].\
            update(result[u"results"])
    # Write the csv tables, then re-read them to render pretty txt tables
    # with the rates divided by 1e6 and rounded to two decimals.
    for job_name, csv_table in csv_tables.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)
        with open(f"{file_name}.csv", u"rt") as csv_file:
            csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
            for row in csv_content:
                # The first csv row provides the column headers.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                    for idx, item in enumerate(row):
                            row[idx] = str(round(float(item) / 1000000, 2))
                        txt_table.add_row(row)
                    # PrettyTable raises Exception
                    except Exception as err:
                        f"Error occurred while generating TXT table:\n{err}"
            txt_table.align[u"Build Number:"] = u"l"
            with open(f"{file_name}.txt", u"wt") as txt_file:
                txt_file.write(str(txt_table))
    # Per job, write regressions-*.txt and progressions-*.txt listing the
    # tests classified as such; a regression/outlier flips the result.
    if anomaly_classifications:
        for job_name, job_data in anomaly_classifications.items():
            f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"regression":
                        txt_file.write(test_name + u'\n')
                    if classification in (u"regression", u"outlier"):
            f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"progression":
                        txt_file.write(test_name + u'\n')
    logging.info(f"Partial results: {anomaly_classifications}")
    logging.info(f"Result: {result}")