1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
20 from collections import OrderedDict
21 from datetime import datetime
22 from copy import deepcopy
25 import plotly.offline as ploff
26 import plotly.graph_objs as plgo
27 import plotly.exceptions as plerr
29 from pal_utils import archive_input_data, execute_command, classify_anomalies
# ---------------------------------------------------------------------------
# Module-level constants.
# NOTE(review): this is a sampled listing — the leading numbers on each line
# are the original file's line numbers and interior lines are elided, so the
# snippet is not runnable as-is.
#   HTML_BUILDER    - sphinx-build command template; formatted later with
#                     date/working_dir/build_dir (see generate_cpta).
#   THEME_OVERRIDES - CSS patch text written over the generated HTML theme.
#   color names     - long, repeated palette of plotly color names; the
#                     assignment line (presumably a COLORS-like constant) is
#                     elided — TODO confirm against the full file.
# ---------------------------------------------------------------------------
32 # Command to build the html format of the report
33 HTML_BUILDER = u'sphinx-build -v -c conf_cpta -a ' \
36 u'-D version="{date}" ' \
40 # .css file for the html format of the report
41 THEME_OVERRIDES = u"""/* override table width restrictions */
43 max-width: 1200px !important;
45 .rst-content blockquote {
51 display: inline-block;
59 .wy-menu-vertical li.current a {
61 border-right: solid 1px #c9c9c9;
64 .wy-menu-vertical li.toctree-l2.current > a {
68 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
73 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
78 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
85 border-top-width: medium;
86 border-bottom-width: medium;
87 border-top-style: none;
88 border-bottom-style: none;
89 border-top-color: currentcolor;
90 border-bottom-color: currentcolor;
91 padding-left: 2em -4px;
96 u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
97 u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
98 u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
99 u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
100 u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
101 u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey",
102 u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
103 u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
104 u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
105 u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
106 u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
107 u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey"
# generate_cpta(spec, data): top-level driver for the Continuous Performance
# Trending and Analysis output.  From the visible lines it:
#   - generates all charts via _generate_all_charts(spec, data),
#   - formats HTML_BUILDER with the current UTC date and the working/build
#     directories from spec.environment[u'paths'] (the execute_command(cmd)
#     call is presumably in the elided lines — TODO confirm),
#   - writes THEME_OVERRIDES into both CSS patch files,
#   - archives the input data when spec.configuration["archive-inputs"]
#     is truthy (default True), then logs completion.
# NOTE(review): sampled listing — leading numbers are original line numbers;
# interior lines (including the docstring terminator) are elided, so no
# comments are inserted past the docstring opener below.
111 def generate_cpta(spec, data):
112 """Generate all formats and versions of the Continuous Performance Trending
115 :param spec: Specification read from the specification file.
116 :param data: Full data set.
117 :type spec: Specification
118 :type data: InputData
121 logging.info(u"Generating the Continuous Performance Trending and Analysis "
124 ret_code = _generate_all_charts(spec, data)
126 cmd = HTML_BUILDER.format(
127 date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
128 working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
129 build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
132 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
134 css_file.write(THEME_OVERRIDES)
136 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
138 css_file.write(THEME_OVERRIDES)
140 if spec.configuration.get(u"archive-inputs", True):
141 archive_input_data(spec)
143 logging.info(u"Done.")
# _generate_trending_traces(): build the plotly traces for one trending plot.
# From the visible lines it:
#   - rejects incl_tests outside {MRR, NDR, PDR},
#   - converts in_data receive-rate / receive-stdev samples from pps to Mpps,
#   - formats per-point hover text; the dpdk and vpp branches differ —
#     only the vpp branch visibly supplies the daily/weekly "period" field,
#   - parses build dates out of build_info[job_name] into datetime x values,
#   - classifies samples via classify_anomalies(), collecting regression /
#     progression points plus three sentinel values (0.0, 0.5, 1.0) appended
#     to anomalies_colors, matching the colorbar tickvals below,
#   - assembles sample, trend and anomaly plgo.Scatter traces and returns
#     (traces, last classification).
# NOTE(review): sampled listing — leading numbers are original line numbers;
# interior lines (including the docstring terminator) are elided, so no
# comments are inserted past the docstring opener below.
148 def _generate_trending_traces(in_data, job_name, build_info,
149 name=u"", color=u"", incl_tests=u"MRR"):
150 """Generate the trending traces:
152 - outliers, regress, progress
153 - average of normal samples (trending line)
155 :param in_data: Full data set.
156 :param job_name: The name of job which generated the data.
157 :param build_info: Information about the builds.
158 :param name: Name of the plot
159 :param color: Name of the color for the plot.
160 :param incl_tests: Included tests, accepted values: MRR, NDR, PDR
161 :type in_data: OrderedDict
163 :type build_info: dict
166 :type incl_tests: str
167 :returns: Generated traces (list) and the evaluated result.
168 :rtype: tuple(traces, result)
171 if incl_tests not in (u"MRR", u"NDR", u"PDR"):
174 data_x = list(in_data.keys())
177 data_y_stdev = list()
178 for item in in_data.values():
179 data_y_pps.append(float(item[u"receive-rate"]))
180 data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
181 data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
185 for index, key in enumerate(data_x):
187 date = build_info[job_name][str_key][0]
188 hover_str = (u"date: {date}<br>"
189 u"{property} [Mpps]: {value:.3f}<br>"
191 u"{sut}-ref: {build}<br>"
192 u"csit-ref: {test}-{period}-build-{build_nr}<br>"
193 u"testbed: {testbed}")
194 if incl_tests == u"MRR":
195 hover_str = hover_str.replace(
196 u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
199 hover_str = hover_str.replace(u"<stdev>", u"")
200 if u"dpdk" in job_name:
201 hover_text.append(hover_str.format(
203 property=u"average" if incl_tests == u"MRR" else u"throughput",
204 value=data_y_mpps[index],
206 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
207 test=incl_tests.lower(),
210 testbed=build_info[job_name][str_key][2]))
211 elif u"vpp" in job_name:
212 hover_text.append(hover_str.format(
214 property=u"average" if incl_tests == u"MRR" else u"throughput",
215 value=data_y_mpps[index],
217 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
218 test=incl_tests.lower(),
219 period=u"daily" if incl_tests == u"MRR" else u"weekly",
221 testbed=build_info[job_name][str_key][2]))
223 xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
224 int(date[9:11]), int(date[12:])))
226 data_pd = OrderedDict()
227 for key, value in zip(xaxis, data_y_pps):
230 anomaly_classification, avgs_pps, stdevs_pps = classify_anomalies(data_pd)
231 avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
232 stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps]
234 anomalies = OrderedDict()
235 anomalies_colors = list()
236 anomalies_avgs = list()
242 if anomaly_classification:
243 for index, (key, value) in enumerate(data_pd.items()):
244 if anomaly_classification[index] in (u"regression", u"progression"):
245 anomalies[key] = value / 1e6
246 anomalies_colors.append(
247 anomaly_color[anomaly_classification[index]])
248 anomalies_avgs.append(avgs_mpps[index])
249 anomalies_colors.extend([0.0, 0.5, 1.0])
253 trace_samples = plgo.Scatter(
266 u"symbol": u"circle",
269 hoverinfo=u"text+name"
271 traces = [trace_samples, ]
273 trend_hover_text = list()
274 for idx in range(len(data_x)):
276 f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
277 f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
279 trend_hover_text.append(trend_hover_str)
281 trace_trend = plgo.Scatter(
293 text=trend_hover_text,
294 hoverinfo=u"text+name"
296 traces.append(trace_trend)
298 trace_anomalies = plgo.Scatter(
299 x=list(anomalies.keys()),
305 name=f"{name}-anomalies",
308 u"symbol": u"circle-open",
309 u"color": anomalies_colors,
325 u"title": u"Circles Marking Data Classification",
326 u"titleside": u"right",
330 u"tickmode": u"array",
331 u"tickvals": [0.167, 0.500, 0.833],
332 u"ticktext": [u"Regression", u"Normal", u"Progression"],
340 traces.append(trace_anomalies)
342 if anomaly_classification:
343 return traces, anomaly_classification[-1]
# _generate_all_charts(): drive chart generation for every plot listed in
# spec.cpta[u"plots"].  Visible responsibilities:
#   - nested _generate_chart(graph): filter input_data, collect per-test
#     receive-rate/stdev (MRR) or NDR/PDR LOWER throughput, build traces via
#     _generate_trending_traces(), plot with plotly offline, and return
#     {"job_name", "csv_table", "results"},
#   - collect usable builds per job (skipping failed / not found / removed),
#   - map each build to (generated date, version, testbed) via
#     input_data.metadata(),
#   - write per-job trending CSV + TXT tables and the per-job
#     regressions-/progressions-*.txt summary files.
# NOTE(review): sampled listing — leading numbers are original line numbers
# and many interior lines are elided; comments below are placed only at
# statement boundaries visible in this sample.
348 def _generate_all_charts(spec, input_data):
349 """Generate all charts specified in the specification file.
351 :param spec: Specification.
352 :param input_data: Full data set.
353 :type spec: Specification
354 :type input_data: InputData
357 def _generate_chart(graph):
358 """Generates the chart.
360 :param graph: The graph to be generated
362 :returns: Dictionary with the job name, csv table with results and
363 list of tests classification results.
367 logging.info(f"  Generating the chart {graph.get(u'title', u'')} ...")
369 incl_tests = graph.get(u"include-tests", u"MRR")
371 job_name = list(graph[u"data"].keys())[0]
378 f"  Creating the data set for the {graph.get(u'type', u'')} "
379 f"{graph.get(u'title', u'')}."
382 if graph.get(u"include", None):
383 data = input_data.filter_tests_by_name(
385 params=[u"type", u"result", u"throughput", u"tags"],
386 continue_on_error=True
389 data = input_data.filter_data(
391 params=[u"type", u"result", u"throughput", u"tags"],
392 continue_on_error=True)
# Guard: nothing to chart when filtering produced no data.
394 if data is None or data.empty:
395 logging.error(u"No data.")
400 for job, job_data in data.items():
403 for index, bld in job_data.items():
404 for test_name, test in bld.items():
405 if chart_data.get(test_name, None) is None:
406 chart_data[test_name] = OrderedDict()
408 if incl_tests == u"MRR":
409 rate = test[u"result"][u"receive-rate"]
410 stdev = test[u"result"][u"receive-stdev"]
411 elif incl_tests == u"NDR":
412 rate = test[u"throughput"][u"NDR"][u"LOWER"]
413 stdev = float(u"nan")
414 elif incl_tests == u"PDR":
415 rate = test[u"throughput"][u"PDR"][u"LOWER"]
416 stdev = float(u"nan")
419 chart_data[test_name][int(index)] = {
420 u"receive-rate": rate,
421 u"receive-stdev": stdev
423 chart_tags[test_name] = test.get(u"tags", None)
424 except (KeyError, TypeError):
427 # Add items to the csv table:
428 for tst_name, tst_data in chart_data.items():
430 for bld in builds_dict[job_name]:
431 itm = tst_data.get(int(bld), dict())
432 # CSIT-1180: Itm will be list, compute stats.
434 tst_lst.append(str(itm.get(u"receive-rate", u"")))
435 except AttributeError:
437 csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
# Two trace-building paths follow — presumably grouped (by tag) vs. flat
# iteration over chart_data; the branch between them is elided. TODO confirm.
442 groups = graph.get(u"groups", None)
449 for tst_name, test_data in chart_data.items():
451 logging.warning(f"No data for the test {tst_name}")
453 if tag not in chart_tags[tst_name]:
456 trace, rslt = _generate_trending_traces(
459 build_info=build_info,
460 name=u'-'.join(tst_name.split(u'.')[-1].
463 incl_tests=incl_tests
466 logging.error(f"Out of colors: index: "
467 f"{index}, test: {tst_name}")
471 visible.extend([True for _ in range(len(trace))])
475 visibility.append(visible)
477 for tst_name, test_data in chart_data.items():
479 logging.warning(f"No data for the test {tst_name}")
482 trace, rslt = _generate_trending_traces(
485 build_info=build_info,
487 tst_name.split(u'.')[-1].split(u'-')[2:-1]),
489 incl_tests=incl_tests
493 f"Out of colors: index: {index}, test: {tst_name}"
502 # Generate the chart:
504 layout = deepcopy(graph[u"layout"])
505 except KeyError as err:
506 logging.error(u"Finished with error: No layout defined")
507 logging.error(repr(err))
# Build per-group visibility masks and updatemenu buttons for the layout.
511 for i in range(len(visibility)):
513 for vis_idx, _ in enumerate(visibility):
514 for _ in range(len(visibility[vis_idx])):
515 visible.append(i == vis_idx)
522 args=[{u"visible": [True for _ in range(len(show[0]))]}, ]
524 for i in range(len(groups)):
526 label = graph[u"group-names"][i]
527 except (IndexError, KeyError):
528 label = f"Group {i + 1}"
532 args=[{u"visible": show[i]}, ]
535 layout[u"updatemenus"] = list([
549 f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
550 f"{spec.cpta[u'output-file-type']}")
552 logging.info(f"    Writing the file {name_file} ...")
553 plpl = plgo.Figure(data=traces, layout=layout)
555 ploff.plot(plpl, show_link=False, auto_open=False,
557 except plerr.PlotlyEmptyDataError:
558 logging.warning(u"No data for the plot. Skipped.")
560 return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}
# --- body of _generate_all_charts (visible part) ---
# Collect the usable builds per job; builds with status failed / not found /
# removed / None are skipped.
563 for job in spec.input[u"builds"].keys():
564 if builds_dict.get(job, None) is None:
565 builds_dict[job] = list()
566 for build in spec.input[u"builds"][job]:
567 status = build[u"status"]
568 if status not in (u"failed", u"not found", u"removed", None):
569 builds_dict[job].append(str(build[u"build"]))
571 # Create "build ID": "date" dict:
573 tb_tbl = spec.environment.get(u"testbeds", None)
574 for job_name, job_data in builds_dict.items():
575 if build_info.get(job_name, None) is None:
576 build_info[job_name] = OrderedDict()
577 for build in job_data:
579 tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
581 testbed = tb_tbl.get(tb_ip, u"")
582 build_info[job_name][build] = (
583 input_data.metadata(job_name, build).get(u"generated", u""),
584 input_data.metadata(job_name, build).get(u"version", u""),
588 anomaly_classifications = dict()
590 # Create the table header:
592 for job_name in builds_dict:
593 if csv_tables.get(job_name, None) is None:
594 csv_tables[job_name] = list()
595 header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
596 csv_tables[job_name].append(header)
597 build_dates = [x[0] for x in build_info[job_name].values()]
598 header = f"Build Date:,{u','.join(build_dates)}\n"
599 csv_tables[job_name].append(header)
600 versions = [x[1] for x in build_info[job_name].values()]
601 header = f"Version:,{u','.join(versions)}\n"
602 csv_tables[job_name].append(header)
# Generate each chart and merge its CSV rows and classification results.
604 for chart in spec.cpta[u"plots"]:
605 result = _generate_chart(chart)
609 csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
611 if anomaly_classifications.get(result[u"job_name"], None) is None:
612 anomaly_classifications[result[u"job_name"]] = dict()
613 anomaly_classifications[result[u"job_name"]].update(result[u"results"])
# Write the per-job trending CSV, then re-read it to render a TXT table
# (numeric cells are scaled to Mpps with two decimals).
616 for job_name, csv_table in csv_tables.items():
617 file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
618 with open(f"{file_name}.csv", u"wt") as file_handler:
619 file_handler.writelines(csv_table)
622 with open(f"{file_name}.csv", u"rt") as csv_file:
623 csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
625 for row in csv_content:
626 if txt_table is None:
627 txt_table = prettytable.PrettyTable(row)
630 for idx, item in enumerate(row):
632 row[idx] = str(round(float(item) / 1000000, 2))
636 txt_table.add_row(row)
637 # PrettyTable raises Exception
638 except Exception as err:
640 f"Error occurred while generating TXT table:\n{err}"
643 txt_table.align[u"Build Number:"] = u"l"
644 with open(f"{file_name}.txt", u"wt") as txt_file:
645 txt_file.write(str(txt_table))
# Emit per-job regressions-*.txt and progressions-*.txt summary files.
648 if anomaly_classifications:
650 for job_name, job_data in anomaly_classifications.items():
652 f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
653 with open(file_name, u'w') as txt_file:
654 for test_name, classification in job_data.items():
655 if classification == u"regression":
656 txt_file.write(test_name + u'\n')
657 if classification in (u"regression", u"outlier"):
660 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
661 with open(file_name, u'w') as txt_file:
662 for test_name, classification in job_data.items():
663 if classification == u"progression":
664 txt_file.write(test_name + u'\n')
668 logging.info(f"Partial results: {anomaly_classifications}")
669 logging.info(f"Result: {result}")