1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
20 from collections import OrderedDict
21 from datetime import datetime
22 from copy import deepcopy
25 import plotly.offline as ploff
26 import plotly.graph_objs as plgo
27 import plotly.exceptions as plerr
29 from pal_utils import archive_input_data, execute_command, classify_anomalies
32 # Command to build the html format of the report
# NOTE(review): this listing is incomplete -- the embedded original line
# numbers are not contiguous, so parts of the sphinx command template and of
# the css override string are missing here.
# HTML_BUILDER is a format template; generate_cpta() fills in {date},
# {working_dir} and {build_dir} (see its HTML_BUILDER.format(...) call).
33 HTML_BUILDER = u'sphinx-build -v -c conf_cpta -a ' \
36 u'-D version="{date}" ' \
40 # .css file for the html format of the report
# THEME_OVERRIDES is written verbatim over the theme css patch files by
# generate_cpta(); do not insert anything below -- the triple-quoted string
# is still open (its terminator is outside this listing).
41 THEME_OVERRIDES = u"""/* override table width restrictions */
43 max-width: 1200px !important;
45 .rst-content blockquote {
51 display: inline-block;
59 .wy-menu-vertical li.current a {
61 border-right: solid 1px #c9c9c9;
64 .wy-menu-vertical li.toctree-l2.current > a {
68 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
73 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
78 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
85 border-top-width: medium;
86 border-bottom-width: medium;
87 border-top-style: none;
88 border-bottom-style: none;
89 border-top-color: currentcolor;
90 border-bottom-color: currentcolor;
91 padding-left: 2em -4px;
122 def generate_cpta(spec, data):
123 """Generate all formats and versions of the Continuous Performance Trending
126 :param spec: Specification read from the specification file.
127 :param data: Full data set.
128 :type spec: Specification
129 :type data: InputData
# NOTE(review): this listing is incomplete -- original line numbers are not
# contiguous, so statements (including the docstring terminator) are missing.
132 logging.info(u"Generating the Continuous Performance Trending and Analysis "
# Generate all charts first; ret_code is presumably returned/used on a line
# not visible in this listing -- TODO confirm.
135 ret_code = _generate_all_charts(spec, data)
# Format the sphinx-build command from the HTML_BUILDER template; the line
# that executes it (execute_command is imported above) is not visible here.
137 cmd = HTML_BUILDER.format(
138 date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
139 working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
140 build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
# Overwrite both theme css patch files with the local THEME_OVERRIDES.
143 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
145 css_file.write(THEME_OVERRIDES)
147 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
149 css_file.write(THEME_OVERRIDES)
# Archiving of input data defaults to enabled unless the spec disables it.
151 if spec.configuration.get(u"archive-inputs", True):
152 archive_input_data(spec)
154 logging.info(u"Done.")
159 def _generate_trending_traces(in_data, job_name, build_info,
160 name=u"", color=u"", incl_tests=u"MRR"):
161 """Generate the trending traces:
163 - outliers, regress, progress
164 - average of normal samples (trending line)
166 :param in_data: Full data set.
167 :param job_name: The name of job which generated the data.
168 :param build_info: Information about the builds.
169 :param name: Name of the plot
170 :param color: Name of the color for the plot.
171 :param incl_tests: Included tests, accepted values: MRR, NDR, PDR
172 :type in_data: OrderedDict
174 :type build_info: dict
177 :type incl_tests: str
178 :returns: Generated traces (list) and the evaluated result.
179 :rtype: tuple(traces, result)
# NOTE(review): this listing is incomplete -- original line numbers are not
# contiguous, so statements (including the docstring terminator and the body
# of this guard) are missing.
182 if incl_tests not in (u"MRR", u"NDR", u"PDR"):
# Split the input into x-axis keys and y-axis samples; rate/stdev are also
# kept scaled to Mpps for the plot and hover texts.
185 data_x = list(in_data.keys())
188 data_y_stdev = list()
189 for item in in_data.values():
190 data_y_pps.append(float(item[u"receive-rate"]))
191 data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
192 data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
# Build per-sample hover texts and x-axis timestamps from build_info.
196 for index, key in enumerate(data_x):
198 date = build_info[job_name][str_key][0]
199 hover_str = (u"date: {date}<br>"
200 u"{property} [Mpps]: {value:.3f}<br>"
202 u"{sut}-ref: {build}<br>"
203 u"csit-ref: {test}-{period}-build-{build_nr}<br>"
204 u"testbed: {testbed}")
# Only MRR samples carry a stdev; for NDR/PDR the <stdev> placeholder is
# dropped from the hover text.
205 if incl_tests == u"MRR":
206 hover_str = hover_str.replace(
207 u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
210 hover_str = hover_str.replace(u"<stdev>", u"")
# SUT-specific formatting of the hover text (dpdk vs vpp jobs).
211 if u"dpdk" in job_name:
212 hover_text.append(hover_str.format(
214 property=u"average" if incl_tests == u"MRR" else u"throughput",
215 value=data_y_mpps[index],
217 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
218 test=incl_tests.lower(),
221 testbed=build_info[job_name][str_key][2]))
222 elif u"vpp" in job_name:
223 hover_text.append(hover_str.format(
225 property=u"average" if incl_tests == u"MRR" else u"throughput",
226 value=data_y_mpps[index],
228 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
229 test=incl_tests.lower(),
230 period=u"daily" if incl_tests == u"MRR" else u"weekly",
232 testbed=build_info[job_name][str_key][2]))
# Parse the build date string into a datetime x-coordinate; the slicing
# assumes a fixed "YYYYMMDD?HHMM..." layout -- TODO confirm format.
234 xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
235 int(date[9:11]), int(date[12:])))
# Pair timestamps with raw pps samples and classify anomalies on them.
237 data_pd = OrderedDict()
238 for key, value in zip(xaxis, data_y_pps):
241 anomaly_classification, avgs_pps, stdevs_pps = classify_anomalies(data_pd)
# classify_anomalies works in pps; convert trend stats to Mpps for display.
242 avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
243 stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps]
245 anomalies = OrderedDict()
246 anomalies_colors = list()
247 anomalies_avgs = list()
# Collect only regression/progression points for the anomaly trace.
253 if anomaly_classification:
254 for index, (key, value) in enumerate(data_pd.items()):
255 if anomaly_classification[index] in (u"regression", u"progression"):
256 anomalies[key] = value / 1e6
257 anomalies_colors.append(
258 anomaly_color[anomaly_classification[index]])
259 anomalies_avgs.append(avgs_mpps[index])
# Pad the color list so the color scale spans [0, 1] and the three
# colorbar tick labels below stay aligned.
260 anomalies_colors.extend([0.0, 0.5, 1.0])
# Trace 1: the raw samples (scatter of measured rates).
264 trace_samples = plgo.Scatter(
277 u"symbol": u"circle",
280 hoverinfo=u"text+name"
282 traces = [trace_samples, ]
284 trend_hover_text = list()
285 for idx in range(len(data_x)):
287 f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
288 f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
290 trend_hover_text.append(trend_hover_str)
# Trace 2: the trending line (average of normal samples).
292 trace_trend = plgo.Scatter(
304 text=trend_hover_text,
305 hoverinfo=u"text+name"
307 traces.append(trace_trend)
# Trace 3: anomaly markers with a colorbar legend mapping color position
# to Regression / Normal / Progression.
309 trace_anomalies = plgo.Scatter(
310 x=list(anomalies.keys()),
316 name=f"{name}-anomalies",
319 u"symbol": u"circle-open",
320 u"color": anomalies_colors,
336 u"title": u"Circles Marking Data Classification",
337 u"titleside": u"right",
341 u"tickmode": u"array",
342 u"tickvals": [0.167, 0.500, 0.833],
343 u"ticktext": [u"Regression", u"Normal", u"Progression"],
351 traces.append(trace_anomalies)
# Result is the classification of the newest sample; the fallback return
# for an empty classification is not visible in this listing.
353 if anomaly_classification:
354 return traces, anomaly_classification[-1]
359 def _generate_all_charts(spec, input_data):
360 """Generate all charts specified in the specification file.
362 :param spec: Specification.
363 :param input_data: Full data set.
364 :type spec: Specification
365 :type input_data: InputData
# NOTE(review): this listing is incomplete -- original line numbers are not
# contiguous, so statements (including the docstring terminator) are missing.
368 def _generate_chart(graph):
369 """Generates the chart.
371 :param graph: The graph to be generated
373 :returns: Dictionary with the job name, csv table with results and
374 list of tests classification results.
378 logging.info(f"    Generating the chart {graph.get(u'title', u'')} ...")
380 incl_tests = graph.get(u"include-tests", u"MRR")
382 job_name = list(graph[u"data"].keys())[0]
389 f"    Creating the data set for the {graph.get(u'type', u'')} "
390 f"{graph.get(u'title', u'')}."
# Filter the input data either by explicit test names or by the generic
# data filter; failures are tolerated (continue_on_error=True).
393 if graph.get(u"include", None):
394 data = input_data.filter_tests_by_name(
396 params=[u"type", u"result", u"throughput", u"tags"],
397 continue_on_error=True
400 data = input_data.filter_data(
402 params=[u"type", u"result", u"throughput", u"tags"],
403 continue_on_error=True)
405 if data is None or data.empty:
406 logging.error(u"No data.")
# Transform the filtered data into per-test series: build index -> rate
# and stdev; tags are remembered per test for optional grouping below.
411 for job, job_data in data.items():
414 for index, bld in job_data.items():
415 for test_name, test in bld.items():
416 if chart_data.get(test_name, None) is None:
417 chart_data[test_name] = OrderedDict()
# MRR has a measured stdev; NDR/PDR use the LOWER bound and NaN stdev.
419 if incl_tests == u"MRR":
420 rate = test[u"result"][u"receive-rate"]
421 stdev = test[u"result"][u"receive-stdev"]
422 elif incl_tests == u"NDR":
423 rate = test[u"throughput"][u"NDR"][u"LOWER"]
424 stdev = float(u"nan")
425 elif incl_tests == u"PDR":
426 rate = test[u"throughput"][u"PDR"][u"LOWER"]
427 stdev = float(u"nan")
430 chart_data[test_name][int(index)] = {
431 u"receive-rate": rate,
432 u"receive-stdev": stdev
434 chart_tags[test_name] = test.get(u"tags", None)
435 except (KeyError, TypeError):
438 # Add items to the csv table:
439 for tst_name, tst_data in chart_data.items():
441 for bld in builds_dict[job_name]:
442 itm = tst_data.get(int(bld), dict())
443 # CSIT-1180: Itm will be list, compute stats.
445 tst_lst.append(str(itm.get(u"receive-rate", u"")))
446 except AttributeError:
448 csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
# Generate traces: either per tag group (with per-group visibility masks)
# or one pass over all tests when no groups are specified.
453 groups = graph.get(u"groups", None)
460 for tst_name, test_data in chart_data.items():
461 logging.info(tst_name)
462 logging.info(tst_data)
463 logging.info(u"---------------------------------------")
465 logging.warning(f"No data for the test {tst_name}")
467 if tag not in chart_tags[tst_name]:
470 trace, rslt = _generate_trending_traces(
473 build_info=build_info,
474 name=u'-'.join(tst_name.split(u'.')[-1].
477 incl_tests=incl_tests
480 logging.error(f"Out of colors: index: "
481 f"{index}, test: {tst_name}")
485 visible.extend([True for _ in range(len(trace))])
489 visibility.append(visible)
491 for tst_name, test_data in chart_data.items():
493 logging.warning(f"No data for the test {tst_name}")
496 trace, rslt = _generate_trending_traces(
499 build_info=build_info,
501 tst_name.split(u'.')[-1].split(u'-')[2:-1]),
503 incl_tests=incl_tests
507 f"Out of colors: index: {index}, test: {tst_name}"
516 # Generate the chart:
518 layout = deepcopy(graph[u"layout"])
519 except KeyError as err:
520 logging.error(u"Finished with error: No layout defined")
521 logging.error(repr(err))
# Build per-group visibility masks used by the layout's update-menu
# buttons (one button shows all, one per group).
525 for i in range(len(visibility)):
527 for vis_idx, _ in enumerate(visibility):
528 for _ in range(len(visibility[vis_idx])):
529 visible.append(i == vis_idx)
536 args=[{u"visible": [True for _ in range(len(show[0]))]}, ]
538 for i in range(len(groups)):
540 label = graph[u"group-names"][i]
541 except (IndexError, KeyError):
542 label = f"Group {i + 1}"
546 args=[{u"visible": show[i]}, ]
549 layout[u"updatemenus"] = list([
563 f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
564 f"{spec.cpta[u'output-file-type']}")
566 logging.info(f"    Writing the file {name_file} ...")
# Render and write the figure; empty plot data is logged, not fatal.
567 plpl = plgo.Figure(data=traces, layout=layout)
569 ploff.plot(plpl, show_link=False, auto_open=False,
571 except plerr.PlotlyEmptyDataError:
572 logging.warning(u"No data for the plot. Skipped.")
574 return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}
# Collect the list of usable builds per job, skipping failed/missing ones.
577 for job in spec.input[u"builds"].keys():
578 if builds_dict.get(job, None) is None:
579 builds_dict[job] = list()
580 for build in spec.input[u"builds"][job]:
581 status = build[u"status"]
582 if status not in (u"failed", u"not found", u"removed", None):
583 builds_dict[job].append(str(build[u"build"]))
585 # Create "build ID": "date" dict:
# build_info maps job -> build -> (generated date, version, testbed name).
587 tb_tbl = spec.environment.get(u"testbeds", None)
588 for job_name, job_data in builds_dict.items():
589 if build_info.get(job_name, None) is None:
590 build_info[job_name] = OrderedDict()
591 for build in job_data:
593 tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
595 testbed = tb_tbl.get(tb_ip, u"")
596 build_info[job_name][build] = (
597 input_data.metadata(job_name, build).get(u"generated", u""),
598 input_data.metadata(job_name, build).get(u"version", u""),
602 anomaly_classifications = dict()
604 # Create the table header:
# Three header rows per job: build numbers, build dates and versions.
606 for job_name in builds_dict:
607 if csv_tables.get(job_name, None) is None:
608 csv_tables[job_name] = list()
609 header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
610 csv_tables[job_name].append(header)
611 build_dates = [x[0] for x in build_info[job_name].values()]
612 header = f"Build Date:,{u','.join(build_dates)}\n"
613 csv_tables[job_name].append(header)
614 versions = [x[1] for x in build_info[job_name].values()]
615 header = f"Version:,{u','.join(versions)}\n"
616 csv_tables[job_name].append(header)
# Generate each chart and accumulate its csv rows and anomaly results.
618 for chart in spec.cpta[u"plots"]:
619 result = _generate_chart(chart)
623 csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
625 if anomaly_classifications.get(result[u"job_name"], None) is None:
626 anomaly_classifications[result[u"job_name"]] = dict()
627 anomaly_classifications[result[u"job_name"]].update(result[u"results"])
# Write per-job trending tables: the csv file, then a PrettyTable txt
# rendering built by re-reading that csv.
630 for job_name, csv_table in csv_tables.items():
631 file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
632 with open(f"{file_name}.csv", u"wt") as file_handler:
633 file_handler.writelines(csv_table)
636 with open(f"{file_name}.csv", u"rt") as csv_file:
637 csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
639 for row in csv_content:
# First row becomes the PrettyTable header; data rows follow.
640 if txt_table is None:
641 txt_table = prettytable.PrettyTable(row)
644 for idx, item in enumerate(row):
# Data cells hold pps; display rounded Mpps in the txt table.
646 row[idx] = str(round(float(item) / 1000000, 2))
650 txt_table.add_row(row)
651 # PrettyTable raises Exception
652 except Exception as err:
654 f"Error occurred while generating TXT table:\n{err}"
657 txt_table.align[u"Build Number:"] = u"l"
658 with open(f"{file_name}.txt", u"wt") as txt_file:
659 txt_file.write(str(txt_table))
# Write the per-job lists of regressed and progressed tests.
662 if anomaly_classifications:
664 for job_name, job_data in anomaly_classifications.items():
666 f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
667 with open(file_name, u'w') as txt_file:
668 for test_name, classification in job_data.items():
669 if classification == u"regression":
670 txt_file.write(test_name + u'\n')
671 if classification in (u"regression", u"outlier"):
674 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
675 with open(file_name, u'w') as txt_file:
676 for test_name, classification in job_data.items():
677 if classification == u"progression":
678 txt_file.write(test_name + u'\n')
682 logging.info(f"Partial results: {anomaly_classifications}")
683 logging.info(f"Result: {result}")