1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
20 from collections import OrderedDict
21 from datetime import datetime
22 from copy import deepcopy
25 import plotly.offline as ploff
26 import plotly.graph_objs as plgo
27 import plotly.exceptions as plerr
29 from pal_utils import archive_input_data, execute_command, classify_anomalies
# NOTE(review): this listing appears corrupted — every line carries a stray
# numeric prefix and many original lines are missing, so the constants below
# are fragments. Commentary is limited to what is visible; confirm against
# the full source file.
#
# HTML_BUILDER: sphinx-build command template; generate_cpta() fills the
# {date} (and presumably {working_dir}/{build_dir}) placeholders.
# THEME_OVERRIDES: CSS payload written over the HTML theme's patch files
# (read-the-docs-style selectors are visible below).
# The color-name list (assignment target not visible here — presumably
# COLORS) repeats the same palette, seemingly so more per-test traces can be
# drawn before running out of colors — TODO confirm.
32 # Command to build the html format of the report
33 HTML_BUILDER = u'sphinx-build -v -c conf_cpta -a ' \
36 u'-D version="{date}" ' \
40 # .css file for the html format of the report
41 THEME_OVERRIDES = u"""/* override table width restrictions */
43 max-width: 1200px !important;
45 .rst-content blockquote {
51 display: inline-block;
59 .wy-menu-vertical li.current a {
61 border-right: solid 1px #c9c9c9;
64 .wy-menu-vertical li.toctree-l2.current > a {
68 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
73 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
78 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
85 border-top-width: medium;
86 border-bottom-width: medium;
87 border-top-style: none;
88 border-bottom-style: none;
89 border-top-color: currentcolor;
90 border-bottom-color: currentcolor;
91 padding-left: 2em -4px;
96 u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
97 u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
98 u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
99 u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
100 u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
101 u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey",
102 u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
103 u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
104 u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
105 u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
106 u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
107 u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey"
# NOTE(review): this listing carries stray per-line numeric prefixes and is
# missing many original lines; comments below describe only the visible code.
#
# Entry point: generates all CPTA charts, builds the HTML report via
# sphinx-build, patches the theme CSS, and optionally archives input data.
111 def generate_cpta(spec, data):
112 """Generate all formats and versions of the Continuous Performance Trending
115 :param spec: Specification read from the specification file.
116 :param data: Full data set.
117 :type spec: Specification
118 :type data: InputData
121 logging.info(u"Generating the Continuous Performance Trending and Analysis "
# Generate every chart defined in the specification; ret_code is presumably
# propagated or checked in lines missing from this listing — TODO confirm.
124 ret_code = _generate_all_charts(spec, data)
# Build the sphinx command from the HTML_BUILDER template, stamping the
# current UTC time as the displayed report version.
126 cmd = HTML_BUILDER.format(
127 date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
128 working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
129 build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
# Overwrite both theme CSS patch files with the same THEME_OVERRIDES payload.
132 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
134 css_file.write(THEME_OVERRIDES)
136 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
138 css_file.write(THEME_OVERRIDES)
# Archiving of inputs defaults to enabled when the key is absent.
140 if spec.configuration.get(u"archive-inputs", True):
141 archive_input_data(spec)
143 logging.info(u"Done.")
# NOTE(review): listing is corrupted (stray numeric prefixes, missing lines);
# comments below cover only what is visible. Several names used here
# (str_key, hover_text, xaxis, anomaly_color, and most Scatter kwargs) are
# defined on lines elided from this listing.
#
# Builds the plotly traces for one test's trend chart: raw samples, a trend
# line from classified averages, and highlighted anomaly points.
148 def _generate_trending_traces(in_data, job_name, build_info,
149 show_trend_line=True, name=u"", color=u""):
150 """Generate the trending traces:
152 - outliers, regress, progress
153 - average of normal samples (trending line)
155 :param in_data: Full data set.
156 :param job_name: The name of job which generated the data.
157 :param build_info: Information about the builds.
158 :param show_trend_line: Show moving median (trending plot).
159 :param name: Name of the plot
160 :param color: Name of the color for the plot.
161 :type in_data: OrderedDict
163 :type build_info: dict
164 :type show_trend_line: bool
167 :returns: Generated traces (list) and the evaluated result.
168 :rtype: tuple(traces, result)
# Extract x values (build keys) and y values: receive-rate in pps, plus
# Mpps-scaled rate and stdev (divided by 1e6) for display.
171 data_x = list(in_data.keys())
174 data_y_stdev = list()
175 for item in in_data.values():
176 data_y_pps.append(float(item[u"receive-rate"]))
177 data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
178 data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
# Build per-point hover text; the same template is filled slightly
# differently for dpdk vs vpp jobs (the differing kwargs — sut/period/
# build_nr — are on lines elided from this listing).
182 for index, key in enumerate(data_x):
184 date = build_info[job_name][str_key][0]
185 hover_str = (u"date: {date}<br>"
186 u"value [Mpps]: {value:.3f}<br>"
187 u"stdev [Mpps]: {stdev:.3f}<br>"
188 u"{sut}-ref: {build}<br>"
189 u"csit-ref: mrr-{period}-build-{build_nr}<br>"
190 u"testbed: {testbed}")
191 if u"dpdk" in job_name:
192 hover_text.append(hover_str.format(
194 value=data_y_mpps[index],
195 stdev=data_y_stdev[index],
197 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
200 testbed=build_info[job_name][str_key][2]))
201 elif u"vpp" in job_name:
202 hover_text.append(hover_str.format(
204 value=data_y_mpps[index],
205 stdev=data_y_stdev[index],
207 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
210 testbed=build_info[job_name][str_key][2]))
# Parse the build date string (presumably "YYYYMMDD?HH?MM" layout — the
# exact separators at positions 8 and 11 are not visible here) into a
# datetime for the x axis.
212 xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
213 int(date[9:11]), int(date[12:])))
# Pair dates with pps values and classify each point (normal / regression /
# progression); avgs come back in pps and are rescaled to Mpps.
215 data_pd = OrderedDict()
216 for key, value in zip(xaxis, data_y_pps):
219 anomaly_classification, avgs_pps = classify_anomalies(data_pd)
220 avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
222 anomalies = OrderedDict()
223 anomalies_colors = list()
224 anomalies_avgs = list()
# Collect only regression/progression points for the anomaly trace; the
# extra [0.0, 0.5, 1.0] entries appear to anchor the colorscale so all three
# classification colors are always present — TODO confirm.
230 if anomaly_classification:
231 for index, (key, value) in enumerate(data_pd.items()):
232 if anomaly_classification[index] in (u"regression", u"progression"):
233 anomalies[key] = value / 1e6
234 anomalies_colors.append(
235 anomaly_color[anomaly_classification[index]])
236 anomalies_avgs.append(avgs_mpps[index])
237 anomalies_colors.extend([0.0, 0.5, 1.0])
# Trace 1: raw samples (circle markers, text+name hover).
241 trace_samples = plgo.Scatter(
254 u"symbol": u"circle",
257 hoverinfo=u"text+name"
259 traces = [trace_samples, ]
# Trace 2: trend line built from the classified averages.
262 trace_trend = plgo.Scatter(
274 text=[f"trend [Mpps]: {avg:.3f}" for avg in avgs_mpps],
275 hoverinfo=u"text+name"
277 traces.append(trace_trend)
# Trace 3: anomaly markers (open circles) with a colorbar legend mapping
# the three tick positions to Regression / Normal / Progression.
279 trace_anomalies = plgo.Scatter(
280 x=list(anomalies.keys()),
286 name=f"{name}-anomalies",
289 u"symbol": u"circle-open",
290 u"color": anomalies_colors,
306 u"title": u"Circles Marking Data Classification",
307 u"titleside": u"right",
311 u"tickmode": u"array",
312 u"tickvals": [0.167, 0.500, 0.833],
313 u"ticktext": [u"Regression", u"Normal", u"Progression"],
321 traces.append(trace_anomalies)
# Return the traces plus the classification of the newest sample; the
# fallback return for an empty classification is on lines elided here.
323 if anomaly_classification:
324 return traces, anomaly_classification[-1]
# NOTE(review): listing is corrupted (stray numeric prefixes, missing lines);
# many initializations (chart_data, chart_tags, csv_tbl, traces, visibility,
# builds_dict, build_info, csv_tables, try: headers, etc.) sit on lines
# elided from this view. Comments describe only the visible code.
#
# Top-level driver: builds per-job build metadata, generates every chart
# from the spec, and writes CSV/TXT trending tables plus regression/
# progression summaries.
329 def _generate_all_charts(spec, input_data):
330 """Generate all charts specified in the specification file.
332 :param spec: Specification.
333 :param input_data: Full data set.
334 :type spec: Specification
335 :type input_data: InputData
# Nested worker: generates one chart and returns its partial results.
338 def _generate_chart(graph):
339 """Generates the chart.
341 :param graph: The graph to be generated
343 :returns: Dictionary with the job name, csv table with results and
344 list of tests classification results.
348 logging.info(f" Generating the chart {graph.get(u'title', u'')} ...")
# The chart is keyed by the first job listed in its "data" section.
350 job_name = list(graph[u"data"].keys())[0]
357 f" Creating the data set for the {graph.get(u'type', u'')} "
358 f"{graph.get(u'title', u'')}."
# Select tests either by explicit include list or by filter expression;
# both paths tolerate per-test errors (continue_on_error=True).
361 if graph.get(u"include", None):
362 data = input_data.filter_tests_by_name(
364 params=[u"type", u"result", u"tags"],
365 continue_on_error=True
368 data = input_data.filter_data(
370 params=[u"type", u"result", u"tags"],
371 continue_on_error=True)
373 if data is None or data.empty:
374 logging.error(u"No data.")
# Re-shape filtered data into chart_data[test][build] = rate/stdev and
# remember each test's tags; KeyError/TypeError per test are tolerated.
379 for job, job_data in data.items():
382 for index, bld in job_data.items():
383 for test_name, test in bld.items():
384 if chart_data.get(test_name, None) is None:
385 chart_data[test_name] = OrderedDict()
387 chart_data[test_name][int(index)] = {
388 u"receive-rate": test[u"result"][u"receive-rate"],
389 u"receive-stdev": test[u"result"][u"receive-stdev"]
391 chart_tags[test_name] = test.get(u"tags", None)
392 except (KeyError, TypeError):
# One CSV row per test: receive-rate per build, blank where missing.
395 # Add items to the csv table:
396 for tst_name, tst_data in chart_data.items():
398 for bld in builds_dict[job_name]:
399 itm = tst_data.get(int(bld), dict())
400 # CSIT-1180: Itm will be list, compute stats.
402 tst_lst.append(str(itm.get(u"receive-rate", u"")))
403 except AttributeError:
405 csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
# Two trace-generation paths: grouped (by tag) and ungrouped; both log and
# skip tests with no data and stop when the color palette is exhausted.
410 groups = graph.get(u"groups", None)
417 for tst_name, test_data in chart_data.items():
419 logging.warning(f"No data for the test {tst_name}")
421 if tag not in chart_tags[tst_name]:
424 trace, rslt = _generate_trending_traces(
427 build_info=build_info,
428 name=u'-'.join(tst_name.split(u'.')[-1].
432 logging.error(f"Out of colors: index: "
433 f"{index}, test: {tst_name}")
# Track visibility flags per group so the update-menu buttons can toggle
# whole groups of traces at once.
437 visible.extend([True for _ in range(len(trace))])
441 visibility.append(visible)
443 for tst_name, test_data in chart_data.items():
445 logging.warning(f"No data for the test {tst_name}")
448 trace, rslt = _generate_trending_traces(
451 build_info=build_info,
453 tst_name.split(u'.')[-1].split(u'-')[2:-1]),
457 f"Out of colors: index: {index}, test: {tst_name}"
# Assemble the figure: deep-copy the layout from the spec (missing layout
# is a hard error for this chart).
466 # Generate the chart:
468 layout = deepcopy(graph[u"layout"])
469 except KeyError as err:
470 logging.error(u"Finished with error: No layout defined")
471 logging.error(repr(err))
# Build per-group show masks: group i shows only its own traces.
475 for i in range(len(visibility)):
477 for vis_idx, _ in enumerate(visibility):
478 for _ in range(len(visibility[vis_idx])):
479 visible.append(i == vis_idx)
486 args=[{u"visible": [True for _ in range(len(show[0]))]}, ]
# One dropdown button per group, labelled from "group-names" when present,
# otherwise falling back to "Group N".
488 for i in range(len(groups)):
490 label = graph[u"group-names"][i]
491 except (IndexError, KeyError):
492 label = f"Group {i + 1}"
496 args=[{u"visible": show[i]}, ]
499 layout[u"updatemenus"] = list([
# Write the chart as a standalone offline HTML file; an empty-data plot is
# logged and skipped rather than raised.
513 f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
514 f"{spec.cpta[u'output-file-type']}")
516 logging.info(f" Writing the file {name_file} ...")
517 plpl = plgo.Figure(data=traces, layout=layout)
519 ploff.plot(plpl, show_link=False, auto_open=False,
521 except plerr.PlotlyEmptyDataError:
522 logging.warning(u"No data for the plot. Skipped.")
524 return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}
# Collect usable build numbers per job, skipping failed/missing builds.
527 for job in spec.input[u"builds"].keys():
528 if builds_dict.get(job, None) is None:
529 builds_dict[job] = list()
530 for build in spec.input[u"builds"][job]:
531 status = build[u"status"]
532 if status not in (u"failed", u"not found", u"removed", None):
533 builds_dict[job].append(str(build[u"build"]))
# Map each build to (generated date, version, testbed name) via metadata;
# testbed IPs are translated through the spec's "testbeds" table.
535 # Create "build ID": "date" dict:
537 tb_tbl = spec.environment.get(u"testbeds", None)
538 for job_name, job_data in builds_dict.items():
539 if build_info.get(job_name, None) is None:
540 build_info[job_name] = OrderedDict()
541 for build in job_data:
543 tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
545 testbed = tb_tbl.get(tb_ip, u"")
546 build_info[job_name][build] = (
547 input_data.metadata(job_name, build).get(u"generated", u""),
548 input_data.metadata(job_name, build).get(u"version", u""),
552 anomaly_classifications = dict()
# Seed each job's CSV table with build-number, build-date and version rows.
554 # Create the table header:
556 for job_name in builds_dict:
557 if csv_tables.get(job_name, None) is None:
558 csv_tables[job_name] = list()
559 header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
560 csv_tables[job_name].append(header)
561 build_dates = [x[0] for x in build_info[job_name].values()]
562 header = f"Build Date:,{u','.join(build_dates)}\n"
563 csv_tables[job_name].append(header)
564 versions = [x[1] for x in build_info[job_name].values()]
565 header = f"Version:,{u','.join(versions)}\n"
566 csv_tables[job_name].append(header)
# Generate each plot and merge its CSV rows and classification results.
568 for chart in spec.cpta[u"plots"]:
569 result = _generate_chart(chart)
573 csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
575 if anomaly_classifications.get(result[u"job_name"], None) is None:
576 anomaly_classifications[result[u"job_name"]] = dict()
577 anomaly_classifications[result[u"job_name"]].update(result[u"results"])
# Write the per-job CSV, then re-read it to render a TXT PrettyTable with
# values scaled from pps to Mpps (2 decimals).
580 for job_name, csv_table in csv_tables.items():
581 file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
582 with open(f"{file_name}.csv", u"wt") as file_handler:
583 file_handler.writelines(csv_table)
586 with open(f"{file_name}.csv", u"rt") as csv_file:
587 csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
589 for row in csv_content:
590 if txt_table is None:
# First CSV row becomes the table header.
591 txt_table = prettytable.PrettyTable(row)
594 for idx, item in enumerate(row):
596 row[idx] = str(round(float(item) / 1000000, 2))
600 txt_table.add_row(row)
601 # PrettyTable raises Exception
602 except Exception as err:
604 f"Error occurred while generating TXT table:\n{err}"
607 txt_table.align[u"Build Number:"] = u"l"
608 with open(f"{file_name}.txt", u"wt") as txt_file:
609 txt_file.write(str(txt_table))
# Emit per-job regressions-*.txt / progressions-*.txt summary files; the
# "outlier" branch body is on lines elided from this listing.
612 if anomaly_classifications:
614 for job_name, job_data in anomaly_classifications.items():
616 f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
617 with open(file_name, u'w') as txt_file:
618 for test_name, classification in job_data.items():
619 if classification == u"regression":
620 txt_file.write(test_name + u'\n')
621 if classification in (u"regression", u"outlier"):
624 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
625 with open(file_name, u'w') as txt_file:
626 for test_name, classification in job_data.items():
627 if classification == u"progression":
628 txt_file.write(test_name + u'\n')
632 logging.info(f"Partial results: {anomaly_classifications}")
633 logging.info(f"Result: {result}")