1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
20 from collections import OrderedDict
21 from datetime import datetime
22 from copy import deepcopy
25 import plotly.offline as ploff
26 import plotly.graph_objs as plgo
27 import plotly.exceptions as plerr
29 from pal_utils import archive_input_data, execute_command, classify_anomalies
32 # Command to build the html format of the report
33 HTML_BUILDER = u'sphinx-build -v -c conf_cpta -a ' \
36 u'-D version="{date}" ' \
40 # .css file for the html format of the report
41 THEME_OVERRIDES = u"""/* override table width restrictions */
43 max-width: 1200px !important;
45 .rst-content blockquote {
51 display: inline-block;
59 .wy-menu-vertical li.current a {
61 border-right: solid 1px #c9c9c9;
64 .wy-menu-vertical li.toctree-l2.current > a {
68 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
73 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
78 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
85 border-top-width: medium;
86 border-bottom-width: medium;
87 border-top-style: none;
88 border-bottom-style: none;
89 border-top-color: currentcolor;
90 border-bottom-color: currentcolor;
91 padding-left: 2em -4px;
96 u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
97 u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
98 u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
99 u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
100 u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
101 u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey",
102 u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
103 u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
104 u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
105 u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
106 u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
107 u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey"
111 def generate_cpta(spec, data):
112     """Generate all formats and versions of the Continuous Performance Trending
115     :param spec: Specification read from the specification file.
116     :param data: Full data set.
117     :type spec: Specification
118     :type data: InputData
# NOTE(review): this dump is missing source lines here (the docstring close,
# :returns:/:rtype: and part of the body) -- consult the full file before editing.
121     logging.info(u"Generating the Continuous Performance Trending and Analysis "
# Generate every chart / CSV table declared in the specification first.
124     ret_code = _generate_all_charts(spec, data)
# Compose the sphinx-build command (HTML_BUILDER template) using the current
# UTC timestamp and the source/build directories from the specification.
126     cmd = HTML_BUILDER.format(
127         date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
128         working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
129         build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
# Write the CSS overrides (THEME_OVERRIDES) into both patch files so the
# generated HTML picks up the table-width / menu styling tweaks.
132     with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
134         css_file.write(THEME_OVERRIDES)
136     with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
138         css_file.write(THEME_OVERRIDES)
# Archive the input data unless explicitly disabled; note the option
# defaults to True when absent from the configuration.
140     if spec.configuration.get(u"archive-inputs", True):
141         archive_input_data(spec)
143     logging.info(u"Done.")
148 def _generate_trending_traces(in_data, job_name, build_info,
149                               show_trend_line=True, name=u"", color=u""):
150     """Generate the trending traces:
152     - outliers, regress, progress
153     - average of normal samples (trending line)
155     :param in_data: Full data set.
156     :param job_name: The name of job which generated the data.
157     :param build_info: Information about the builds.
158     :param show_trend_line: Show moving median (trending plot).
159     :param name: Name of the plot
160     :param color: Name of the color for the plot.
161     :type in_data: OrderedDict
163     :type build_info: dict
164     :type show_trend_line: bool
167     :returns: Generated traces (list) and the evaluated result.
168     :rtype: tuple(traces, result)
# NOTE(review): lines are missing from this dump throughout the body (loop
# headers, several plgo.Scatter keyword blocks); verify against the full file.
# x values are the build indices; y values are scaled to millions (/ 1e6).
171     data_x = list(in_data.keys())
172     data_y = [float(item) / 1e6 for item in in_data.values()]
# (loop over builds -- header not visible in this dump)
# Per-point hover text: date, value, SUT build reference, CSIT build and testbed.
177         date = build_info[job_name][str(idx)][0]
178         hover_str = (u"date: {date}<br>"
179                      u"value: {value:,}<br>"
180                      u"{sut}-ref: {build}<br>"
181                      u"csit-ref: mrr-{period}-build-{build_nr}<br>"
182                      u"testbed: {testbed}")
# Fill the template differently for dpdk vs vpp jobs (sut/period fields differ
# in the lines missing from this dump; build/value/testbed handling is shared).
183         if u"dpdk" in job_name:
184             hover_text.append(hover_str.format(
186                 value=int(in_data[idx]),
188                 build=build_info[job_name][str(idx)][1].rsplit(u'~', 1)[0],
191                 testbed=build_info[job_name][str(idx)][2]))
192         elif u"vpp" in job_name:
193             hover_text.append(hover_str.format(
195                 value=int(in_data[idx]),
197                 build=build_info[job_name][str(idx)][1].rsplit(u'~', 1)[0],
200                 testbed=build_info[job_name][str(idx)][2]))
# Parse the "generated" timestamp string (YYYYMMDD HH:MM layout, by the slices
# used) into a datetime for the x axis.
202         xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
203                               int(date[9:11]), int(date[12:])))
# Re-key the data by datetime so the anomaly classifier sees a time series.
205     data_pd = OrderedDict()
206     for key, value in zip(xaxis, data_y):
# Classify each sample (regression/normal/progression/...) and get the
# moving averages used for the trend line.
209     anomaly_classification, avgs = classify_anomalies(data_pd)
211     anomalies = OrderedDict()
212     anomalies_colors = list()
213     anomalies_avgs = list()
219     if anomaly_classification:
# Collect only the points classified as anomalies, with a color code per class.
220         for idx, (key, value) in enumerate(data_pd.items()):
221             if anomaly_classification[idx] in \
222                     (u"outlier", u"regression", u"progression"):
223                 anomalies[key] = value
224                 anomalies_colors.append(
225                     anomaly_color[anomaly_classification[idx]])
226                 anomalies_avgs.append(avgs[idx])
# Append the three sentinel values so the colorscale always spans 0..1
# even when only one class of anomaly occurred.
227         anomalies_colors.extend([0.0, 0.5, 1.0])
# Trace 1: the raw samples.
231     trace_samples = plgo.Scatter(
244             u"symbol": u"circle",
249     traces = [trace_samples, ]
# Trace 2: the trend line built from the moving averages
# (presumably only added when show_trend_line is True -- the guard is in a
# line missing from this dump; confirm against the full file).
252         trace_trend = plgo.Scatter(
264             text=[f"trend: {int(avg):,}" for avg in avgs],
265             hoverinfo=u"text+name"
267         traces.append(trace_trend)
# Trace 3: open circles marking the anomalies, with a colorbar legend mapping
# the three tick positions to Regression / Normal / Progression.
269     trace_anomalies = plgo.Scatter(
270         x=list(anomalies.keys()),
276         name=f"{name}-anomalies",
279             u"symbol": u"circle-open",
280             u"color": anomalies_colors,
296                 u"title": u"Circles Marking Data Classification",
297                 u"titleside": u"right",
301                 u"tickmode": u"array",
302                 u"tickvals": [0.167, 0.500, 0.833],
303                 u"ticktext": [u"Regression", u"Normal", u"Progression"],
311     traces.append(trace_anomalies)
# The evaluated result is the classification of the most recent sample.
313     if anomaly_classification:
314         return traces, anomaly_classification[-1]
319 def _generate_all_charts(spec, input_data):
320     """Generate all charts specified in the specification file.
322     :param spec: Specification.
323     :param input_data: Full data set.
324     :type spec: Specification
325     :type input_data: InputData
# NOTE(review): many source lines are missing from this dump (initialisations,
# try headers, updatemenus body, returns); verify against the full file.
328     def _generate_chart(graph):
329         """Generates the chart.
331         :param graph: The graph to be generated
333         :returns: Dictionary with the job name, csv table with results and
334                   list of tests classification results.
341             (u"INFO", f"  Generating the chart {graph.get(u'title', u'')} ...")
344         job_name = list(graph[u"data"].keys())[0]
352                 f"  Creating the data set for the {graph.get(u'type', u'')} "
353                 f"{graph.get(u'title', u'')}."
# Select the input data either by test name ("include" list) or by the
# graph's generic filter expression.
357         if graph.get(u"include", None):
358             data = input_data.filter_tests_by_name(
359                 graph, continue_on_error=True
362             data = input_data.filter_data(graph, continue_on_error=True)
363 
364         if data is None or data.empty:
365             logging.error(u"No data.")
# Re-shape the data: chart_data[test_name][build_index] = receive-rate result;
# chart_tags keeps each test's tags for the optional group filtering below.
370         for job, job_data in data.items():
373             for index, bld in job_data.items():
374                 for test_name, test in bld.items():
375                     if chart_data.get(test_name, None) is None:
376                         chart_data[test_name] = OrderedDict()
378                         chart_data[test_name][int(index)] = \
379                             test[u"result"][u"receive-rate"]
380                         chart_tags[test_name] = test.get(u"tags", None)
381                     except (KeyError, TypeError):
384         # Add items to the csv table:
385         for tst_name, tst_data in chart_data.items():
# One CSV row per test, one column per build (empty cell when the build
# produced no result for this test).
387             for bld in builds_dict[job_name]:
388                 itm = tst_data.get(int(bld), u'')
389                 # CSIT-1180: Itm will be list, compute stats.
390                 tst_lst.append(str(itm))
391             csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
# Two trace-generation paths: grouped (per-tag visibility toggles) or flat.
396         groups = graph.get(u"groups", None)
# Grouped path: only tests whose tags match the group's tag are traced;
# the per-group "visible" masks feed the updatemenus buttons below.
403                 for tst_name, test_data in chart_data.items():
406                             (u"WARNING", f"No data for the test {tst_name}")
409                         if tag not in chart_tags[tst_name]:
411                     message = f"index: {index}, test: {tst_name}"
413                         trace, rslt = _generate_trending_traces(
416                             build_info=build_info,
417                             name=u'-'.join(tst_name.split(u'.')[-1].
422                             (u"ERROR", f"Out of colors: {message}")
424                         logging.error(f"Out of colors: {message}")
428                     visible.extend([True for _ in range(len(trace))])
432                 visibility.append(visible)
# Flat path: trace every test in chart_data.
434             for tst_name, test_data in chart_data.items():
437                         (u"WARNING", f"No data for the test {tst_name}")
440                 message = f"index: {index}, test: {tst_name}"
442                     trace, rslt = _generate_trending_traces(
445                         build_info=build_info,
447                                        tst_name.split(u'.')[-1].split(u'-')[2:-1]),
450                     logs.append((u"ERROR", f"Out of colors: {message}"))
451                     logging.error(f"Out of colors: {message}")
459         # Generate the chart:
461             layout = deepcopy(graph[u"layout"])
462         except KeyError as err:
463             logging.error(u"Finished with error: No layout defined")
464             logging.error(repr(err))
# Build one boolean visibility vector per group: the i-th vector shows only
# the traces that belong to group i.
468             for i in range(len(visibility)):
470                 for vis_idx, _ in enumerate(visibility):
471                     for _ in range(len(visibility[vis_idx])):
472                         visible.append(i == vis_idx)
# Buttons: "show all" first, then one button per group (named from
# "group-names" when available, "Group N" otherwise).
479                     args=[{u"visible": [True for _ in range(len(show[0]))]}, ]
481             for i in range(len(groups)):
483                     label = graph[u"group-names"][i]
484                 except (IndexError, KeyError):
485                     label = f"Group {i + 1}"
489                     args=[{u"visible": show[i]}, ]
492             layout[u"updatemenus"] = list([
506                 f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
507                 f"{spec.cpta[u'output-file-type']}")
# Render the figure to file; an empty data set is tolerated (skipped).
509             logs.append((u"INFO", f"  Writing the file {name_file} ..."))
510             plpl = plgo.Figure(data=traces, layout=layout)
512                 ploff.plot(plpl, show_link=False, auto_open=False,
514             except plerr.PlotlyEmptyDataError:
515                 logs.append((u"WARNING", u"No data for the plot. Skipped."))
# Flush the buffered log records at their recorded severity.
517         for level, line in logs:
520             elif level == u"ERROR":
522             elif level == u"DEBUG":
524             elif level == u"CRITICAL":
525                 logging.critical(line)
526             elif level == u"WARNING":
527                 logging.warning(line)
529         return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}
# Collect, per job, the builds that produced usable data.
532     for job in spec.input[u"builds"].keys():
533         if builds_dict.get(job, None) is None:
534             builds_dict[job] = list()
535         for build in spec.input[u"builds"][job]:
536             status = build[u"status"]
537             if status not in (u"failed", u"not found", u"removed"):
538                 builds_dict[job].append(str(build[u"build"]))
540     # Create "build ID": "date" dict:
# build_info[job][build] = (generated-date, version, testbed) from metadata;
# the testbed IP is mapped to a name via the "testbeds" environment table.
542     tb_tbl = spec.environment.get(u"testbeds", None)
543     for job_name, job_data in builds_dict.items():
544         if build_info.get(job_name, None) is None:
545             build_info[job_name] = OrderedDict()
546         for build in job_data:
548             tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
550                 testbed = tb_tbl.get(tb_ip, u"")
551             build_info[job_name][build] = (
552                 input_data.metadata(job_name, build).get(u"generated", u""),
553                 input_data.metadata(job_name, build).get(u"version", u""),
# Aggregated per-job classification results, filled from each chart below.
557     anomaly_classifications = dict()
559     # Create the table header:
# Three header rows per job: build numbers, build dates, versions.
561     for job_name in builds_dict:
562         if csv_tables.get(job_name, None) is None:
563             csv_tables[job_name] = list()
564         header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
565         csv_tables[job_name].append(header)
566         build_dates = [x[0] for x in build_info[job_name].values()]
567         header = f"Build Date:,{u','.join(build_dates)}\n"
568         csv_tables[job_name].append(header)
569         versions = [x[1] for x in build_info[job_name].values()]
570         header = f"Version:,{u','.join(versions)}\n"
571         csv_tables[job_name].append(header)
# Generate each configured chart and merge its CSV rows and classifications.
573     for chart in spec.cpta[u"plots"]:
574         result = _generate_chart(chart)
578         csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
580         if anomaly_classifications.get(result[u"job_name"], None) is None:
581             anomaly_classifications[result[u"job_name"]] = dict()
582         anomaly_classifications[result[u"job_name"]].update(result[u"results"])
# Write the per-job trending CSV files.
585     for job_name, csv_table in csv_tables.items():
586         file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
587         with open(f"{file_name}.csv", u"w") as file_handler:
588             file_handler.writelines(csv_table)
# Re-read the CSV and render it as a PrettyTable .txt file; numeric cells
# are scaled to millions and rounded to two decimals.
591             with open(f"{file_name}.csv", u"rt") as csv_file:
592                 csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
594                 for row in csv_content:
595                     if txt_table is None:
596                         txt_table = prettytable.PrettyTable(row)
599                             for idx, item in enumerate(row):
601                                     row[idx] = str(round(float(item) / 1000000, 2))
605                             txt_table.add_row(row)
606                         # PrettyTable raises Exception
607                         except Exception as err:
609                                 f"Error occurred while generating TXT table:\n{err}"
612                 txt_table.align[u"Build Number:"] = u"l"
613             with open(f"{file_name}.txt", u"w") as txt_file:
614                 txt_file.write(str(txt_table))
# Write per-job regressions-*.txt / progressions-*.txt listing the tests
# classified accordingly (used downstream, e.g. for alerting).
617     if anomaly_classifications:
619         for job_name, job_data in anomaly_classifications.items():
621                 f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
622             with open(file_name, u'w') as txt_file:
623                 for test_name, classification in job_data.items():
624                     if classification == u"regression":
625                         txt_file.write(test_name + u'\n')
626                     if classification in (u"regression", u"outlier"):
629                 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
630             with open(file_name, u'w') as txt_file:
631                 for test_name, classification in job_data.items():
632                     if classification == u"progression":
633                         txt_file.write(test_name + u'\n')
637     logging.info(f"Partial results: {anomaly_classifications}")
638     logging.info(f"Result: {result}")