1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
20 from collections import OrderedDict
21 from datetime import datetime
22 from copy import deepcopy
25 import plotly.offline as ploff
26 import plotly.graph_objs as plgo
27 import plotly.exceptions as plerr
29 from pal_utils import archive_input_data, execute_command, classify_anomalies
32 # Command to build the html format of the report
# (sphinx-build: -v verbose, -c selects the conf_cpta configuration
# directory, -a rebuilds all files, -D overrides config values such as
# "version"; the listing is elided, so parts of the command string — e.g.
# where {working_dir} and {build_dir} are placed — are not visible here.)
33 HTML_BUILDER = u'sphinx-build -v -c conf_cpta -a ' \
36 u'-D version="{date}" ' \
40 # .css file for the html format of the report
# (this CSS is written verbatim into both CSS patch files by
# generate_cpta(); it lifts table-width restrictions and adjusts menu
# styling of the rendered HTML — selectors suggest a readthedocs-style
# theme, confirm against conf_cpta)
41 THEME_OVERRIDES = u"""/* override table width restrictions */
43 max-width: 1200px !important;
45 .rst-content blockquote {
51 display: inline-block;
59 .wy-menu-vertical li.current a {
61 border-right: solid 1px #c9c9c9;
64 .wy-menu-vertical li.toctree-l2.current > a {
68 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
73 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
78 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
85 border-top-width: medium;
86 border-bottom-width: medium;
87 border-top-style: none;
88 border-bottom-style: none;
89 border-top-color: currentcolor;
90 border-bottom-color: currentcolor;
91 padding-left: 2em -4px;
122 def generate_cpta(spec, data):
123 """Generate all formats and versions of the Continuous Performance Trending
# NOTE(review): this listing is elided (the embedded line numbers jump), so
# the tail of the docstring, several statements and the return value of this
# function are not visible here.
126 :param spec: Specification read from the specification file.
127 :param data: Full data set.
128 :type spec: Specification
129 :type data: InputData
132 logging.info(u"Generating the Continuous Performance Trending and Analysis "
# Build all charts first; the sphinx command assembled below then renders
# the HTML pages around them.
135 ret_code = _generate_all_charts(spec, data)
# cmd is presumably run via execute_command() (imported at the top of the
# file) on an elided line — confirm against the full source.
137 cmd = HTML_BUILDER.format(
138 date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
139 working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
140 build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
# Write the THEME_OVERRIDES css into both patch files so the rendered HTML
# is not table-width restricted (the continuation lines naming the file
# handles are elided).
143 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
145 css_file.write(THEME_OVERRIDES)
147 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
149 css_file.write(THEME_OVERRIDES)
# Archiving the input data is on by default ("archive-inputs": True).
151 if spec.configuration.get(u"archive-inputs", True):
152 archive_input_data(spec)
154 logging.info(u"Done.")
159 def _generate_trending_traces(in_data, job_name, build_info,
160 name=u"", color=u"", incl_tests=u"MRR"):
161 """Generate the trending traces:
163 - outliers, regress, progress
164 - average of normal samples (trending line)
166 :param in_data: Full data set.
167 :param job_name: The name of job which generated the data.
168 :param build_info: Information about the builds.
169 :param name: Name of the plot
170 :param color: Name of the color for the plot.
171 :param incl_tests: Included tests, accepted values: MRR, NDR, PDR
172 :type in_data: OrderedDict
174 :type build_info: dict
177 :type incl_tests: str
178 :returns: Generated traces (list) and the evaluated result.
179 :rtype: tuple(traces, result)
# NOTE(review): this listing is elided (the embedded line numbers jump), so
# several statements of this function are not visible here.
# Guard: only MRR / NDR / PDR data can be plotted (the handling branch for
# an unsupported value is on an elided line).
182 if incl_tests not in (u"MRR", u"NDR", u"PDR"):
# Split the input into x-axis keys and three y-series: raw rate [pps],
# rate scaled to Mpps, and stdev scaled to Mpps.
185 data_x = list(in_data.keys())
188 data_y_stdev = list()
189 for item in in_data.values():
190 data_y_pps.append(float(item[u"receive-rate"]))
191 data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
192 data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
# Compose a hover string per sample.  The "<stdev>" placeholder is filled
# only for MRR tests; "[Mpps]" is swapped for "[Mcps]" under a condition
# that sits on an elided line (presumably for connection-rate tests —
# confirm against the full source).
196 for index, key in enumerate(data_x):
198 date = build_info[job_name][str_key][0]
199 hover_str = (u"date: {date}<br>"
200 u"{property} [Mpps]: {value:.3f}<br>"
202 u"{sut}-ref: {build}<br>"
203 u"csit-ref: {test}-{period}-build-{build_nr}<br>"
204 u"testbed: {testbed}")
205 if incl_tests == u"MRR":
206 hover_str = hover_str.replace(
207 u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
210 hover_str = hover_str.replace(u"<stdev>", u"")
212 hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]")
# Fill the placeholders; the dpdk and vpp branches differ in fields that
# sit on elided lines (the "period" value is only visible on the vpp
# branch, where it is "daily" for MRR and "weekly" otherwise).
213 if u"dpdk" in job_name:
214 hover_text.append(hover_str.format(
216 property=u"average" if incl_tests == u"MRR" else u"throughput",
217 value=data_y_mpps[index],
219 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
220 test=incl_tests.lower(),
223 testbed=build_info[job_name][str_key][2]))
224 elif u"vpp" in job_name:
225 hover_str = hover_str.format(
227 property=u"average" if incl_tests == u"MRR" else u"throughput",
228 value=data_y_mpps[index],
230 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
231 test=incl_tests.lower(),
232 period=u"daily" if incl_tests == u"MRR" else u"weekly",
234 testbed=build_info[job_name][str_key][2])
236 hover_str = hover_str.replace(u"throughput", u"connection rate")
237 hover_text.append(hover_str)
# The build date is parsed by fixed slices (year/month/day then hour and
# minute; the separator characters at indices 8 and 11 are skipped —
# exact format string not visible here).
239 xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
240 int(date[9:11]), int(date[12:])))
# Pair timestamps with the raw pps rates and classify anomalies; the
# returned averages/stdevs are in pps and get scaled to Mpps for plotting.
242 data_pd = OrderedDict()
243 for key, value in zip(xaxis, data_y_pps):
246 anomaly_classification, avgs_pps, stdevs_pps = classify_anomalies(data_pd)
247 avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
248 stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps]
250 anomalies = OrderedDict()
251 anomalies_colors = list()
252 anomalies_avgs = list()
# Collect only regression/progression points.  "anomaly_color" is defined
# on an elided line; the extra 0.0/0.5/1.0 entries presumably pin the
# marker colorscale range — confirm against the full source.
258 if anomaly_classification:
259 for index, (key, value) in enumerate(data_pd.items()):
260 if anomaly_classification[index] in (u"regression", u"progression"):
261 anomalies[key] = value / 1e6
262 anomalies_colors.append(
263 anomaly_color[anomaly_classification[index]])
264 anomalies_avgs.append(avgs_mpps[index])
265 anomalies_colors.extend([0.0, 0.5, 1.0])
# Scatter trace with the individual samples (constructor arguments mostly
# elided).
269 trace_samples = plgo.Scatter(
282 u"symbol": u"circle",
285 hoverinfo=u"text+name"
287 traces = [trace_samples, ]
# Trend line built from the classified averages, with its own hover text.
289 trend_hover_text = list()
290 for idx in range(len(data_x)):
292 f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
293 f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
295 trend_hover_text.append(trend_hover_str)
297 trace_trend = plgo.Scatter(
309 text=trend_hover_text,
310 hoverinfo=u"text+name"
312 traces.append(trace_trend)
# Open-circle markers over the anomalies, with a colorbar legend mapping
# the three tick positions to Regression / Normal / Progression.
314 trace_anomalies = plgo.Scatter(
315 x=list(anomalies.keys()),
321 name=f"{name}-anomalies",
324 u"symbol": u"circle-open",
325 u"color": anomalies_colors,
341 u"title": u"Circles Marking Data Classification",
342 u"titleside": u"right",
346 u"tickmode": u"array",
347 u"tickvals": [0.167, 0.500, 0.833],
348 u"ticktext": [u"Regression", u"Normal", u"Progression"],
356 traces.append(trace_anomalies)
# Return the traces plus the classification of the newest sample; the
# return value for the no-classification case is on an elided line.
358 if anomaly_classification:
359 return traces, anomaly_classification[-1]
364 def _generate_all_charts(spec, input_data):
365 """Generate all charts specified in the specification file.
367 :param spec: Specification.
368 :param input_data: Full data set.
369 :type spec: Specification
370 :type input_data: InputData
# NOTE(review): elided listing — the embedded line numbers jump, so parts
# of this function (and whatever follows content line 685, e.g. a final
# return) are not visible here.
373 def _generate_chart(graph):
374 """Generates the chart.
376 :param graph: The graph to be generated
378 :returns: Dictionary with the job name, csv table with results and
379 list of tests classification results.
383 logging.info(f" Generating the chart {graph.get(u'title', u'')} ...")
385 incl_tests = graph.get(u"include-tests", u"MRR")
# The chart is built from the first job listed in the graph specification.
387 job_name = list(graph[u"data"].keys())[0]
394 f" Creating the data set for the {graph.get(u'type', u'')} "
395 f"{graph.get(u'title', u'')}."
# Select tests either by an explicit name list ("include") or by the
# graph element's filter (the positional arguments are elided).
398 if graph.get(u"include", None):
399 data = input_data.filter_tests_by_name(
401 params=[u"type", u"result", u"throughput", u"tags"],
402 continue_on_error=True
405 data = input_data.filter_data(
407 params=[u"type", u"result", u"throughput", u"tags"],
408 continue_on_error=True)
410 if data is None or data.empty:
411 logging.error(u"No data.")
# Re-shape the data: per test name, map build number -> receive rate and
# stdev.  MRR provides a measured stdev; NDR/PDR use the throughput LOWER
# bound with stdev set to NaN.  Malformed entries are tolerated via the
# KeyError/TypeError handler below.
416 for job, job_data in data.items():
419 for index, bld in job_data.items():
420 for test_name, test in bld.items():
421 if chart_data.get(test_name, None) is None:
422 chart_data[test_name] = OrderedDict()
424 if incl_tests == u"MRR":
425 rate = test[u"result"][u"receive-rate"]
426 stdev = test[u"result"][u"receive-stdev"]
427 elif incl_tests == u"NDR":
428 rate = test[u"throughput"][u"NDR"][u"LOWER"]
429 stdev = float(u"nan")
430 elif incl_tests == u"PDR":
431 rate = test[u"throughput"][u"PDR"][u"LOWER"]
432 stdev = float(u"nan")
435 chart_data[test_name][int(index)] = {
436 u"receive-rate": rate,
437 u"receive-stdev": stdev
439 chart_tags[test_name] = test.get(u"tags", None)
440 except (KeyError, TypeError):
443 # Add items to the csv table:
444 for tst_name, tst_data in chart_data.items():
446 for bld in builds_dict[job_name]:
447 itm = tst_data.get(int(bld), dict())
448 # CSIT-1180: Itm will be list, compute stats.
450 tst_lst.append(str(itm.get(u"receive-rate", u"")))
451 except AttributeError:
453 csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
# With "groups" defined, traces are generated per group (tests selected by
# tag) and a visibility vector is recorded per group for the selection
# buttons below; without groups one pass over all tests is done.
458 groups = graph.get(u"groups", None)
465 for tst_name, test_data in chart_data.items():
467 logging.warning(f"No data for the test {tst_name}")
469 if tag not in chart_tags[tst_name]:
472 trace, rslt = _generate_trending_traces(
475 build_info=build_info,
476 name=u'-'.join(tst_name.split(u'.')[-1].
479 incl_tests=incl_tests
482 logging.error(f"Out of colors: index: "
483 f"{index}, test: {tst_name}")
487 visible.extend([True for _ in range(len(trace))])
491 visibility.append(visible)
493 for tst_name, test_data in chart_data.items():
495 logging.warning(f"No data for the test {tst_name}")
498 trace, rslt = _generate_trending_traces(
501 build_info=build_info,
503 tst_name.split(u'.')[-1].split(u'-')[2:-1]),
505 incl_tests=incl_tests
509 f"Out of colors: index: {index}, test: {tst_name}"
518 # Generate the chart:
520 layout = deepcopy(graph[u"layout"])
521 except KeyError as err:
522 logging.error(u"Finished with error: No layout defined")
523 logging.error(repr(err))
# Build one boolean visibility vector per group: the traces belonging to
# group i are shown only when button i is active; the first button
# presumably shows everything (args uses all-True) — confirm.
527 for i in range(len(visibility)):
529 for vis_idx, _ in enumerate(visibility):
530 for _ in range(len(visibility[vis_idx])):
531 visible.append(i == vis_idx)
538 args=[{u"visible": [True for _ in range(len(show[0]))]}, ]
540 for i in range(len(groups)):
542 label = graph[u"group-names"][i]
543 except (IndexError, KeyError):
544 label = f"Group {i + 1}"
548 args=[{u"visible": show[i]}, ]
551 layout[u"updatemenus"] = list([
# Write the chart as a standalone html file via plotly offline; an empty
# plot is skipped with a warning instead of failing the whole run.
565 f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
566 f"{spec.cpta[u'output-file-type']}")
568 logging.info(f" Writing the file {name_file} ...")
569 plpl = plgo.Figure(data=traces, layout=layout)
571 ploff.plot(plpl, show_link=False, auto_open=False,
573 except plerr.PlotlyEmptyDataError:
574 logging.warning(u"No data for the plot. Skipped.")
576 return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}
# Collect, per job, the builds usable for trending: builds whose status is
# failed / not found / removed / None are skipped.
579 for job in spec.input[u"builds"].keys():
580 if builds_dict.get(job, None) is None:
581 builds_dict[job] = list()
582 for build in spec.input[u"builds"][job]:
583 status = build[u"status"]
584 if status not in (u"failed", u"not found", u"removed", None):
585 builds_dict[job].append(str(build[u"build"]))
587 # Create "build ID": "date" dict:
# build_info maps job -> build -> (generated date, version, testbed name
# resolved from the build's "testbed" metadata via the "testbeds"
# environment table).
589 tb_tbl = spec.environment.get(u"testbeds", None)
590 for job_name, job_data in builds_dict.items():
591 if build_info.get(job_name, None) is None:
592 build_info[job_name] = OrderedDict()
593 for build in job_data:
595 tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
597 testbed = tb_tbl.get(tb_ip, u"")
598 build_info[job_name][build] = (
599 input_data.metadata(job_name, build).get(u"generated", u""),
600 input_data.metadata(job_name, build).get(u"version", u""),
604 anomaly_classifications = dict()
606 # Create the table header:
# Three header rows per job csv: build numbers, build dates, versions.
608 for job_name in builds_dict:
609 if csv_tables.get(job_name, None) is None:
610 csv_tables[job_name] = list()
611 header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
612 csv_tables[job_name].append(header)
613 build_dates = [x[0] for x in build_info[job_name].values()]
614 header = f"Build Date:,{u','.join(build_dates)}\n"
615 csv_tables[job_name].append(header)
616 versions = [x[1] for x in build_info[job_name].values()]
617 header = f"Version:,{u','.join(versions)}\n"
618 csv_tables[job_name].append(header)
# Generate every chart from the specification, merging its csv rows and
# per-test classification results into the per-job accumulators.
620 for chart in spec.cpta[u"plots"]:
621 result = _generate_chart(chart)
625 csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
627 if anomaly_classifications.get(result[u"job_name"], None) is None:
628 anomaly_classifications[result[u"job_name"]] = dict()
629 anomaly_classifications[result[u"job_name"]].update(result[u"results"])
# Write the trending csv per job, then re-read it and render a text table
# with PrettyTable, scaling numeric cells to millions (value / 1e6,
# rounded to two decimals).
632 for job_name, csv_table in csv_tables.items():
633 file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
634 with open(f"{file_name}.csv", u"wt") as file_handler:
635 file_handler.writelines(csv_table)
638 with open(f"{file_name}.csv", u"rt") as csv_file:
639 csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
641 for row in csv_content:
642 if txt_table is None:
643 txt_table = prettytable.PrettyTable(row)
646 for idx, item in enumerate(row):
648 row[idx] = str(round(float(item) / 1000000, 2))
652 txt_table.add_row(row)
653 # PrettyTable raises Exception
654 except Exception as err:
656 f"Error occurred while generating TXT table:\n{err}"
659 txt_table.align[u"Build Number:"] = u"l"
660 with open(f"{file_name}.txt", u"wt") as txt_file:
661 txt_file.write(str(txt_table))
# Emit one regressions-<job>.txt and one progressions-<job>.txt file per
# job, listing the names of tests classified accordingly (the action for
# the "regression"/"outlier" membership test is elided).
664 if anomaly_classifications:
666 for job_name, job_data in anomaly_classifications.items():
668 f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
669 with open(file_name, u'w') as txt_file:
670 for test_name, classification in job_data.items():
671 if classification == u"regression":
672 txt_file.write(test_name + u'\n')
673 if classification in (u"regression", u"outlier"):
676 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
677 with open(file_name, u'w') as txt_file:
678 for test_name, classification in job_data.items():
679 if classification == u"progression":
680 txt_file.write(test_name + u'\n')
684 logging.info(f"Partial results: {anomaly_classifications}")
685 logging.info(f"Result: {result}")