1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
21 from collections import OrderedDict
22 from datetime import datetime
23 from copy import deepcopy
26 import plotly.offline as ploff
27 import plotly.graph_objs as plgo
28 import plotly.exceptions as plerr
30 from pal_utils import archive_input_data, execute_command, classify_anomalies
# NOTE(review): this is an elided listing — gaps in the leading original line
# numbers mean continuation lines of both string literals below are missing.
# sphinx-build command template; {date} is filled here (visible at L34/L37
# region) and {working_dir}/{build_dir} placeholders are filled by
# generate_cpta() below.
33 # Command to build the html format of the report
34 HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \
37 u'-D version="{date}" ' \
# CSS patch written over the generated theme by generate_cpta(); widens
# tables and adjusts the sidebar menu styling.
41 # .css file for the html format of the report
42 THEME_OVERRIDES = u"""/* override table width restrictions */
44 max-width: 1200px !important;
46 .rst-content blockquote {
52 display: inline-block;
60 .wy-menu-vertical li.current a {
62 border-right: solid 1px #c9c9c9;
65 .wy-menu-vertical li.toctree-l2.current > a {
69 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
74 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
79 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
86 border-top-width: medium;
87 border-bottom-width: medium;
88 border-top-style: none;
89 border-bottom-style: none;
90 border-top-color: currentcolor;
91 border-bottom-color: currentcolor;
92 padding-left: 2em -4px;
123 def generate_cpta(spec, data):
124 """Generate all formats and versions of the Continuous Performance Trending
127 :param spec: Specification read from the specification file.
128 :param data: Full data set.
129 :type spec: Specification
130 :type data: InputData
# NOTE(review): elided listing — statements between the numbered lines
# (including the execution of `cmd` and the use of `ret_code`) are not shown.
133 logging.info(u"Generating the Continuous Performance Trending and Analysis "
# Generate all charts first; the sphinx HTML build below renders the pages
# that embed them.
136 ret_code = _generate_all_charts(spec, data)
# Fill the sphinx-build command template with the current UTC timestamp and
# the working/build directories taken from the specification environment.
138 cmd = HTML_BUILDER.format(
139 date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
140 working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
141 build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
# Overwrite both generated .css patch files with the theme overrides.
144 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
146 css_file.write(THEME_OVERRIDES)
148 with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
150 css_file.write(THEME_OVERRIDES)
# Archive the inputs only when the specification explicitly enables it.
152 if spec.environment.get(u"archive-inputs", False):
153 archive_input_data(spec)
155 logging.info(u"Done.")
160 def _generate_trending_traces(in_data, job_name, build_info,
161 name=u"", color=u"", incl_tests=u"mrr"):
162 """Generate the trending traces:
164 - outliers, regress, progress
165 - average of normal samples (trending line)
167 :param in_data: Full data set.
168 :param job_name: The name of job which generated the data.
169 :param build_info: Information about the builds.
170 :param name: Name of the plot
171 :param color: Name of the color for the plot.
172 :param incl_tests: Included tests, accepted values: mrr, ndr, pdr
173 :type in_data: OrderedDict
175 :type build_info: dict
178 :type incl_tests: str
179 :returns: Generated traces (list) and the evaluated result.
180 :rtype: tuple(traces, result)
# NOTE(review): elided listing — the guard's body, several assignments and
# most plgo.Scatter keyword arguments are in lines not shown here.
# Guard: only mrr/ndr/pdr test types are accepted (rejection path elided).
183 if incl_tests not in (u"mrr", u"ndr", u"pdr"):
# Build the x-axis keys plus three y-series: rate in pps, rate in Mpps and
# stdev scaled to Mpps (all read from "receive-rate"/"receive-stdev").
186 data_x = list(in_data.keys())
189 data_y_stdev = list()
190 for item in in_data.values():
191 data_y_pps.append(float(item[u"receive-rate"]))
192 data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
193 data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
# Per-sample hover strings and datetime x-axis values.
197 for index, key in enumerate(data_x):
199 date = build_info[job_name][str_key][0]
200 hover_str = (u"date: {date}<br>"
201 u"{property} [Mpps]: {value:.3f}<br>"
203 u"{sut}-ref: {build}<br>"
204 u"csit-ref: {test}-{period}-build-{build_nr}<br>"
205 u"testbed: {testbed}")
# The "<stdev>" placeholder (presumably inserted by an elided line — confirm)
# is expanded only for mrr; the units marker is rewritten for non-Mpps tests.
206 if incl_tests == u"mrr":
207 hover_str = hover_str.replace(
208 u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
211 hover_str = hover_str.replace(u"<stdev>", u"")
213 hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]")
# SUT-specific formatting: dpdk jobs append directly; vpp jobs may further
# rewrite "throughput" to "connection rate" (condition for L237 is elided).
214 if u"dpdk" in job_name:
215 hover_text.append(hover_str.format(
217 property=u"average" if incl_tests == u"mrr" else u"throughput",
218 value=data_y_mpps[index],
220 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
224 testbed=build_info[job_name][str_key][2]))
225 elif u"vpp" in job_name:
226 hover_str = hover_str.format(
228 property=u"average" if incl_tests == u"mrr" else u"throughput",
229 value=data_y_mpps[index],
231 build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
233 period=u"daily" if incl_tests == u"mrr" else u"weekly",
235 testbed=build_info[job_name][str_key][2])
237 hover_str = hover_str.replace(u"throughput", u"connection rate")
238 hover_text.append(hover_str)
# `date` is sliced as YYYYMMDD?HHMM (positions 0-8, 9-11, 12-): year, month,
# day, hour, minute — separator character at index 8 assumed, TODO confirm.
240 xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
241 int(date[9:11]), int(date[12:])))
# Pair timestamps with pps samples for anomaly classification.
243 data_pd = OrderedDict()
244 for key, value in zip(xaxis, data_y_pps):
248 anomaly_classification, avgs_pps, stdevs_pps = \
249 classify_anomalies(data_pd)
250 except ValueError as err:
251 logging.info(f"{err} Skipping")
# Convert trend statistics from pps to Mpps for plotting.
253 avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]
254 stdevs_mpps = [stdev_pps / 1e6 for stdev_pps in stdevs_pps]
# Collect regression/progression points; their colors feed the marker
# colorscale of the anomaly trace below.
256 anomalies = OrderedDict()
257 anomalies_colors = list()
258 anomalies_avgs = list()
264 if anomaly_classification:
265 for index, (key, value) in enumerate(data_pd.items()):
266 if anomaly_classification[index] in (u"regression", u"progression"):
267 anomalies[key] = value / 1e6
268 anomalies_colors.append(
269 anomaly_color[anomaly_classification[index]])
270 anomalies_avgs.append(avgs_mpps[index])
# Presumably pads the color list so the colorbar always spans all three
# classes (tickvals 0.167/0.500/0.833 below) — TODO confirm.
271 anomalies_colors.extend([0.0, 0.5, 1.0])
# Trace 1: raw samples (most Scatter kwargs elided in this listing).
275 trace_samples = plgo.Scatter(
288 u"symbol": u"circle",
291 hoverinfo=u"text+name"
293 traces = [trace_samples, ]
# Trace 2: trend line with its own per-point hover text.
295 trend_hover_text = list()
296 for idx in range(len(data_x)):
298 f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
299 f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
301 trend_hover_text.append(trend_hover_str)
303 trace_trend = plgo.Scatter(
315 text=trend_hover_text,
316 hoverinfo=u"text+name"
318 traces.append(trace_trend)
# Trace 3: anomaly markers with a classification colorbar.
320 trace_anomalies = plgo.Scatter(
321 x=list(anomalies.keys()),
327 name=f"{name}-anomalies",
330 u"symbol": u"circle-open",
331 u"color": anomalies_colors,
347 u"title": u"Circles Marking Data Classification",
348 u"titleside": u"right",
352 u"tickmode": u"array",
353 u"tickvals": [0.167, 0.500, 0.833],
354 u"ticktext": [u"Regression", u"Normal", u"Progression"],
362 traces.append(trace_anomalies)
# The reported result is the classification of the newest sample; the
# fallback return for an empty classification is elided.
364 if anomaly_classification:
365 return traces, anomaly_classification[-1]
370 def _generate_all_charts(spec, input_data):
371 """Generate all charts specified in the specification file.
373 :param spec: Specification.
374 :param input_data: Full data set.
375 :type spec: Specification
376 :type input_data: InputData
# NOTE(review): elided listing — `try:` lines, `continue` branches and many
# intermediate statements are in lines not shown here.
# Nested helper: builds one chart (all test-type/core combinations) and
# returns per-job csv rows plus test classification results.
379 def _generate_chart(graph):
380 """Generates the chart.
382 :param graph: The graph to be generated
384 :returns: Dictionary with the job name, csv table with results and
385 list of tests classification results.
389 logging.info(f" Generating the chart {graph.get(u'title', u'')} ...")
391 job_name = list(graph[u"data"].keys())[0]
395 f" Creating the data set for the {graph.get(u'type', u'')} "
396 f"{graph.get(u'title', u'')}."
399 data = input_data.filter_tests_by_name(
401 params=[u"type", u"result", u"throughput", u"tags"],
402 continue_on_error=True
405 if data is None or data.empty:
406 logging.error(u"No data.")
# Collect per-test rate/stdev samples for every matching test id; mrr uses
# measured stdev, ndr/pdr use the LOWER throughput bound with stdev = NaN.
411 for ttype in graph.get(u"test-type", (u"mrr", )):
412 for core in graph.get(u"core", tuple()):
417 for item in graph.get(u"include", tuple()):
418 reg_ex = re.compile(str(item.format(core=core)).lower())
419 for job, job_data in data.items():
422 for index, bld in job_data.items():
423 for test_id, test in bld.items():
424 if not re.match(reg_ex, str(test_id).lower()):
426 if chart_data.get(test_id, None) is None:
427 chart_data[test_id] = OrderedDict()
430 rate = test[u"result"][u"receive-rate"]
432 test[u"result"][u"receive-stdev"]
433 elif ttype == u"ndr":
435 test["throughput"][u"NDR"][u"LOWER"]
436 stdev = float(u"nan")
437 elif ttype == u"pdr":
439 test["throughput"][u"PDR"][u"LOWER"]
440 stdev = float(u"nan")
443 chart_data[test_id][int(index)] = {
444 u"receive-rate": rate,
445 u"receive-stdev": stdev
447 chart_tags[test_id] = \
448 test.get(u"tags", None)
449 except (KeyError, TypeError):
452 # Add items to the csv table:
453 for tst_name, tst_data in chart_data.items():
455 for bld in builds_dict[job_name]:
456 itm = tst_data.get(int(bld), dict())
457 # CSIT-1180: Itm will be list, compute stats.
459 tst_lst.append(str(itm.get(u"receive-rate", u"")))
460 except AttributeError:
462 csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
# Two layouts: grouped by tag (when "groups" is defined) or flat; both call
# _generate_trending_traces and track per-trace visibility flags.
467 groups = graph.get(u"groups", None)
474 for tst_name, test_data in chart_data.items():
477 f"No data for the test {tst_name}"
480 if tag not in chart_tags[tst_name]:
483 trace, rslt = _generate_trending_traces(
486 build_info=build_info,
487 name=u'-'.join(tst_name.split(u'.')[-1].
493 logging.error(f"Out of colors: index: "
494 f"{index}, test: {tst_name}")
499 [True for _ in range(len(trace))]
504 visibility.append(visible)
506 for tst_name, test_data in chart_data.items():
508 logging.warning(f"No data for the test {tst_name}")
511 trace, rslt = _generate_trending_traces(
514 build_info=build_info,
516 tst_name.split(u'.')[-1].split(u'-')[2:-1]),
522 f"Out of colors: index: "
523 f"{index}, test: {tst_name}"
532 # Generate the chart:
534 layout = deepcopy(graph[u"layout"])
535 except KeyError as err:
536 logging.error(u"Finished with error: No layout defined")
537 logging.error(repr(err))
# Build the per-group visibility masks driving the updatemenus buttons.
541 for i in range(len(visibility)):
543 for vis_idx, _ in enumerate(visibility):
544 for _ in range(len(visibility[vis_idx])):
545 visible.append(i == vis_idx)
553 [True for _ in range(len(show[0]))]}, ]
555 for i in range(len(groups)):
557 label = graph[u"group-names"][i]
558 except (IndexError, KeyError):
559 label = f"Group {i + 1}"
563 args=[{u"visible": show[i]}, ]
566 layout[u"updatemenus"] = list([
580 f"{spec.cpta[u'output-file']}/"
581 f"{graph[u'output-file-name']}.html"
583 name_file = name_file.format(core=core, test_type=ttype)
585 logging.info(f" Writing the file {name_file}")
586 plpl = plgo.Figure(data=traces, layout=layout)
594 except plerr.PlotlyEmptyDataError:
595 logging.warning(u"No data for the plot. Skipped.")
599 u"job_name": job_name,
600 u"csv_table": csv_tbl,
# Collect usable build numbers per job, skipping bad statuses (at least
# "failed"/"not found"/"removed"; the condition continues on an elided line).
608 for job, builds in spec.input.items():
609 if builds_dict.get(job, None) is None:
610 builds_dict[job] = list()
612 if build[u"status"] not in (u"failed", u"not found", u"removed",
614 builds_dict[job].append(str(build[u"build"]))
616 # Create "build ID": "date" dict:
# Per build: (generated date, version, testbed name) from input metadata.
618 tb_tbl = spec.environment.get(u"testbeds", None)
619 for job_name, job_data in builds_dict.items():
620 if build_info.get(job_name, None) is None:
621 build_info[job_name] = OrderedDict()
622 for build in job_data:
624 tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
626 testbed = tb_tbl.get(tb_ip, u"")
627 build_info[job_name][build] = (
628 input_data.metadata(job_name, build).get(u"generated", u""),
629 input_data.metadata(job_name, build).get(u"version", u""),
633 anomaly_classifications = dict()
635 # Create the table header:
637 for job_name in builds_dict:
638 if csv_tables.get(job_name, None) is None:
639 csv_tables[job_name] = list()
640 header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
641 csv_tables[job_name].append(header)
642 build_dates = [x[0] for x in build_info[job_name].values()]
643 header = f"Build Date:,{u','.join(build_dates)}\n"
644 csv_tables[job_name].append(header)
645 versions = [x[1] for x in build_info[job_name].values()]
646 header = f"Version:,{u','.join(versions)}\n"
647 csv_tables[job_name].append(header)
# Generate every chart from the spec and merge its csv rows and anomaly
# classifications per job.
649 for chart in spec.cpta[u"plots"]:
650 results = _generate_chart(chart)
654 for result in results:
655 csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
657 if anomaly_classifications.get(result[u"job_name"], None) is None:
658 anomaly_classifications[result[u"job_name"]] = dict()
659 anomaly_classifications[result[u"job_name"]].\
660 update(result[u"results"])
# Write the per-job trending csv, then render it as a PrettyTable .txt
# (numeric cells rescaled from pps to Mpps via /1e6).
663 for job_name, csv_table in csv_tables.items():
664 file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
665 with open(f"{file_name}.csv", u"wt") as file_handler:
666 file_handler.writelines(csv_table)
669 with open(f"{file_name}.csv", u"rt") as csv_file:
670 csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
672 for row in csv_content:
673 if txt_table is None:
674 txt_table = prettytable.PrettyTable(row)
677 for idx, item in enumerate(row):
679 row[idx] = str(round(float(item) / 1000000, 2))
683 txt_table.add_row(row)
684 # PrettyTable raises Exception
685 except Exception as err:
687 f"Error occurred while generating TXT table:\n{err}"
690 txt_table.align[u"Build Number:"] = u"l"
691 with open(f"{file_name}.txt", u"wt") as txt_file:
692 txt_file.write(str(txt_table))
# Emit regressions-/progressions-<job>.txt listing the affected test names.
695 if anomaly_classifications:
697 for job_name, job_data in anomaly_classifications.items():
699 f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
700 with open(file_name, u'w') as txt_file:
701 for test_name, classification in job_data.items():
702 if classification == u"regression":
703 txt_file.write(test_name + u'\n')
704 if classification in (u"regression", u"outlier"):
707 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
708 with open(file_name, u'w') as txt_file:
709 for test_name, classification in job_data.items():
710 if classification == u"progression":
711 txt_file.write(test_name + u'\n')
# `result` here is presumably the overall PASS/FAIL code set on elided
# lines — TODO confirm against the full source.
715 logging.info(f"Partial results: {anomaly_classifications}")
716 logging.info(f"Result: {result}")