1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
17 import multiprocessing
22 import plotly.offline as ploff
23 import plotly.graph_objs as plgo
24 import plotly.exceptions as plerr
26 from collections import OrderedDict
27 from datetime import datetime
28 from copy import deepcopy
30 from utils import archive_input_data, execute_command, \
31 classify_anomalies, Worker
34 # Command to build the html format of the report
# NOTE(review): sphinx-build command template. The visible fragment sets the
# sphinx config dir (-c conf_cpta), forces a full rebuild (-a) and overrides
# the "version" config value with a {date} placeholder; generate_cpta() below
# fills {date} (and, per its .format() call, {working_dir} and {build_dir})
# before executing it. The embedded line numbers skip (35, 38), so parts of
# this template are elided from this listing — do not edit from this view.
35 HTML_BUILDER = 'sphinx-build -v -c conf_cpta -a ' \
38                '-D version="{date}" ' \
42 # .css file for the html format of the report
# NOTE(review): CSS patch written verbatim into the report's CSS patch files
# by generate_cpta() (see the css_file.write(THEME_OVERRIDES) calls below).
# It widens tables past the theme's default and restyles the left-hand
# .wy-menu-vertical navigation of the sphinx_rtd-style theme.
# The embedded line numbers skip, so several rules/braces are elided from
# this listing and the closing triple-quote is not visible — the string is
# incomplete in this view; do not restyle or reflow it from here.
43 THEME_OVERRIDES = """/* override table width restrictions */
45     max-width: 1200px !important;
47 .rst-content blockquote {
53     display: inline-block;
61 .wy-menu-vertical li.current a {
63     border-right: solid 1px #c9c9c9;
66 .wy-menu-vertical li.toctree-l2.current > a {
70 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
75 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
80 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
87     border-top-width: medium;
88     border-bottom-width: medium;
89     border-top-style: none;
90     border-bottom-style: none;
91     border-top-color: currentcolor;
92     border-bottom-color: currentcolor;
93     padding-left: 2em -4px;
# Named CSS colors used to distinguish per-test traces in the trending plots
# (consumed by _generate_trending_traces via its `color` argument).
# NOTE(review): the 30-color sequence is listed twice back-to-back —
# presumably to supply enough entries when a chart has more than 30 tests
# (the chart code logs "Out of colors" when the list is exhausted) — TODO
# confirm against the full file. The embedded line numbers skip and the
# closing "]" is not visible here: the literal is incomplete in this view.
97 COLORS = ["SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
98           "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
99           "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
100           "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
101           "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
102           "MediumSeaGreen", "SeaGreen", "LightSlateGrey",
103           "SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
104           "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
105           "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
106           "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
107           "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
108           "MediumSeaGreen", "SeaGreen", "LightSlateGrey"
# NOTE(review): top-level entry point. From the visible lines it:
#   1. generates all charts via _generate_all_charts(spec, data) (capturing
#      a ret_code),
#   2. formats HTML_BUILDER with the current UTC timestamp and the working /
#      build directories from spec.environment["paths"] (the resulting `cmd`
#      is presumably executed by an elided execute_command() call — TODO
#      confirm; execute_command is imported above),
#   3. writes THEME_OVERRIDES into both CSS patch files
#      (DIR[CSS_PATCH_FILE] and DIR[CSS_PATCH_FILE2]),
#   4. archives the input data via archive_input_data(spec).
# The embedded line numbers skip (114-115, 120-121, 123-124, ...), so this
# body is incomplete in this listing (e.g. the docstring's closing quotes,
# the `as css_file` targets, and the return of ret_code are elided). Do not
# rewrite from this view; code below is kept byte-identical.
112 def generate_cpta(spec, data):
113     """Generate all formats and versions of the Continuous Performance Trending
116     :param spec: Specification read from the specification file.
117     :param data: Full data set.
118     :type spec: Specification
119     :type data: InputData
122     logging.info("Generating the Continuous Performance Trending and Analysis "
125     ret_code = _generate_all_charts(spec, data)
127     cmd = HTML_BUILDER.format(
128         date=datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC'),
129         working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
130         build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
133     with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE]"], "w") as \
135         css_file.write(THEME_OVERRIDES)
137     with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE2]"], "w") as \
139         css_file.write(THEME_OVERRIDES)
141     archive_input_data(spec)
143     logging.info("Done.")
# NOTE(review): builds the plotly traces for one test's trending chart.
# From the visible lines it:
#   - takes samples keyed by build index (in_data: OrderedDict) plus
#     build_info metadata (date, version string, testbed) per job/build,
#   - composes per-point hover text (different build-reference formatting
#     for "dpdk" vs "vpp" jobs) and x-axis datetimes parsed from the build
#     date string (slices [0:4]/[4:6]/[6:8]/[9:11]/[12:] — i.e. a
#     YYYYMMDD?HHMM-style stamp; exact separator not visible here),
#   - classifies anomalies via classify_anomalies(data_pd), collecting
#     "outlier"/"regression"/"progression" points with per-class colors,
#   - creates three plgo.Scatter traces: raw samples, the trend line
#     (moving average, hover "trend: <value>"), and open-circle anomaly
#     markers with a red/…​ colorscale and a classification colorbar,
#   - returns (traces, last classification) when anomalies were classified.
# data_pd.iteritems() (L115 here) is Python-2 dict API — consistent with the
# rest of this file (it predates a py3 port); do not "fix" in isolation.
# The embedded line numbers skip throughout (e.g. 151, 162, 169-170,
# 173-176, 185, 187, 189-190, …), so this body is incomplete in this
# listing (docstring terminator, the samples/trend/anomalies trace keyword
# arguments, the fallback return, etc. are elided). Code below is kept
# byte-identical; do not rewrite from this view.
148 def _generate_trending_traces(in_data, job_name, build_info,
149                               show_trend_line=True, name="", color=""):
150     """Generate the trending traces:
152     - outliers, regress, progress
153     - average of normal samples (trending line)
155     :param in_data: Full data set.
156     :param job_name: The name of job which generated the data.
157     :param build_info: Information about the builds.
158     :param show_trend_line: Show moving median (trending plot).
159     :param name: Name of the plot
160     :param color: Name of the color for the plot.
161     :type in_data: OrderedDict
163     :type build_info: dict
164     :type show_trend_line: bool
167     :returns: Generated traces (list) and the evaluated result.
168     :rtype: tuple(traces, result)
171     data_x = list(in_data.keys())
172     data_y = list(in_data.values())
177         date = build_info[job_name][str(idx)][0]
178         hover_str = ("date: {date}<br>"
179                      "value: {value:,}<br>"
180                      "{sut}-ref: {build}<br>"
181                      "csit-ref: mrr-{period}-build-{build_nr}<br>"
182                      "testbed: {testbed}")
183         if "dpdk" in job_name:
184             hover_text.append(hover_str.format(
186                 value=int(in_data[idx].avg),
188                 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
191                 testbed=build_info[job_name][str(idx)][2]))
192         elif "vpp" in job_name:
193             hover_text.append(hover_str.format(
195                 value=int(in_data[idx].avg),
197                 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
200                 testbed=build_info[job_name][str(idx)][2]))
202         xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
203                               int(date[9:11]), int(date[12:])))
205     data_pd = OrderedDict()
206     for key, value in zip(xaxis, data_y):
209     anomaly_classification, avgs = classify_anomalies(data_pd)
211     anomalies = OrderedDict()
212     anomalies_colors = list()
213     anomalies_avgs = list()
219     if anomaly_classification:
220         for idx, (key, value) in enumerate(data_pd.iteritems()):
221             if anomaly_classification[idx] in \
222                     ("outlier", "regression", "progression"):
223                 anomalies[key] = value
224                 anomalies_colors.append(
225                     anomaly_color[anomaly_classification[idx]])
226                 anomalies_avgs.append(avgs[idx])
227         anomalies_colors.extend([0.0, 0.5, 1.0])
231     trace_samples = plgo.Scatter(
233         y=[y.avg for y in data_y],
240         name="{name}".format(name=name),
249     traces = [trace_samples, ]
252         trace_trend = plgo.Scatter(
263             name='{name}'.format(name=name),
264             text=["trend: {0:,}".format(int(avg)) for avg in avgs],
265             hoverinfo="text+name"
267         traces.append(trace_trend)
269     trace_anomalies = plgo.Scatter(
276         name="{name}-anomalies".format(name=name),
279             "symbol": "circle-open",
280             "color": anomalies_colors,
281             "colorscale": [[0.00, "red"],
294                 "title": "Circles Marking Data Classification",
295                 "titleside": 'right',
300                 "tickvals": [0.167, 0.500, 0.833],
301                 "ticktext": ["Regression", "Normal", "Progression"],
309     traces.append(trace_anomalies)
311     if anomaly_classification:
312         return traces, anomaly_classification[-1]
# NOTE(review): orchestrates generation of every chart in spec.cpta["plots"].
# Visible structure:
#   - nested _generate_chart(_, data_q, graph): filters input_data for one
#     graph, accumulates chart_data[test_name][build_index] =
#     test["result"]["receive-rate"] plus tags, appends a CSV row per test,
#     then builds traces via _generate_trending_traces — once per group tag
#     when "groups" is set (with per-group visibility masks) and once plain
#     otherwise — logging "Out of colors" when COLORS is exhausted, builds
#     updatemenus buttons from group names, and writes the figure with
#     ploff.plot(...), catching plerr.PlotlyEmptyDataError. Results (job
#     name, csv rows, per-test classifications, logs) are presumably pushed
#     to data_q for the parent — TODO confirm; the put() call is elided.
#   - outer body: builds builds_dict (job -> non-failed build IDs from
#     spec.input["builds"]), build_info (job/build -> (generated date,
#     version, testbed) from input_data.metadata, testbed resolved via
#     spec.environment["testbeds"]), then fans the plots out to
#     multiprocessing Worker processes over a JoinableQueue (pinning each
#     worker with `taskset`), drains the manager Queue to merge logs,
#     per-job CSV tables (Build Number / Build Date / Version header rows
#     prepended) and anomaly classifications, terminates the workers,
#     writes "<output>-<job>-trending.csv" and a prettytable .txt rendering
#     (values scaled by 1e6, rounded to 2 places), and finally derives the
#     overall result from whether any classification is a
#     "regression"/"outlier".
#   - .keys()[0] (L156 here) and .iteritems() (L161) are Python-2 dict API,
#     consistent with the rest of this file; do not "fix" in isolation.
#   - opening the CSV with 'rb' for csv.reader (L292) is also the Python-2
#     csv idiom.
# The embedded line numbers skip throughout, so this body is incomplete in
# this listing (docstring terminators, several loop headers, the worker
# start/join calls, data_q.put, the final return, etc. are elided), and the
# function also appears to continue past the end of this excerpt. Code
# below is kept byte-identical; do not rewrite from this view.
317 def _generate_all_charts(spec, input_data):
318     """Generate all charts specified in the specification file.
320     :param spec: Specification.
321     :param input_data: Full data set.
322     :type spec: Specification
323     :type input_data: InputData
326     def _generate_chart(_, data_q, graph):
327         """Generates the chart.
332         logging.info("  Generating the chart '{0}' ...".
333                      format(graph.get("title", "")))
334         logs.append(("INFO", "  Generating the chart '{0}' ...".
335                      format(graph.get("title", ""))))
337         job_name = graph["data"].keys()[0]
343         logs.append(("INFO", "  Creating the data set for the {0} '{1}'.".
344                      format(graph.get("type", ""), graph.get("title", ""))))
345         data = input_data.filter_data(graph, continue_on_error=True)
347             logging.error("No data.")
352         for job, job_data in data.iteritems():
355             for index, bld in job_data.items():
356                 for test_name, test in bld.items():
357                     if chart_data.get(test_name, None) is None:
358                         chart_data[test_name] = OrderedDict()
360                         chart_data[test_name][int(index)] = \
361                             test["result"]["receive-rate"]
362                         chart_tags[test_name] = test.get("tags", None)
363                     except (KeyError, TypeError):
366         # Add items to the csv table:
367         for tst_name, tst_data in chart_data.items():
369             for bld in builds_dict[job_name]:
370                 itm = tst_data.get(int(bld), '')
371                 if not isinstance(itm, str):
373                 tst_lst.append(str(itm))
374             csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
379         groups = graph.get("groups", None)
386                 for test_name, test_data in chart_data.items():
388                         logs.append(("WARNING",
389                                      "No data for the test '{0}'".
392                         if tag in chart_tags[test_name]:
393                             message = "index: {index}, test: {test}".format(
394                                 index=index, test=test_name)
395                             test_name = test_name.split('.')[-1]
397                                 trace, rslt = _generate_trending_traces(
400                                     build_info=build_info,
401                                     name='-'.join(test_name.split('-')[2:-1]),
404                                 message = "Out of colors: {}".format(message)
405                                 logs.append(("ERROR", message))
406                                 logging.error(message)
410                             visible.extend([True for _ in range(len(trace))])
414                 visibility.append(visible)
416             for test_name, test_data in chart_data.items():
418                     logs.append(("WARNING", "No data for the test '{0}'".
421                 message = "index: {index}, test: {test}".format(
422                     index=index, test=test_name)
423                 test_name = test_name.split('.')[-1]
425                     trace, rslt = _generate_trending_traces(
428                         build_info=build_info,
429                         name='-'.join(test_name.split('-')[2:-1]),
432                     message = "Out of colors: {}".format(message)
433                     logs.append(("ERROR", message))
434                     logging.error(message)
442         # Generate the chart:
444             layout = deepcopy(graph["layout"])
445         except KeyError as err:
446             logging.error("Finished with error: No layout defined")
447             logging.error(repr(err))
451             for i in range(len(visibility)):
453                 for r in range(len(visibility)):
454                     for _ in range(len(visibility[r])):
455                         visible.append(i == r)
462                     args=[{"visible": [True for _ in range(len(show[0]))]}, ]
464             for i in range(len(groups)):
466                     label = graph["group-names"][i]
467                 except (IndexError, KeyError):
468                     label = "Group {num}".format(num=i + 1)
472                         args=[{"visible": show[i]}, ]
475             layout['updatemenus'] = list([
488             name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
489                                             graph["output-file-name"],
490                                             spec.cpta["output-file-type"])
492             logs.append(("INFO", "  Writing the file '{0}' ...".
494                 plpl = plgo.Figure(data=traces, layout=layout)
496                 ploff.plot(plpl, show_link=False, auto_open=False,
498             except plerr.PlotlyEmptyDataError:
499                 logs.append(("WARNING", "No data for the plot. Skipped."))
502             "job_name": job_name,
503             "csv_table": csv_tbl,
510     for job in spec.input["builds"].keys():
511         if builds_dict.get(job, None) is None:
512             builds_dict[job] = list()
513         for build in spec.input["builds"][job]:
514             status = build["status"]
515             if status != "failed" and status != "not found" and \
517                 builds_dict[job].append(str(build["build"]))
519     # Create "build ID": "date" dict:
521     tb_tbl = spec.environment.get("testbeds", None)
522     for job_name, job_data in builds_dict.items():
523         if build_info.get(job_name, None) is None:
524             build_info[job_name] = OrderedDict()
525         for build in job_data:
527             tb_ip = input_data.metadata(job_name, build).get("testbed", "")
529                 testbed = tb_tbl.get(tb_ip, "")
530             build_info[job_name][build] = (
531                 input_data.metadata(job_name, build).get("generated", ""),
532                 input_data.metadata(job_name, build).get("version", ""),
536     work_queue = multiprocessing.JoinableQueue()
537     manager = multiprocessing.Manager()
538     data_queue = manager.Queue()
539     cpus = multiprocessing.cpu_count()
542     for cpu in range(cpus):
543         worker = Worker(work_queue,
548         workers.append(worker)
549         os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
550                   format(cpu, worker.pid))
552     for chart in spec.cpta["plots"]:
553         work_queue.put((chart, ))
556     anomaly_classifications = list()
560     for job_name in builds_dict.keys():
561         if csv_tables.get(job_name, None) is None:
562             csv_tables[job_name] = list()
563         header = "Build Number:," + ",".join(builds_dict[job_name]) + '\n'
564         csv_tables[job_name].append(header)
565         build_dates = [x[0] for x in build_info[job_name].values()]
566         header = "Build Date:," + ",".join(build_dates) + '\n'
567         csv_tables[job_name].append(header)
568         versions = [x[1] for x in build_info[job_name].values()]
569         header = "Version:," + ",".join(versions) + '\n'
570         csv_tables[job_name].append(header)
572     while not data_queue.empty():
573         result = data_queue.get()
575         anomaly_classifications.extend(result["results"])
576         csv_tables[result["job_name"]].extend(result["csv_table"])
578         for item in result["logs"]:
579             if item[0] == "INFO":
580                 logging.info(item[1])
581             elif item[0] == "ERROR":
582                 logging.error(item[1])
583             elif item[0] == "DEBUG":
584                 logging.debug(item[1])
585             elif item[0] == "CRITICAL":
586                 logging.critical(item[1])
587             elif item[0] == "WARNING":
588                 logging.warning(item[1])
592     # Terminate all workers
593     for worker in workers:
598     for job_name, csv_table in csv_tables.items():
599         file_name = spec.cpta["output-file"] + "-" + job_name + "-trending"
600         with open("{0}.csv".format(file_name), 'w') as file_handler:
601             file_handler.writelines(csv_table)
604         with open("{0}.csv".format(file_name), 'rb') as csv_file:
605             csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
607             for row in csv_content:
608                 if txt_table is None:
609                     txt_table = prettytable.PrettyTable(row)
612                         for idx, item in enumerate(row):
614                                 row[idx] = str(round(float(item) / 1000000, 2))
618                         txt_table.add_row(row)
619                     except Exception as err:
620                         logging.warning("Error occurred while generating TXT "
621                                         "table:\n{0}".format(err))
623             txt_table.align["Build Number:"] = "l"
624             with open("{0}.txt".format(file_name), "w") as txt_file:
625                 txt_file.write(str(txt_table))
628     if anomaly_classifications:
630         for classification in anomaly_classifications:
631             if classification == "regression" or classification == "outlier":
637     logging.info("Partial results: {0}".format(anomaly_classifications))
638     logging.info("Result: {0}".format(result))