1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
17 import multiprocessing
22 import plotly.offline as ploff
23 import plotly.graph_objs as plgo
24 import plotly.exceptions as plerr
26 from collections import OrderedDict
27 from datetime import datetime
29 from utils import archive_input_data, execute_command, \
30 classify_anomalies, Worker
# Command to build the html format of the report.
# Placeholders {date}, {working_dir} and {build_dir} are filled in by
# generate_cpta() before the command is executed.
# NOTE(review): this chunk is truncated — further continuation lines of the
# command string are not visible here.
HTML_BUILDER = 'sphinx-build -v -c conf_cpta -a ' \
               '-D version="{date}" ' \

# .css file for the html format of the report; the text below is written
# verbatim over the files named by DIR[CSS_PATCH_FILE] and
# DIR[CSS_PATCH_FILE2] in generate_cpta().
# NOTE(review): the CSS body below is an incomplete excerpt and the closing
# triple-quote of this literal lies outside the visible chunk — confirm
# against the full file before editing.
THEME_OVERRIDES = """/* override table width restrictions */
max-width: 1200px !important;
.rst-content blockquote {
display: inline-block;
.wy-menu-vertical li.current a {
border-right: solid 1px #c9c9c9;
.wy-menu-vertical li.toctree-l2.current > a {
.wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
.wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
border-top-width: medium;
border-bottom-width: medium;
border-top-style: none;
border-bottom-style: none;
border-top-color: currentcolor;
border-bottom-color: currentcolor;
padding-left: 2em -4px;
# Named CSS colors assigned to per-test traces in the trending graphs; the
# 30-color sequence is deliberately listed twice so twice as many tests can
# be plotted before colors are exhausted (callers index into this list and
# report "Out of colors" past the end).
# NOTE(review): the closing bracket of this list lies outside the visible
# chunk.
COLORS = ["SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
          "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
          "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
          "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
          "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
          "MediumSeaGreen", "SeaGreen", "LightSlateGrey",
          "SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
          "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
          "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
          "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
          "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
          "MediumSeaGreen", "SeaGreen", "LightSlateGrey"
def generate_cpta(spec, data):
    """Generate all formats and versions of the Continuous Performance Trending
    and Analysis report: build the charts, run the Sphinx HTML build, patch
    the generated HTML with the CSS overrides and archive the input data.

    :param spec: Specification read from the specification file.
    :param data: Full data set.
    :type spec: Specification
    :type data: InputData
    """

    # NOTE(review): this chunk omits interior lines; the logging call below
    # is visibly truncated (its closing paren is on a missing line).
    logging.info("Generating the Continuous Performance Trending and Analysis "

    # Build all charts; ret_code carries the outcome of chart generation.
    # NOTE(review): how ret_code is consumed (presumably the return value /
    # the sphinx-build execution) is on lines missing from this chunk.
    ret_code = _generate_all_charts(spec, data)

    # Fill the sphinx-build command template with the current UTC timestamp
    # and the source/build directories from the specification environment.
    cmd = HTML_BUILDER.format(
        date=datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC'),
        working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
        build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])

    # Overwrite both CSS patch files with the THEME_OVERRIDES stylesheet.
    # NOTE(review): the `as <name>:` targets of both `with` statements are on
    # missing lines; `css_file` is presumably bound there — confirm.
    with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE]"], "w") as \
        css_file.write(THEME_OVERRIDES)
    with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE2]"], "w") as \
        css_file.write(THEME_OVERRIDES)

    # Keep a copy of the input data alongside the generated report.
    archive_input_data(spec)

    logging.info("Done.")
def _generate_trending_traces(in_data, job_name, build_info,
                              show_trend_line=True, name="", color=""):
    """Generate the trending traces:
    - outliers, regress, progress
    - average of normal samples (trending line)

    :param in_data: Full data set.
    :param job_name: The name of job which generated the data.
    :param build_info: Information about the builds.
    :param show_trend_line: Show moving median (trending plot).
    :param name: Name of the plot
    :param color: Name of the color for the plot.
    :type in_data: OrderedDict
    :type build_info: dict
    :type show_trend_line: bool
    :returns: Generated traces (list) and the evaluated result.
    :rtype: tuple(traces, result)
    """

    # x values: build numbers (keys); y values: measured samples, objects
    # exposing an .avg attribute (see the list comprehensions below).
    data_x = list(in_data.keys())
    data_y = list(in_data.values())

    # NOTE(review): the loop header iterating the builds is on a missing
    # line; the statements below clearly run once per build index `idx`.
    date = build_info[job_name][str(idx)][0]
    # Hover-text template; placeholder lines {1}..{3} between the two
    # visible pieces are missing from this chunk.
    hover_str = ("date: {0}<br>"
                 "csit-ref: mrr-{4}-build-{5}")
    # The job flavour (dpdk vs vpp) selects how the version/build reference
    # for the hover text is derived from the build metadata.
    if "dpdk" in job_name:
        hover_text.append(hover_str.format(
            int(in_data[idx].avg),
            build_info[job_name][str(idx)][1].
    elif "vpp" in job_name:
        hover_text.append(hover_str.format(
            int(in_data[idx].avg),
            build_info[job_name][str(idx)][1].
    # Parse the "generated" timestamp string (YYYYMMDD HH:MM layout, per the
    # slicing below) into a datetime for the x axis.
    xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                          int(date[9:11]), int(date[12:])))

    # Re-key the samples by datetime for anomaly classification.
    data_pd = OrderedDict()
    for key, value in zip(xaxis, data_y):

    # Classify every sample and obtain the running averages (trend values).
    anomaly_classification, avgs = classify_anomalies(data_pd)

    # Collect only the anomalous points, with their marker colors and the
    # trend value at that point.
    anomalies = OrderedDict()
    anomalies_colors = list()
    anomalies_avgs = list()
    # NOTE(review): `anomaly_color` is defined on missing lines — presumably
    # a classification -> colorscale-value mapping; confirm in the full file.
    if anomaly_classification:
        # Python 2 iteration (iteritems) — this file predates Python 3.
        for idx, (key, value) in enumerate(data_pd.iteritems()):
            if anomaly_classification[idx] in \
                    ("outlier", "regression", "progression"):
                anomalies[key] = value
                anomalies_colors.append(
                    anomaly_color[anomaly_classification[idx]])
                anomalies_avgs.append(avgs[idx])
        # Append the colorscale extremes so plotly maps marker colors
        # consistently even when few anomalies are present.
        anomalies_colors.extend([0.0, 0.5, 1.0])

    # Trace 1: the raw samples.
    trace_samples = plgo.Scatter(
        y=[y.avg for y in data_y],
        name="{name}".format(name=name),
    traces = [trace_samples, ]

    # Trace 2: the trend line; presumably guarded by show_trend_line on a
    # missing line — confirm against the full file.
    trace_trend = plgo.Scatter(
        name='{name}'.format(name=name),
        text=["trend: {0:,}".format(int(avg)) for avg in avgs],
        hoverinfo="text+name"
    traces.append(trace_trend)

    # Trace 3: anomalies as open circles colored by classification, with a
    # red/white(normal)/green colorbar labelled at thirds of the scale.
    trace_anomalies = plgo.Scatter(
        name="{name}-anomalies".format(name=name),
        "symbol": "circle-open",
        "color": anomalies_colors,
        "colorscale": [[0.00, "red"],
        "title": "Circles Marking Data Classification",
        "titleside": 'right',
        "tickvals": [0.167, 0.500, 0.833],
        "ticktext": ["Regression", "Normal", "Progression"],
    traces.append(trace_anomalies)

    # The classification of the newest sample is the evaluated result for
    # this test.
    if anomaly_classification:
        return traces, anomaly_classification[-1]
def _generate_all_charts(spec, input_data):
    """Generate all charts specified in the specification file.

    Fans the chart generation out to one worker process per CPU, collects
    the per-chart results (traces, csv rows, anomaly classifications) from a
    shared queue, writes per-job csv/txt trending tables and evaluates the
    overall pass/fail result.

    :param spec: Specification.
    :param input_data: Full data set.
    :type spec: Specification
    :type input_data: InputData
    """

    # NOTE(review): this chunk omits many interior lines (several statements
    # below are visibly truncated); comments mark the gaps where relevant.

    def _generate_chart(_, data_q, graph):
        """Generates the chart.

        Worker body: builds one trending chart and its csv rows for the
        given graph specification and puts a result dict onto data_q.  Log
        records are accumulated in `logs` (a list of (level, message)
        tuples) and replayed by the parent process.
        """

        logging.info(" Generating the chart '{0}' ...".
                     format(graph.get("title", "")))
        logs.append(("INFO", " Generating the chart '{0}' ...".
                     format(graph.get("title", ""))))

        # Python 2: dict.keys() returns a list, so [0] picks the only job.
        job_name = graph["data"].keys()[0]

        # Transform the data: test name -> build index -> receive-rate.
        logs.append(("INFO", " Creating the data set for the {0} '{1}'.".
                     format(graph.get("type", ""), graph.get("title", ""))))
        data = input_data.filter_data(graph, continue_on_error=True)
        # NOTE(review): the guard around this error (presumably
        # `if data is None:`) is on a missing line.
            logging.error("No data.")

        for job, job_data in data.iteritems():
            for index, bld in job_data.items():
                for test_name, test in bld.items():
                    if chart_data.get(test_name, None) is None:
                        chart_data[test_name] = OrderedDict()
                    # NOTE(review): the `try:` header for this lookup is on
                    # a missing line; KeyError/TypeError is handled below.
                    chart_data[test_name][int(index)] = \
                        test["result"]["receive-rate"]
                except (KeyError, TypeError):

        # Add items to the csv table:
        # one row per test, one column per successful build ('' when the
        # build produced no value for the test).
        for tst_name, tst_data in chart_data.items():
            for bld in builds_dict[job_name]:
                itm = tst_data.get(int(bld), '')
                if not isinstance(itm, str):
                    tst_lst.append(str(itm))
            csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')

        # Generate traces for every test; colors are taken from COLORS and
        # running past its end is reported as an error below.
        for test_name, test_data in chart_data.items():
                logs.append(("WARNING", "No data for the test '{0}'".
            message = "index: {index}, test: {test}".format(
                index=index, test=test_name)
            # Keep only the last dotted component of the test name.
            test_name = test_name.split('.')[-1]
                trace, rslt = _generate_trending_traces(
                    build_info=build_info,
                    name='-'.join(test_name.split('-')[2:-1]),
                message = "Out of colors: {}".format(message)
                logs.append(("ERROR", message))
                logging.error(message)

        # Generate the chart:
        graph["layout"]["xaxis"]["title"] = \
            graph["layout"]["xaxis"]["title"].format(job=job_name)
        name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
                                        graph["output-file-name"],
                                        spec.cpta["output-file-type"])

        logs.append(("INFO", " Writing the file '{0}' ...".
        plpl = plgo.Figure(data=traces, layout=graph["layout"])
        # NOTE(review): the `try:` header for this plot call is on a
        # missing line.
            ploff.plot(plpl, show_link=False, auto_open=False,
        except plerr.PlotlyEmptyDataError:
            logs.append(("WARNING", "No data for the plot. Skipped."))

        # Result dict handed back to the parent via data_q.
            "job_name": job_name,
            "csv_table": csv_tbl,

    # Collect, per job, the builds that completed usefully (not failed /
    # not "not found" / ... — the rest of the condition is on a missing line).
    for job in spec.input["builds"].keys():
        if builds_dict.get(job, None) is None:
            builds_dict[job] = list()
        for build in spec.input["builds"][job]:
            status = build["status"]
            if status != "failed" and status != "not found" and \
                builds_dict[job].append(str(build["build"]))

    # Create "build ID": "date" dict:
    # each build maps to its (generated timestamp, version) pair.
    for job_name, job_data in builds_dict.items():
        if build_info.get(job_name, None) is None:
            build_info[job_name] = OrderedDict()
        for build in job_data:
            build_info[job_name][build] = (
                input_data.metadata(job_name, build).get("generated", ""),
                input_data.metadata(job_name, build).get("version", "")

    # One worker process per CPU, fed from a joinable queue and reporting
    # through a managed queue.
    work_queue = multiprocessing.JoinableQueue()
    manager = multiprocessing.Manager()
    data_queue = manager.Queue()
    cpus = multiprocessing.cpu_count()
    for cpu in range(cpus):
        worker = Worker(work_queue,
        workers.append(worker)
        # Pin each worker to one CPU; taskset output is discarded.
        os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
                  format(cpu, worker.pid))

    # Enqueue one work item per chart specification.
    for chart in spec.cpta["plots"]:
        work_queue.put((chart, ))

    anomaly_classifications = list()

    # Create the header rows (build numbers, dates, versions) of each
    # per-job csv table.
    for job_name in builds_dict.keys():
        if csv_tables.get(job_name, None) is None:
            csv_tables[job_name] = list()
        header = "Build Number:," + ",".join(builds_dict[job_name]) + '\n'
        csv_tables[job_name].append(header)
        build_dates = [x[0] for x in build_info[job_name].values()]
        header = "Build Date:," + ",".join(build_dates) + '\n'
        csv_tables[job_name].append(header)
        versions = [x[1] for x in build_info[job_name].values()]
        header = "Version:," + ",".join(versions) + '\n'
        csv_tables[job_name].append(header)

    # Drain worker results: classifications, csv rows, and the workers'
    # buffered log records replayed at their original levels.
    while not data_queue.empty():
        result = data_queue.get()
        anomaly_classifications.extend(result["results"])
        csv_tables[result["job_name"]].extend(result["csv_table"])
        for item in result["logs"]:
            if item[0] == "INFO":
                logging.info(item[1])
            elif item[0] == "ERROR":
                logging.error(item[1])
            elif item[0] == "DEBUG":
                logging.debug(item[1])
            elif item[0] == "CRITICAL":
                logging.critical(item[1])
            elif item[0] == "WARNING":
                logging.warning(item[1])

    # Terminate all workers
    for worker in workers:

    # Write per-job csv tables, then re-read each csv to render a
    # pretty-printed .txt twin.
    for job_name, csv_table in csv_tables.items():
        file_name = spec.cpta["output-file"] + "-" + job_name + "-trending"
        with open("{0}.csv".format(file_name), 'w') as file_handler:
            file_handler.writelines(csv_table)

        with open("{0}.csv".format(file_name), 'rb') as csv_file:
            csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
            for row in csv_content:
                # The first row becomes the table header.
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                    for idx, item in enumerate(row):
                        # Scale raw values down by 1e6 (presumably pps ->
                        # Mpps — confirm), rounded to 2 decimals.
                        row[idx] = str(round(float(item) / 1000000, 2))
                    txt_table.add_row(row)
        # Best-effort: a malformed csv must not abort the whole run.
        except Exception as err:
            logging.warning("Error occurred while generating TXT "
                            "table:\n{0}".format(err))
        txt_table.align["Build Number:"] = "l"
        with open("{0}.txt".format(file_name), "w") as txt_file:
            txt_file.write(str(txt_table))

    # Evaluate the overall result: any regression or outlier fails the run.
    if anomaly_classifications:
        for classification in anomaly_classifications:
            if classification == "regression" or classification == "outlier":

    logging.info("Partial results: {0}".format(anomaly_classifications))
    logging.info("Result: {0}".format(result))