1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
17 import multiprocessing
22 import plotly.offline as ploff
23 import plotly.graph_objs as plgo
24 import plotly.exceptions as plerr
26 from collections import OrderedDict
27 from datetime import datetime
29 from utils import archive_input_data, execute_command, \
30 classify_anomalies, Worker
# NOTE(review): this listing is a line-numbered partial dump -- the numeric
# prefixes and the gaps in numbering are extraction artifacts, not part of the
# original source. Code lines are kept verbatim; only comments are added.
# Shell command template for building the HTML report with Sphinx.
# generate_cpta() fills {date}, {working_dir} and {build_dir}; only the {date}
# placeholder is visible here, so the elided continuation lines (35-36, 38-40)
# presumably carry the other two plus the source/build dirs -- confirm against
# the full source.
33 # Command to build the html format of the report
34 HTML_BUILDER = 'sphinx-build -v -c conf_cpta -a ' \
37 '-D version="{date}" ' \
# NOTE(review): line-numbered partial dump -- numeric prefixes and numbering
# gaps are extraction artifacts; many lines of this CSS string (including the
# selectors' closing braces and the closing triple quote) are elided. Code is
# kept verbatim; comments cannot be placed inside the string literal itself.
# CSS overrides written over the generated theme by generate_cpta(), which
# dumps this string into the two DIR[CSS_PATCH_FILE*] paths.
# NOTE(review): "padding-left: 2em -4px" (dump line 92) is not a valid value
# for the single-value padding-left property -- looks like a defect, but it
# sits inside a runtime string; confirm against the original file before
# changing it.
41 # .css file for the html format of the report
42 THEME_OVERRIDES = """/* override table width restrictions */
44 max-width: 1200px !important;
46 .rst-content blockquote {
52 display: inline-block;
60 .wy-menu-vertical li.current a {
62 border-right: solid 1px #c9c9c9;
65 .wy-menu-vertical li.toctree-l2.current > a {
69 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
74 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
79 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
86 border-top-width: medium;
87 border-bottom-width: medium;
88 border-top-style: none;
89 border-bottom-style: none;
90 border-top-color: currentcolor;
91 border-bottom-color: currentcolor;
92 padding-left: 2em -4px;
# NOTE(review): line-numbered partial dump -- numeric prefixes are extraction
# artifacts and the list's closing bracket is on an elided line (108?). Code
# lines kept verbatim.
# Palette of named colors consumed one-per-test when plotting traces (see the
# "Out of colors" error path in _generate_all_charts). The 30-name sequence
# is listed twice verbatim (dump lines 96-101 == 102-107), doubling the usable
# length at the cost of repeated colors -- presumably intentional to delay the
# "Out of colors" failure; confirm.
96 COLORS = ["SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
97 "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
98 "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
99 "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
100 "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
101 "MediumSeaGreen", "SeaGreen", "LightSlateGrey",
102 "SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
103 "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
104 "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
105 "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
106 "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
107 "MediumSeaGreen", "SeaGreen", "LightSlateGrey"
# NOTE(review): line-numbered partial dump -- numeric prefixes and numbering
# gaps are extraction artifacts. Elided here: the docstring's tail/close, the
# call that actually runs `cmd` (presumably execute_command(cmd), given the
# import -- confirm), the `as css_file` context-manager targets, and the final
# return (presumably `return ret_code`). Code lines kept verbatim.
# Driver for the whole Continuous Performance Trending and Analysis output:
#  1. render all charts/tables via _generate_all_charts(),
#  2. build the HTML report with sphinx-build (HTML_BUILDER template, stamped
#     with the current UTC date),
#  3. overwrite both theme CSS patch files with THEME_OVERRIDES,
#  4. archive the input data.
111 def generate_cpta(spec, data):
112 """Generate all formats and versions of the Continuous Performance Trending
115 :param spec: Specification read from the specification file.
116 :param data: Full data set.
117 :type spec: Specification
118 :type data: InputData
121 logging.info("Generating the Continuous Performance Trending and Analysis "
124 ret_code = _generate_all_charts(spec, data)
126 cmd = HTML_BUILDER.format(
127 date=datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC'),
128 working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
129 build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
132 with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE]"], "w") as \
134 css_file.write(THEME_OVERRIDES)
136 with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE2]"], "w") as \
138 css_file.write(THEME_OVERRIDES)
140 archive_input_data(spec)
142 logging.info("Done.")
# NOTE(review): line-numbered partial dump -- numeric prefixes and numbering
# gaps are extraction artifacts. Elided here: the loop header that drives
# `idx` over the builds, the initialisation of hover_text / xaxis /
# anomaly_color, the `period` / `build_nr` / `sut` format arguments, most
# plgo.Scatter keyword arguments (x=, mode=, markers, colorbar tail), the
# show_trend_line guard, and the return path taken when
# anomaly_classification is empty. Code lines kept verbatim.
# Builds the plotly traces for one test's trending graph:
#   * per-build sample scatter with hover text (date, value, SUT build ref,
#     csit build ref, testbed),
#   * a trend line whose hover text shows the moving averages from
#     classify_anomalies(),
#   * anomaly markers ("outlier"/"regression"/"progression" points) colored
#     through a colorscale whose colorbar is labelled
#     Regression / Normal / Progression,
# and returns (traces, <classification of the newest build>).
# NOTE(review): `data_pd.iteritems()` (dump line 219) is Python-2-only syntax.
147 def _generate_trending_traces(in_data, job_name, build_info,
148 show_trend_line=True, name="", color=""):
149 """Generate the trending traces:
151 - outliers, regress, progress
152 - average of normal samples (trending line)
154 :param in_data: Full data set.
155 :param job_name: The name of job which generated the data.
156 :param build_info: Information about the builds.
157 :param show_trend_line: Show moving median (trending plot).
158 :param name: Name of the plot
159 :param color: Name of the color for the plot.
160 :type in_data: OrderedDict
162 :type build_info: dict
163 :type show_trend_line: bool
166 :returns: Generated traces (list) and the evaluated result.
167 :rtype: tuple(traces, result)
# x values are build numbers, y values are per-build results (objects with an
# .avg attribute, per the uses below).
170 data_x = list(in_data.keys())
171 data_y = list(in_data.values())
# Per-build hover text. build_info[job][build] is a 3-tuple of
# (generated-date string, version/build ref, testbed) -- see
# _generate_all_charts where it is assembled.
176 date = build_info[job_name][str(idx)][0]
177 hover_str = ("date: {date}<br>"
178 "value: {value:,}<br>"
179 "{sut}-ref: {build}<br>"
180 "csit-ref: mrr-{period}-build-{build_nr}<br>"
181 "testbed: {testbed}")
182 if "dpdk" in job_name:
183 hover_text.append(hover_str.format(
185 value=int(in_data[idx].avg),
187 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
190 testbed=build_info[job_name][str(idx)][2]))
191 elif "vpp" in job_name:
192 hover_text.append(hover_str.format(
194 value=int(in_data[idx].avg),
196 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
199 testbed=build_info[job_name][str(idx)][2]))
# `date` is sliced as YYYY / MM / DD / HH / MM with a separator at index 8
# (exact source format not visible in this dump -- confirm).
201 xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
202 int(date[9:11]), int(date[12:])))
# Map x-axis datetime -> sample, preserving order, for anomaly classification.
204 data_pd = OrderedDict()
205 for key, value in zip(xaxis, data_y):
208 anomaly_classification, avgs = classify_anomalies(data_pd)
# Collect the anomalous points plus their trend averages. The three sentinel
# values 0.0/0.5/1.0 appended to anomalies_colors presumably pin the marker
# colorscale's range so colors map consistently -- confirm.
210 anomalies = OrderedDict()
211 anomalies_colors = list()
212 anomalies_avgs = list()
218 if anomaly_classification:
219 for idx, (key, value) in enumerate(data_pd.iteritems()):
220 if anomaly_classification[idx] in \
221 ("outlier", "regression", "progression"):
222 anomalies[key] = value
223 anomalies_colors.append(
224 anomaly_color[anomaly_classification[idx]])
225 anomalies_avgs.append(avgs[idx])
226 anomalies_colors.extend([0.0, 0.5, 1.0])
# Trace 1: raw per-build samples.
230 trace_samples = plgo.Scatter(
232 y=[y.avg for y in data_y],
239 name="{name}".format(name=name),
248 traces = [trace_samples, ]
# Trace 2: trend line built from the classify_anomalies() moving averages
# (presumably guarded by show_trend_line on an elided line -- confirm).
251 trace_trend = plgo.Scatter(
262 name='{name}'.format(name=name),
263 text=["trend: {0:,}".format(int(avg)) for avg in avgs],
264 hoverinfo="text+name"
266 traces.append(trace_trend)
# Trace 3: anomaly markers with a labelled colorbar.
268 trace_anomalies = plgo.Scatter(
275 name="{name}-anomalies".format(name=name),
278 "symbol": "circle-open",
279 "color": anomalies_colors,
280 "colorscale": [[0.00, "red"],
293 "title": "Circles Marking Data Classification",
294 "titleside": 'right',
299 "tickvals": [0.167, 0.500, 0.833],
300 "ticktext": ["Regression", "Normal", "Progression"],
308 traces.append(trace_anomalies)
# Report the newest build's classification; the alternative return for an
# empty classification list is on elided lines.
310 if anomaly_classification:
311 return traces, anomaly_classification[-1]
# NOTE(review): line-numbered partial dump -- numeric prefixes and numbering
# gaps are extraction artifacts. Elided here (among others): the try/except
# openers, the data_q.put(...) in _generate_chart, the Worker constructor
# arguments and start() calls, work_queue.join(), the third build-status
# condition, worker termination bodies, and the final pass/fail assignment to
# `result`. Code lines kept verbatim; only review comments added.
# Orchestrates chart generation: builds per-job build lists and metadata,
# fans chart rendering out to one Worker process per CPU, drains their
# results, writes per-job trending CSV/TXT tables, and evaluates the overall
# result from the collected anomaly classifications.
# NOTE(review): Python-2 idioms throughout -- graph["data"].keys()[0],
# data.iteritems(), and open(..., 'rb') feeding csv.reader.
316 def _generate_all_charts(spec, input_data):
317 """Generate all charts specified in the specification file.
319 :param spec: Specification.
320 :param input_data: Full data set.
321 :type spec: Specification
322 :type input_data: InputData
# Per-process worker body. First parameter is unused (hence `_`); `data_q` is
# the shared result queue; `graph` is one plot spec from spec.cpta["plots"].
# It filters the input data, builds per-test series and csv rows, renders the
# plotly figure to an HTML file, and (on an elided line) puts a result dict on
# data_q containing at least "job_name", "csv_table", "results" and "logs"
# (the keys read back in the drain loop below).
325 def _generate_chart(_, data_q, graph):
326 """Generates the chart.
331 logging.info(" Generating the chart '{0}' ...".
332 format(graph.get("title", "")))
333 logs.append(("INFO", " Generating the chart '{0}' ...".
334 format(graph.get("title", ""))))
# Python 2 only: dict.keys() is a list here; Py3 would need list(...)[0].
336 job_name = graph["data"].keys()[0]
342 logs.append(("INFO", " Creating the data set for the {0} '{1}'.".
343 format(graph.get("type", ""), graph.get("title", ""))))
344 data = input_data.filter_data(graph, continue_on_error=True)
346 logging.error("No data.")
# Collect per-test series keyed by build number; missing/odd entries are
# skipped via the KeyError/TypeError handler.
350 for job, job_data in data.iteritems():
353 for index, bld in job_data.items():
354 for test_name, test in bld.items():
355 if chart_data.get(test_name, None) is None:
356 chart_data[test_name] = OrderedDict()
358 chart_data[test_name][int(index)] = \
359 test["result"]["receive-rate"]
360 except (KeyError, TypeError):
363 # Add items to the csv table:
364 for tst_name, tst_data in chart_data.items():
366 for bld in builds_dict[job_name]:
367 itm = tst_data.get(int(bld), '')
368 if not isinstance(itm, str):
370 tst_lst.append(str(itm))
371 csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
# Build one trace set per test; colors are consumed from COLORS and running
# out is reported as an error (IndexError presumably caught on an elided
# line -- confirm).
375 for test_name, test_data in chart_data.items():
377 logs.append(("WARNING", "No data for the test '{0}'".
380 message = "index: {index}, test: {test}".format(
381 index=index, test=test_name)
382 test_name = test_name.split('.')[-1]
384 trace, rslt = _generate_trending_traces(
387 build_info=build_info,
388 name='-'.join(test_name.split('-')[2:-1]),
391 message = "Out of colors: {}".format(message)
392 logs.append(("ERROR", message))
393 logging.error(message)
401 # Generate the chart:
402 graph["layout"]["title"] = \
403 "<b>{title}</b>".format(title=graph.get("title", ""))
404 name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
405 graph["output-file-name"],
406 spec.cpta["output-file-type"])
408 logs.append(("INFO", " Writing the file '{0}' ...".
410 plpl = plgo.Figure(data=traces, layout=graph["layout"])
412 ploff.plot(plpl, show_link=False, auto_open=False,
414 except plerr.PlotlyEmptyDataError:
415 logs.append(("WARNING", "No data for the plot. Skipped."))
418 "job_name": job_name,
419 "csv_table": csv_tbl,
# Per-job lists of usable build numbers; builds with status "failed" or
# "not found" (plus a third condition on an elided line) are skipped.
426 for job in spec.input["builds"].keys():
427 if builds_dict.get(job, None) is None:
428 builds_dict[job] = list()
429 for build in spec.input["builds"][job]:
430 status = build["status"]
431 if status != "failed" and status != "not found" and \
433 builds_dict[job].append(str(build["build"]))
435 # Create "build ID": "date" dict:
# build_info[job][build] = (generated date, version, testbed name); the
# testbed name is resolved from the build's testbed IP via the environment's
# "testbeds" table.
437 tb_tbl = spec.environment.get("testbeds", None)
438 for job_name, job_data in builds_dict.items():
439 if build_info.get(job_name, None) is None:
440 build_info[job_name] = OrderedDict()
441 for build in job_data:
443 tb_ip = input_data.metadata(job_name, build).get("testbed", "")
445 testbed = tb_tbl.get(tb_ip, "")
446 build_info[job_name][build] = (
447 input_data.metadata(job_name, build).get("generated", ""),
448 input_data.metadata(job_name, build).get("version", ""),
# Fan chart generation out across all CPUs; each Worker is best-effort pinned
# to one core via taskset (output discarded, failures ignored).
452 work_queue = multiprocessing.JoinableQueue()
453 manager = multiprocessing.Manager()
454 data_queue = manager.Queue()
455 cpus = multiprocessing.cpu_count()
458 for cpu in range(cpus):
459 worker = Worker(work_queue,
464 workers.append(worker)
465 os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
466 format(cpu, worker.pid))
# One task per plot spec.
468 for chart in spec.cpta["plots"]:
469 work_queue.put((chart, ))
472 anomaly_classifications = list()
# Seed each job's CSV with header rows: build numbers, dates, versions.
476 for job_name in builds_dict.keys():
477 if csv_tables.get(job_name, None) is None:
478 csv_tables[job_name] = list()
479 header = "Build Number:," + ",".join(builds_dict[job_name]) + '\n'
480 csv_tables[job_name].append(header)
481 build_dates = [x[0] for x in build_info[job_name].values()]
482 header = "Build Date:," + ",".join(build_dates) + '\n'
483 csv_tables[job_name].append(header)
484 versions = [x[1] for x in build_info[job_name].values()]
485 header = "Version:," + ",".join(versions) + '\n'
486 csv_tables[job_name].append(header)
# Drain worker results. NOTE(review): empty()/get() on a Manager queue is
# racy in general; presumably work_queue.join() has completed on an elided
# line before this loop -- confirm.
488 while not data_queue.empty():
489 result = data_queue.get()
491 anomaly_classifications.extend(result["results"])
492 csv_tables[result["job_name"]].extend(result["csv_table"])
# Replay the workers' buffered log records at their original levels.
494 for item in result["logs"]:
495 if item[0] == "INFO":
496 logging.info(item[1])
497 elif item[0] == "ERROR":
498 logging.error(item[1])
499 elif item[0] == "DEBUG":
500 logging.debug(item[1])
501 elif item[0] == "CRITICAL":
502 logging.critical(item[1])
503 elif item[0] == "WARNING":
504 logging.warning(item[1])
508 # Terminate all workers
509 for worker in workers:
# Write each job's trending CSV, then re-read it to render an aligned TXT
# table. Numeric cells are scaled by 1e6 (presumably raw pps -> Mpps --
# confirm units). NOTE(review): 'rb' + csv.reader is Python-2-only; Py3
# needs text mode with newline=''.
514 for job_name, csv_table in csv_tables.items():
515 file_name = spec.cpta["output-file"] + "-" + job_name + "-trending"
516 with open("{0}.csv".format(file_name), 'w') as file_handler:
517 file_handler.writelines(csv_table)
520 with open("{0}.csv".format(file_name), 'rb') as csv_file:
521 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
523 for row in csv_content:
524 if txt_table is None:
525 txt_table = prettytable.PrettyTable(row)
528 for idx, item in enumerate(row):
530 row[idx] = str(round(float(item) / 1000000, 2))
534 txt_table.add_row(row)
535 except Exception as err:
536 logging.warning("Error occurred while generating TXT "
537 "table:\n{0}".format(err))
539 txt_table.align["Build Number:"] = "l"
540 with open("{0}.txt".format(file_name), "w") as txt_file:
541 txt_file.write(str(txt_table))
# Overall verdict: any "regression" or "outlier" classification presumably
# makes `result` non-PASS (the assignments are on elided lines -- confirm).
544 if anomaly_classifications:
546 for classification in anomaly_classifications:
547 if classification == "regression" or classification == "outlier":
553 logging.info("Partial results: {0}".format(anomaly_classifications))
554 logging.info("Result: {0}".format(result))