1 # Copyright (c) 2018 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
17 import multiprocessing
22 import plotly.offline as ploff
23 import plotly.graph_objs as plgo
24 import plotly.exceptions as plerr
26 from collections import OrderedDict
27 from datetime import datetime
29 from utils import archive_input_data, execute_command, \
30 classify_anomalies, Worker
33 # Command to build the html format of the report
# NOTE(review): the source is elided here — the embedded original line
# numbers jump (34 -> 37 -> 41), so parts of HTML_BUILDER and
# THEME_OVERRIDES are not visible. Comments describe only visible fragments.
# Visible pieces of the sphinx command: conf dir "conf_cpta", and the report
# date injected via '-D version="{date}"'. generate_cpta() fills in {date},
# {working_dir} and {build_dir} when it formats this template.
34 HTML_BUILDER = 'sphinx-build -v -c conf_cpta -a ' \
37 '-D version="{date}" ' \
41 # .css file for the html format of the report
# CSS patch written verbatim into DIR[CSS_PATCH_FILE] and DIR[CSS_PATCH_FILE2]
# by generate_cpta(); widens the sphinx theme's tables (visible rule:
# max-width 1200px).
42 THEME_OVERRIDES = """/* override table width restrictions */
44 max-width: 1200px !important;
# Color palette for the per-test traces in the trending charts.
# The 30-name sequence is deliberately listed twice, doubling the palette so
# more tests can be plotted before the chart code runs out of colors (the
# chart generator logs "Out of colors" when its index exceeds this list).
48 COLORS = ["SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
49 "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
50 "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
51 "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
52 "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
53 "MediumSeaGreen", "SeaGreen", "LightSlateGrey",
# --- second copy of the same 30 names (palette extension) ---
54 "SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
55 "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
56 "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
57 "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
58 "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
59 "MediumSeaGreen", "SeaGreen", "LightSlateGrey"
# Top-level entry point: generates charts, builds the HTML report, patches
# the theme CSS and archives the input data.
# NOTE(review): the source is elided — embedded original line numbers jump
# (64, 67, 68, ...), so statements between the visible lines are missing.
# Comments below describe only what is visible.
63 def generate_cpta(spec, data):
64 """Generate all formats and versions of the Continuous Performance Trending
67 :param spec: Specification read from the specification file.
68 :param data: Full data set.
69 :type spec: Specification
73 logging.info("Generating the Continuous Performance Trending and Analysis "
# Generate every chart defined in the specification; ret_code is presumably
# the overall pass/fail verdict — its use is in the elided lines, confirm.
76 ret_code = _generate_all_charts(spec, data)
# Build the sphinx-build command: report date (UTC, MM/DD/YYYY HH:MM) plus
# working/build directories taken from the spec's environment paths.
78 cmd = HTML_BUILDER.format(
79 date=datetime.utcnow().strftime('%m/%d/%Y %H:%M UTC'),
80 working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
81 build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
# Write the THEME_OVERRIDES css patch into both configured css patch files
# (overrides the sphinx theme's table width restrictions).
84 with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE]"], "w") as \
86 css_file.write(THEME_OVERRIDES)
88 with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE2]"], "w") as \
90 css_file.write(THEME_OVERRIDES)
# Keep a copy of the input data alongside the generated report.
92 archive_input_data(spec)
# Build the plotly traces (samples, trend line, anomaly markers) for one test.
# NOTE(review): the source is elided — embedded original line numbers jump
# (100 -> 101 -> 103, 123 -> 128, ...), so loop headers and many Scatter
# keyword arguments are missing from view. Comments describe only what is
# visible; hedged statements need confirmation against the full file.
99 def _generate_trending_traces(in_data, job_name, build_info,
100 show_trend_line=True, name="", color=""):
101 """Generate the trending traces:
103 - outliers, regress, progress
104 - average of normal samples (trending line)
106 :param in_data: Full data set.
107 :param job_name: The name of job which generated the data.
108 :param build_info: Information about the builds.
109 :param show_trend_line: Show moving median (trending plot).
110 :param name: Name of the plot
111 :param color: Name of the color for the plot.
112 :type in_data: OrderedDict
114 :type build_info: dict
115 :type show_trend_line: bool
118 :returns: Generated traces (list) and the evaluated result.
119 :rtype: tuple(traces, result)
# x values are build numbers, y values are per-build results (objects with
# an .avg attribute, per the uses below).
122 data_x = list(in_data.keys())
123 data_y = list(in_data.values())
# (elided loop over builds) build per-point hover strings; the build date
# and version come from build_info[job_name][build] = (date, version).
128 date = build_info[job_name][str(idx)][0]
129 hover_str = ("date: {0}<br>"
132 "csit-ref: mrr-{4}-build-{5}")
# dpdk and vpp jobs format the hover text differently (the differing format
# arguments are in the elided lines).
133 if "dpdk" in job_name:
134 hover_text.append(hover_str.format(
136 int(in_data[idx].avg),
138 build_info[job_name][str(idx)][1].
142 elif "vpp" in job_name:
143 hover_text.append(hover_str.format(
145 int(in_data[idx].avg),
147 build_info[job_name][str(idx)][1].
# Parse the date string by fixed slicing: "YYYYMMDD?HH?MM" -> datetime
# (positions 9:11 = hour, 12: = minute; separator chars at 8 and 11).
152 xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
153 int(date[9:11]), int(date[12:])))
# Re-key the series by datetime for the anomaly classifier.
155 data_pd = OrderedDict()
156 for key, value in zip(xaxis, data_y):
159 anomaly_classification, avgs = classify_anomalies(data_pd)
# Collect only anomalous points (outlier/regression/progression) with their
# marker colors and trend averages.
161 anomalies = OrderedDict()
162 anomalies_colors = list()
163 anomalies_avgs = list()
# Python 2 API (iteritems); a py3 migration would need .items().
169 if anomaly_classification:
170 for idx, (key, value) in enumerate(data_pd.iteritems()):
171 if anomaly_classification[idx] in \
172 ("outlier", "regression", "progression"):
173 anomalies[key] = value
174 anomalies_colors.append(
175 anomaly_color[anomaly_classification[idx]])
176 anomalies_avgs.append(avgs[idx])
# Presumably appended to pin the marker colorscale to the full 0..1 range
# even when only one anomaly class occurs — TODO confirm against plotly docs.
177 anomalies_colors.extend([0.0, 0.5, 1.0])
# Trace 1: raw per-build samples.
181 trace_samples = plgo.Scatter(
183 y=[y.avg for y in data_y],
190 name="{name}".format(name=name),
199 traces = [trace_samples, ]
# Trace 2: the trend line (averages from classify_anomalies), with a
# "trend: N" hover label per point.
202 trace_trend = plgo.Scatter(
213 name='{name}'.format(name=name),
214 text=["trend: {0:,}".format(int(avg)) for avg in avgs],
215 hoverinfo="text+name"
217 traces.append(trace_trend)
# Trace 3: open-circle markers on anomalous points, colored through a
# red/.../green colorscale with a three-tick legend colorbar.
219 trace_anomalies = plgo.Scatter(
226 name="{name}-anomalies".format(name=name),
229 "symbol": "circle-open",
230 "color": anomalies_colors,
231 "colorscale": [[0.00, "red"],
244 "title": "Circles Marking Data Classification",
245 "titleside": 'right',
250 "tickvals": [0.167, 0.500, 0.833],
251 "ticktext": ["Regression", "Normal", "Progression"],
259 traces.append(trace_anomalies)
# Evaluated result = classification of the newest (last) sample; the branch
# taken when anomaly_classification is empty is in the elided lines.
261 if anomaly_classification:
262 return traces, anomaly_classification[-1]
# Orchestrates chart generation across worker processes and writes the
# per-job CSV/TXT trending tables.
# NOTE(review): the source is elided — embedded original line numbers jump
# throughout, and this function may continue past the last visible line.
# Comments describe only what is visible.
267 def _generate_all_charts(spec, input_data):
268 """Generate all charts specified in the specification file.
270 :param spec: Specification.
271 :param input_data: Full data set.
272 :type spec: Specification
273 :type input_data: InputData
# Worker body: builds one chart and pushes its results onto data_q.
# Runs in a separate process, so messages are accumulated in `logs` and
# replayed by the parent (see the data_queue drain loop below).
276 def _generate_chart(_, data_q, graph):
277 """Generates the chart.
282 logging.info(" Generating the chart '{0}' ...".
283 format(graph.get("title", "")))
284 logs.append(("INFO", " Generating the chart '{0}' ...".
285 format(graph.get("title", ""))))
# Python 2 API: dict.keys()[0]; py3 would need list(graph["data"])[0].
287 job_name = graph["data"].keys()[0]
293 logs.append(("INFO", " Creating the data set for the {0} '{1}'.".
294 format(graph.get("type", ""), graph.get("title", ""))))
295 data = input_data.filter_data(graph, continue_on_error=True)
297 logging.error("No data.")
# Reshape filtered data into chart_data[test_name][build_number] =
# receive-rate result; missing keys are tolerated (KeyError/TypeError).
301 for job, job_data in data.iteritems():
304 for index, bld in job_data.items():
305 for test_name, test in bld.items():
306 if chart_data.get(test_name, None) is None:
307 chart_data[test_name] = OrderedDict()
309 chart_data[test_name][int(index)] = \
310 test["result"]["receive-rate"]
311 except (KeyError, TypeError):
314 # Add items to the csv table:
# One CSV row per test: value per build, '' for builds with no data.
315 for tst_name, tst_data in chart_data.items():
317 for bld in builds_dict[job_name]:
318 itm = tst_data.get(int(bld), '')
319 if not isinstance(itm, str):
321 tst_lst.append(str(itm))
322 csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
# Generate one trace set per test; trace names drop the suite prefix and
# keep the middle '-'-separated components of the test name.
326 for test_name, test_data in chart_data.items():
328 logs.append(("WARNING", "No data for the test '{0}'".
331 message = "index: {index}, test: {test}".format(
332 index=index, test=test_name)
333 test_name = test_name.split('.')[-1]
335 trace, rslt = _generate_trending_traces(
338 build_info=build_info,
339 name='-'.join(test_name.split('-')[2:-1]),
# Raised (presumably IndexError on the COLORS list — confirm in the
# elided lines) when there are more tests than palette entries.
342 message = "Out of colors: {}".format(message)
343 logs.append(("ERROR", message))
344 logging.error(message)
352 # Generate the chart:
353 graph["layout"]["xaxis"]["title"] = \
354 graph["layout"]["xaxis"]["title"].format(job=job_name)
355 name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
356 graph["output-file-name"],
357 spec.cpta["output-file-type"])
359 logs.append(("INFO", " Writing the file '{0}' ...".
361 plpl = plgo.Figure(data=traces, layout=graph["layout"])
# Write the standalone HTML plot; an empty figure is non-fatal.
363 ploff.plot(plpl, show_link=False, auto_open=False,
365 except plerr.PlotlyEmptyDataError:
366 logs.append(("WARNING", "No data for the plot. Skipped."))
# Result payload sent back to the parent process (also carries "results"
# and "logs", per the drain loop below).
369 "job_name": job_name,
370 "csv_table": csv_tbl,
# builds_dict: job -> list of build numbers (as strings), skipping builds
# that failed or were not found.
377 for job in spec.input["builds"].keys():
378 if builds_dict.get(job, None) is None:
379 builds_dict[job] = list()
380 for build in spec.input["builds"][job]:
381 status = build["status"]
382 if status != "failed" and status != "not found":
383 builds_dict[job].append(str(build["build"]))
385 # Create "build ID": "date" dict:
# build_info: job -> build -> (generated-date, version) from metadata.
387 for job_name, job_data in builds_dict.items():
388 if build_info.get(job_name, None) is None:
389 build_info[job_name] = OrderedDict()
390 for build in job_data:
391 build_info[job_name][build] = (
392 input_data.metadata(job_name, build).get("generated", ""),
393 input_data.metadata(job_name, build).get("version", "")
# Fan out chart generation: one Worker per CPU, results returned through a
# managed queue, work items through a joinable queue.
396 work_queue = multiprocessing.JoinableQueue()
397 manager = multiprocessing.Manager()
398 data_queue = manager.Queue()
399 cpus = multiprocessing.cpu_count()
402 for cpu in range(cpus):
403 worker = Worker(work_queue,
408 workers.append(worker)
# Pin each worker to its CPU; best-effort, output discarded.
409 os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
410 format(cpu, worker.pid))
412 for chart in spec.cpta["plots"]:
413 work_queue.put((chart, ))
416 anomaly_classifications = list()
# Seed each job's CSV table with header rows: build numbers, dates, versions.
420 for job_name in builds_dict.keys():
421 if csv_tables.get(job_name, None) is None:
422 csv_tables[job_name] = list()
423 header = "Build Number:," + ",".join(builds_dict[job_name]) + '\n'
424 csv_tables[job_name].append(header)
425 build_dates = [x[0] for x in build_info[job_name].values()]
426 header = "Build Date:," + ",".join(build_dates) + '\n'
427 csv_tables[job_name].append(header)
428 versions = [x[1] for x in build_info[job_name].values()]
429 header = "Version:," + ",".join(versions) + '\n'
430 csv_tables[job_name].append(header)
# Drain worker results: collect classifications, append CSV rows, and
# replay the workers' buffered log messages at their original levels.
432 while not data_queue.empty():
433 result = data_queue.get()
435 anomaly_classifications.extend(result["results"])
436 csv_tables[result["job_name"]].extend(result["csv_table"])
438 for item in result["logs"]:
439 if item[0] == "INFO":
440 logging.info(item[1])
441 elif item[0] == "ERROR":
442 logging.error(item[1])
443 elif item[0] == "DEBUG":
444 logging.debug(item[1])
445 elif item[0] == "CRITICAL":
446 logging.critical(item[1])
447 elif item[0] == "WARNING":
448 logging.warning(item[1])
452 # Terminate all workers
453 for worker in workers:
# Write the per-job trending CSV, then re-read it to render a TXT table.
458 for job_name, csv_table in csv_tables.items():
459 file_name = spec.cpta["output-file"] + "-" + job_name + "-trending"
460 with open("{0}.csv".format(file_name), 'w') as file_handler:
461 file_handler.writelines(csv_table)
# 'rb' mode for csv.reader is the Python 2 convention; py3 needs 'r' with
# newline=''.
464 with open("{0}.csv".format(file_name), 'rb') as csv_file:
465 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
467 for row in csv_content:
# First row becomes the PrettyTable header; numeric cells are
# rescaled to millions, rounded to 2 places.
468 if txt_table is None:
469 txt_table = prettytable.PrettyTable(row)
472 for idx, item in enumerate(row):
474 row[idx] = str(round(float(item) / 1000000, 2))
478 txt_table.add_row(row)
# Broad catch is deliberate best-effort: a malformed row must not kill
# the whole report, only this TXT table.
479 except Exception as err:
480 logging.warning("Error occurred while generating TXT "
481 "table:\n{0}".format(err))
483 txt_table.align["Build Number:"] = "l"
484 with open("{0}.txt".format(file_name), "w") as txt_file:
485 txt_file.write(str(txt_table))
# Overall verdict: any regression/outlier among the newest samples fails
# the run (the assignments to `result` are in the elided lines — confirm).
488 if anomaly_classifications:
490 for classification in anomaly_classifications:
491 if classification == "regression" or classification == "outlier":
497 logging.info("Partial results: {0}".format(anomaly_classifications))
498 logging.info("Result: {0}".format(result))