1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
17 import multiprocessing
22 import plotly.offline as ploff
23 import plotly.graph_objs as plgo
24 import plotly.exceptions as plerr
26 from collections import OrderedDict
27 from datetime import datetime
28 from copy import deepcopy
30 from utils import archive_input_data, execute_command, \
31 classify_anomalies, Worker
34 # Command template used to build the HTML format of the report with
# sphinx; the {date}, {working_dir} and {build_dir} placeholders are
# filled in by generate_cpta() below.
35 HTML_BUILDER = 'sphinx-build -v -c conf_cpta -a ' \
38 '-D version="{date}" ' \
42 # Content of the .css patch file(s) for the HTML format of the report;
# written out verbatim by generate_cpta() to override theme table-width
# restrictions and menu styling.
43 THEME_OVERRIDES = """/* override table width restrictions */
45 max-width: 1200px !important;
47 .rst-content blockquote {
53 display: inline-block;
61 .wy-menu-vertical li.current a {
63 border-right: solid 1px #c9c9c9;
66 .wy-menu-vertical li.toctree-l2.current > a {
70 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
75 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
80 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
87 border-top-width: medium;
88 border-bottom-width: medium;
89 border-top-style: none;
90 border-bottom-style: none;
91 border-top-color: currentcolor;
92 border-bottom-color: currentcolor;
93 padding-left: 2em -4px;
97 COLORS = ["SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
98 "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
99 "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
100 "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
101 "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
102 "MediumSeaGreen", "SeaGreen", "LightSlateGrey",
103 "SkyBlue", "Olive", "Purple", "Coral", "Indigo", "Pink",
104 "Chocolate", "Brown", "Magenta", "Cyan", "Orange", "Black",
105 "Violet", "Blue", "Yellow", "BurlyWood", "CadetBlue", "Crimson",
106 "DarkBlue", "DarkCyan", "DarkGreen", "Green", "GoldenRod",
107 "LightGreen", "LightSeaGreen", "LightSkyBlue", "Maroon",
108 "MediumSeaGreen", "SeaGreen", "LightSlateGrey"
112 def generate_cpta(spec, data):
113     """Generate all formats and versions of the Continuous Performance Trending
116     :param spec: Specification read from the specification file.
117     :param data: Full data set.
118     :type spec: Specification
119     :type data: InputData
122     logging.info("Generating the Continuous Performance Trending and Analysis "
# Generate every chart defined in the specification; the code returned by
# _generate_all_charts carries the overall pass/fail state (presumably
# propagated to the caller — the return statement is not visible in this
# view).
125     ret_code = _generate_all_charts(spec, data)
# Build the sphinx command that renders the HTML report, stamping the
# build with the current UTC date/time.
127     cmd = HTML_BUILDER.format(
128         date=datetime.utcnow().strftime('%Y-%m-%d %H:%M UTC'),
129         working_dir=spec.environment["paths"]["DIR[WORKING,SRC]"],
130         build_dir=spec.environment["paths"]["DIR[BUILD,HTML]"])
# Overwrite both CSS patch files with the theme overrides defined above.
133     with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE]"], "w") as \
135         css_file.write(THEME_OVERRIDES)
137     with open(spec.environment["paths"]["DIR[CSS_PATCH_FILE2]"], "w") as \
139         css_file.write(THEME_OVERRIDES)
# Archiving of the input data defaults to enabled; disable it with the
# "archive-inputs" configuration key.
141     if spec.configuration.get("archive-inputs", True):
142         archive_input_data(spec)
144     logging.info("Done.")
149 def _generate_trending_traces(in_data, job_name, build_info,
150 show_trend_line=True, name="", color=""):
151 """Generate the trending traces:
153 - outliers, regress, progress
154 - average of normal samples (trending line)
156 :param in_data: Full data set.
157 :param job_name: The name of job which generated the data.
158 :param build_info: Information about the builds.
159 :param show_trend_line: Show moving median (trending plot).
160 :param name: Name of the plot
161 :param color: Name of the color for the plot.
162 :type in_data: OrderedDict
164 :type build_info: dict
165 :type show_trend_line: bool
168 :returns: Generated traces (list) and the evaluated result.
169 :rtype: tuple(traces, result)
# Keys of in_data are build indices (x); values are measured samples (y).
172 data_x = list(in_data.keys())
173 data_y = list(in_data.values())
# NOTE(review): the enclosing loop header iterating over sample indices
# (idx) is not visible in this view; the lines below build one hover
# string per data point.
178 date = build_info[job_name][str(idx)][0]
179 hover_str = ("date: {date}<br>"
180 "value: {value:,}<br>"
181 "{sut}-ref: {build}<br>"
182 "csit-ref: mrr-{period}-build-{build_nr}<br>"
183 "testbed: {testbed}")
# DPDK and VPP jobs fill the same hover template; the build reference
# drops anything after the first "~" (from the right) in the version
# string.
184 if "dpdk" in job_name:
185 hover_text.append(hover_str.format(
187 value=int(in_data[idx].avg),
189 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
192 testbed=build_info[job_name][str(idx)][2]))
193 elif "vpp" in job_name:
194 hover_text.append(hover_str.format(
196 value=int(in_data[idx].avg),
198 build=build_info[job_name][str(idx)][1].rsplit('~', 1)[0],
201 testbed=build_info[job_name][str(idx)][2]))
# Parse the build's "generated" timestamp into a datetime for the x axis
# (chars 0-8: YYYYMMDD date, 9-11: hour, 12+: minute).
203 xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
204 int(date[9:11]), int(date[12:])))
# Pair timestamps with samples in insertion order, then classify
# anomalies over the ordered series.
206 data_pd = OrderedDict()
207 for key, value in zip(xaxis, data_y):
210 anomaly_classification, avgs = classify_anomalies(data_pd)
212 anomalies = OrderedDict()
213 anomalies_colors = list()
214 anomalies_avgs = list()
# Collect only anomalous points (outlier / regression / progression),
# together with their marker colors and trend averages.
# NOTE(review): dict.iteritems() below is Python 2 only.
220 if anomaly_classification:
221 for idx, (key, value) in enumerate(data_pd.iteritems()):
222 if anomaly_classification[idx] in \
223 ("outlier", "regression", "progression"):
224 anomalies[key] = value
225 anomalies_colors.append(
226 anomaly_color[anomaly_classification[idx]])
227 anomalies_avgs.append(avgs[idx])
# Append the three colorscale anchor values so the scale maps
# consistently even when only one class of anomaly occurred.
228 anomalies_colors.extend([0.0, 0.5, 1.0])
# Trace 1: the raw measured samples.
232 trace_samples = plgo.Scatter(
234 y=[y.avg for y in data_y],
241 name="{name}".format(name=name),
250 traces = [trace_samples, ]
# Trace 2: the trend line built from the classified averages, with
# per-point "trend: <value>" hover text.
253 trace_trend = plgo.Scatter(
264 name='{name}'.format(name=name),
265 text=["trend: {0:,}".format(int(avg)) for avg in avgs],
266 hoverinfo="text+name"
268 traces.append(trace_trend)
# Trace 3: open circles marking the anomalies, colored via a discrete
# colorscale (red at the low end = regression; tick labels below name
# the three classes).
270 trace_anomalies = plgo.Scatter(
277 name="{name}-anomalies".format(name=name),
280 "symbol": "circle-open",
281 "color": anomalies_colors,
282 "colorscale": [[0.00, "red"],
295 "title": "Circles Marking Data Classification",
296 "titleside": 'right',
301 "tickvals": [0.167, 0.500, 0.833],
302 "ticktext": ["Regression", "Normal", "Progression"],
310 traces.append(trace_anomalies)
# The classification of the most recent sample is the evaluated result
# returned alongside the traces.
312 if anomaly_classification:
313 return traces, anomaly_classification[-1]
318 def _generate_all_charts(spec, input_data):
319 """Generate all charts specified in the specification file.
321 :param spec: Specification.
322 :param input_data: Full data set.
323 :type spec: Specification
324 :type input_data: InputData
# Per-chart worker body, consumed from work_queue by the Worker
# processes spawned below; results are handed back via data_q.
327 def _generate_chart(_, data_q, graph):
328 """Generates the chart.
333 logging.info(" Generating the chart '{0}' ...".
334 format(graph.get("title", "")))
335 logs.append(("INFO", " Generating the chart '{0}' ...".
336 format(graph.get("title", ""))))
# NOTE(review): dict.keys()[0] here and iteritems() below are
# Python 2 only.
338 job_name = graph["data"].keys()[0]
344 logs.append(("INFO", " Creating the data set for the {0} '{1}'.".
345 format(graph.get("type", ""), graph.get("title", ""))))
346 data = input_data.filter_data(graph, continue_on_error=True)
348 logging.error("No data.")
# Transpose the filtered data into per-test series keyed by build
# index, remembering each test's tags for the grouping step below.
353 for job, job_data in data.iteritems():
356 for index, bld in job_data.items():
357 for test_name, test in bld.items():
358 if chart_data.get(test_name, None) is None:
359 chart_data[test_name] = OrderedDict()
361 chart_data[test_name][int(index)] = \
362 test["result"]["receive-rate"]
363 chart_tags[test_name] = test.get("tags", None)
364 except (KeyError, TypeError):
367 # Add items to the csv table:
368 for tst_name, tst_data in chart_data.items():
370 for bld in builds_dict[job_name]:
371 itm = tst_data.get(int(bld), '')
372 if not isinstance(itm, str):
374 tst_lst.append(str(itm))
375 csv_tbl.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
# With "groups" defined, traces are built per group (tests matched by
# tag) and one visibility mask per group is recorded for the dropdown
# menu; otherwise one flat set of traces is built (second loop below).
380 groups = graph.get("groups", None)
387 for test_name, test_data in chart_data.items():
389 logs.append(("WARNING",
390 "No data for the test '{0}'".
393 if tag in chart_tags[test_name]:
394 message = "index: {index}, test: {test}".format(
395 index=index, test=test_name)
396 test_name = test_name.split('.')[-1]
398 trace, rslt = _generate_trending_traces(
401 build_info=build_info,
402 name='-'.join(test_name.split('-')[2:-1]),
405 message = "Out of colors: {}".format(message)
406 logs.append(("ERROR", message))
407 logging.error(message)
411 visible.extend([True for _ in range(len(trace))])
415 visibility.append(visible)
417 for test_name, test_data in chart_data.items():
419 logs.append(("WARNING", "No data for the test '{0}'".
422 message = "index: {index}, test: {test}".format(
423 index=index, test=test_name)
424 test_name = test_name.split('.')[-1]
426 trace, rslt = _generate_trending_traces(
429 build_info=build_info,
430 name='-'.join(test_name.split('-')[2:-1]),
433 message = "Out of colors: {}".format(message)
434 logs.append(("ERROR", message))
435 logging.error(message)
443 # Generate the chart:
445 layout = deepcopy(graph["layout"])
446 except KeyError as err:
447 logging.error("Finished with error: No layout defined")
448 logging.error(repr(err))
# Build one boolean visibility vector per group: the traces belonging
# to group i are visible only when i == r.
452 for i in range(len(visibility)):
454 for r in range(len(visibility)):
455 for _ in range(len(visibility[r])):
456 visible.append(i == r)
463 args=[{"visible": [True for _ in range(len(show[0]))]}, ]
# One dropdown button per group, labelled from "group-names" when
# available, else a generated "Group N" label.
465 for i in range(len(groups)):
467 label = graph["group-names"][i]
468 except (IndexError, KeyError):
469 label = "Group {num}".format(num=i + 1)
473 args=[{"visible": show[i]}, ]
476 layout['updatemenus'] = list([
# Write the plot to its output file; an empty plot is only a warning,
# not an error.
489 name_file = "{0}-{1}{2}".format(spec.cpta["output-file"],
490 graph["output-file-name"],
491 spec.cpta["output-file-type"])
493 logs.append(("INFO", " Writing the file '{0}' ...".
495 plpl = plgo.Figure(data=traces, layout=layout)
497 ploff.plot(plpl, show_link=False, auto_open=False,
499 except plerr.PlotlyEmptyDataError:
500 logs.append(("WARNING", "No data for the plot. Skipped."))
# Hand the per-chart results (job name, csv rows, buffered logs, ...)
# back to the parent process via the data queue.
503 "job_name": job_name,
504 "csv_table": csv_tbl,
# Collect, per job, the builds that completed (status neither "failed"
# nor "not found").
511 for job in spec.input["builds"].keys():
512 if builds_dict.get(job, None) is None:
513 builds_dict[job] = list()
514 for build in spec.input["builds"][job]:
515 status = build["status"]
516 if status != "failed" and status != "not found" and \
518 builds_dict[job].append(str(build["build"]))
520 # Create "build ID": "date" dict:
522 tb_tbl = spec.environment.get("testbeds", None)
523 for job_name, job_data in builds_dict.items():
524 if build_info.get(job_name, None) is None:
525 build_info[job_name] = OrderedDict()
526 for build in job_data:
528 tb_ip = input_data.metadata(job_name, build).get("testbed", "")
530 testbed = tb_tbl.get(tb_ip, "")
# Per-build tuple: (generated timestamp, version, testbed name).
531 build_info[job_name][build] = (
532 input_data.metadata(job_name, build).get("generated", ""),
533 input_data.metadata(job_name, build).get("version", ""),
# Fan the chart jobs out to one Worker process per CPU; each worker is
# pinned to a core with taskset (taskset output/errors are discarded).
537 work_queue = multiprocessing.JoinableQueue()
538 manager = multiprocessing.Manager()
539 data_queue = manager.Queue()
540 cpus = multiprocessing.cpu_count()
543 for cpu in range(cpus):
544 worker = Worker(work_queue,
549 workers.append(worker)
550 os.system("taskset -p -c {0} {1} > /dev/null 2>&1".
551 format(cpu, worker.pid))
553 for chart in spec.cpta["plots"]:
554 work_queue.put((chart, ))
557 anomaly_classifications = list()
# Seed each job's CSV table with three header rows: build numbers,
# build dates and versions.
561 for job_name in builds_dict.keys():
562 if csv_tables.get(job_name, None) is None:
563 csv_tables[job_name] = list()
564 header = "Build Number:," + ",".join(builds_dict[job_name]) + '\n'
565 csv_tables[job_name].append(header)
566 build_dates = [x[0] for x in build_info[job_name].values()]
567 header = "Build Date:," + ",".join(build_dates) + '\n'
568 csv_tables[job_name].append(header)
569 versions = [x[1] for x in build_info[job_name].values()]
570 header = "Version:," + ",".join(versions) + '\n'
571 csv_tables[job_name].append(header)
# Drain the workers' results: extend the anomaly classifications and
# CSV tables, and replay the buffered log records at their original
# levels.
573 while not data_queue.empty():
574 result = data_queue.get()
576 anomaly_classifications.extend(result["results"])
577 csv_tables[result["job_name"]].extend(result["csv_table"])
579 for item in result["logs"]:
580 if item[0] == "INFO":
581 logging.info(item[1])
582 elif item[0] == "ERROR":
583 logging.error(item[1])
584 elif item[0] == "DEBUG":
585 logging.debug(item[1])
586 elif item[0] == "CRITICAL":
587 logging.critical(item[1])
588 elif item[0] == "WARNING":
589 logging.warning(item[1])
593 # Terminate all workers
594 for worker in workers:
# Write each job's CSV table, then derive a TXT version via
# prettytable; numeric cells are scaled by 1e-6 (presumably Mpps) and
# rounded to two decimals.
599 for job_name, csv_table in csv_tables.items():
600 file_name = spec.cpta["output-file"] + "-" + job_name + "-trending"
601 with open("{0}.csv".format(file_name), 'w') as file_handler:
602 file_handler.writelines(csv_table)
605 with open("{0}.csv".format(file_name), 'rb') as csv_file:
606 csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
608 for row in csv_content:
609 if txt_table is None:
610 txt_table = prettytable.PrettyTable(row)
613 for idx, item in enumerate(row):
615 row[idx] = str(round(float(item) / 1000000, 2))
619 txt_table.add_row(row)
620 except Exception as err:
621 logging.warning("Error occurred while generating TXT "
622 "table:\n{0}".format(err))
624 txt_table.align["Build Number:"] = "l"
625 with open("{0}.txt".format(file_name), "w") as txt_file:
626 txt_file.write(str(txt_table))
# Overall verdict: any "regression" or "outlier" classification fails
# the run (the resulting return value is not fully visible in this
# view).
629 if anomaly_classifications:
631 for classification in anomaly_classifications:
632 if classification == "regression" or classification == "outlier":
638 logging.info("Partial results: {0}".format(anomaly_classifications))
639 logging.info("Result: {0}".format(result))