a11d1da25fb0d0646fe3e30806a6a7a17a534570
[csit.git] / resources / tools / presentation / generator_cpta.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Generation of Continuous Performance Trending and Analysis.
15 """
16
17 import logging
18 import csv
19
20 from collections import OrderedDict
21 from datetime import datetime
22 from copy import deepcopy
23
24 import prettytable
25 import plotly.offline as ploff
26 import plotly.graph_objs as plgo
27 import plotly.exceptions as plerr
28
29 from pal_utils import archive_input_data, execute_command, classify_anomalies
30
31
# Command to build the html format of the report. The {date}, {working_dir}
# and {build_dir} placeholders are filled in by generate_cpta() before the
# command is executed.
HTML_BUILDER = u'sphinx-build -v -c conf_cpta -a ' \
               u'-b html -E ' \
               u'-t html ' \
               u'-D version="{date}" ' \
               u'{working_dir} ' \
               u'{build_dir}/'

# .css file for the html format of the report. Written verbatim into the
# css patch files by generate_cpta(); do not reformat this string.
THEME_OVERRIDES = u"""/* override table width restrictions */
.wy-nav-content {
    max-width: 1200px !important;
}
.rst-content blockquote {
    margin-left: 0px;
    line-height: 18px;
    margin-bottom: 0px;
}
.wy-menu-vertical a {
    display: inline-block;
    line-height: 18px;
    padding: 0 2em;
    display: block;
    position: relative;
    font-size: 90%;
    color: #d9d9d9
}
.wy-menu-vertical li.current a {
    color: gray;
    border-right: solid 1px #c9c9c9;
    padding: 0 3em;
}
.wy-menu-vertical li.toctree-l2.current > a {
    background: #c9c9c9;
    padding: 0 3em;
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
    display: block;
    background: #c9c9c9;
    padding: 0 4em;
}
.wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
    display: block;
    background: #bdbdbd;
    padding: 0 5em;
}
.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
    color: #404040;
    padding: 0 2em;
    font-weight: bold;
    position: relative;
    background: #fcfcfc;
    border: none;
        border-top-width: medium;
        border-bottom-width: medium;
        border-top-style: none;
        border-bottom-style: none;
        border-top-color: currentcolor;
        border-bottom-color: currentcolor;
    padding-left: 2em -4px;
}
"""

# Color names assigned to plot traces by index. The 30-color sequence is
# intentionally listed twice so twice as many traces can be drawn before the
# palette runs out; an IndexError past the end is caught and logged by the
# chart generator.
COLORS = [
    u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
    u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
    u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
    u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
    u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
    u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey",
    u"SkyBlue", u"Olive", u"Purple", u"Coral", u"Indigo", u"Pink",
    u"Chocolate", u"Brown", u"Magenta", u"Cyan", u"Orange", u"Black",
    u"Violet", u"Blue", u"Yellow", u"BurlyWood", u"CadetBlue", u"Crimson",
    u"DarkBlue", u"DarkCyan", u"DarkGreen", u"Green", u"GoldenRod",
    u"LightGreen", u"LightSeaGreen", u"LightSkyBlue", u"Maroon",
    u"MediumSeaGreen", u"SeaGreen", u"LightSlateGrey"
]
109
110
def generate_cpta(spec, data):
    """Generate all formats and versions of the Continuous Performance Trending
    and Analysis.

    Runs the chart generation, builds the html pages via sphinx, patches the
    generated output with the css overrides and optionally archives the input
    data.

    :param spec: Specification read from the specification file.
    :param data: Full data set.
    :type spec: Specification
    :type data: InputData
    :returns: Result of the chart generation (u"PASS" or u"FAIL").
    :rtype: str
    """

    logging.info(u"Generating the Continuous Performance Trending and Analysis "
                 u"...")

    ret_code = _generate_all_charts(spec, data)

    # Build the static html pages with sphinx.
    paths = spec.environment[u'paths']
    build_cmd = HTML_BUILDER.format(
        date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
        working_dir=paths[u'DIR[WORKING,SRC]'],
        build_dir=paths[u'DIR[BUILD,HTML]'])
    execute_command(build_cmd)

    # Both css patch files receive the same theme overrides.
    for css_path_key in (u'DIR[CSS_PATCH_FILE]', u'DIR[CSS_PATCH_FILE2]'):
        with open(paths[css_path_key], u'w') as css_file:
            css_file.write(THEME_OVERRIDES)

    if spec.configuration.get(u"archive-inputs", True):
        archive_input_data(spec)

    logging.info(u"Done.")

    return ret_code
146
147
def _generate_trending_traces(in_data, job_name, build_info,
                              show_trend_line=True, name=u"", color=u""):
    """Generate the trending traces:
     - samples,
     - outliers, regress, progress
     - average of normal samples (trending line)

    :param in_data: Full data set.
    :param job_name: The name of job which generated the data.
    :param build_info: Information about the builds.
    :param show_trend_line: Show moving median (trending plot).
    :param name: Name of the plot
    :param color: Name of the color for the plot.
    :type in_data: OrderedDict
    :type job_name: str
    :type build_info: dict
    :type show_trend_line: bool
    :type name: str
    :type color: str
    :returns: Generated traces (list) and the evaluated result.
    :rtype: tuple(traces, result)
    """

    data_x = list(in_data.keys())
    data_y_pps = list(in_data.values())
    data_y_mpps = [float(sample) / 1e6 for sample in data_y_pps]

    # Build the hover texts and the x-axis timestamps, one entry per sample.
    hover_text = list()
    xaxis = list()
    for idx, key in enumerate(data_x):
        str_key = str(key)
        date = build_info[job_name][str_key][0]
        hover_str = (u"date: {date}<br>"
                     u"value [Mpps]: {value:.3f}<br>"
                     u"{sut}-ref: {build}<br>"
                     u"csit-ref: mrr-{period}-build-{build_nr}<br>"
                     u"testbed: {testbed}")
        # The SUT and the job period are derived from the job name; jobs
        # matching neither pattern get no hover text entry.
        if u"dpdk" in job_name:
            sut, period = u"dpdk", u"weekly"
        elif u"vpp" in job_name:
            sut, period = u"vpp", u"daily"
        else:
            sut, period = None, None
        if sut is not None:
            hover_text.append(hover_str.format(
                date=date,
                value=data_y_mpps[idx],
                sut=sut,
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                period=period,
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2]))

        # Date string layout: YYYYMMDD HH:MM (positions 9:11 are hours,
        # 12: are minutes).
        xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                              int(date[9:11]), int(date[12:])))

    # Re-key the samples by their timestamps for anomaly classification.
    data_pd = OrderedDict(zip(xaxis, data_y_pps))

    anomaly_classification, avgs_pps = classify_anomalies(data_pd)
    avgs_mpps = [avg_pps / 1e6 for avg_pps in avgs_pps]

    # Collect the anomalous samples together with their marker colors.
    anomalies = OrderedDict()
    anomalies_colors = list()
    anomalies_avgs = list()
    classification_to_color = {
        u"regression": 0.0,
        u"normal": 0.5,
        u"progression": 1.0
    }
    if anomaly_classification:
        for idx, (key, value) in enumerate(data_pd.items()):
            classification = anomaly_classification[idx]
            if classification not in \
                    (u"outlier", u"regression", u"progression"):
                continue
            anomalies[key] = value / 1e6
            anomalies_colors.append(classification_to_color[classification])
            anomalies_avgs.append(avgs_mpps[idx])
        # Append one value per classification so the full colorscale is
        # always present in the colorbar.
        anomalies_colors.extend([0.0, 0.5, 1.0])

    # Trace with the raw samples:
    trace_samples = plgo.Scatter(
        x=xaxis,
        y=data_y_mpps,
        mode=u"markers",
        line={
            u"width": 1
        },
        showlegend=True,
        legendgroup=name,
        name=f"{name}",
        marker={
            u"size": 5,
            u"color": color,
            u"symbol": u"circle",
        },
        text=hover_text,
        hoverinfo=u"text"
    )
    traces = [trace_samples, ]

    # Optional trace with the trend line (moving average):
    if show_trend_line:
        traces.append(plgo.Scatter(
            x=xaxis,
            y=avgs_mpps,
            mode=u"lines",
            line={
                u"shape": u"linear",
                u"width": 1,
                u"color": color,
            },
            showlegend=False,
            legendgroup=name,
            name=f"{name}",
            text=[f"trend [Mpps]: {avg:.3f}" for avg in avgs_mpps],
            hoverinfo=u"text+name"
        ))

    # Trace with the anomalies drawn as open circles over the trend line:
    traces.append(plgo.Scatter(
        x=list(anomalies.keys()),
        y=anomalies_avgs,
        mode=u"markers",
        hoverinfo=u"none",
        showlegend=False,
        legendgroup=name,
        name=f"{name}-anomalies",
        marker={
            u"size": 15,
            u"symbol": u"circle-open",
            u"color": anomalies_colors,
            u"colorscale": [
                [0.00, u"red"],
                [0.33, u"red"],
                [0.33, u"white"],
                [0.66, u"white"],
                [0.66, u"green"],
                [1.00, u"green"]
            ],
            u"showscale": True,
            u"line": {
                u"width": 2
            },
            u"colorbar": {
                u"y": 0.5,
                u"len": 0.8,
                u"title": u"Circles Marking Data Classification",
                u"titleside": u"right",
                u"titlefont": {
                    u"size": 14
                },
                u"tickmode": u"array",
                u"tickvals": [0.167, 0.500, 0.833],
                u"ticktext": [u"Regression", u"Normal", u"Progression"],
                u"ticks": u"",
                u"ticklen": 0,
                u"tickangle": -90,
                u"thickness": 10
            }
        }
    ))

    # The classification of the most recent sample is the evaluated result.
    last_classification = \
        anomaly_classification[-1] if anomaly_classification else None
    return traces, last_classification
320
321
def _generate_all_charts(spec, input_data):
    """Generate all charts specified in the specification file.

    For each plot in spec.cpta[u"plots"] a chart is generated, a csv/txt
    trending table is written per job, and regression/progression summary
    files are produced from the per-test anomaly classifications.

    :param spec: Specification.
    :param input_data: Full data set.
    :type spec: Specification
    :type input_data: InputData
    :returns: u"PASS" if no regression or outlier was classified,
        u"FAIL" otherwise (also when no classification was produced).
    :rtype: str
    """

    def _generate_chart(graph):
        """Generates the chart.

        Closure over builds_dict, build_info, input_data and spec defined in
        the enclosing function.

        :param graph: The graph to be generated
        :type graph: dict
        :returns: Dictionary with the job name, csv table with results and
            list of tests classification results.
        :rtype: dict
        """

        # Log messages are buffered and emitted at the end of this function.
        logs = list()

        logs.append(
            (u"INFO", f"  Generating the chart {graph.get(u'title', u'')} ...")
        )

        # Only the first job listed in the graph specification is processed.
        job_name = list(graph[u"data"].keys())[0]

        csv_tbl = list()
        res = dict()

        # Transform the data
        logs.append(
            (u"INFO",
             f"    Creating the data set for the {graph.get(u'type', u'')} "
             f"{graph.get(u'title', u'')}."
            )
        )

        if graph.get(u"include", None):
            data = input_data.filter_tests_by_name(
                graph, continue_on_error=True
            )
        else:
            data = input_data.filter_data(graph, continue_on_error=True)

        if data is None or data.empty:
            logging.error(u"No data.")
            return dict()

        # chart_data: test name -> OrderedDict(build nr -> receive rate);
        # chart_tags: test name -> tags (used for group filtering below).
        chart_data = dict()
        chart_tags = dict()
        for job, job_data in data.items():
            if job != job_name:
                continue
            for index, bld in job_data.items():
                for test_name, test in bld.items():
                    if chart_data.get(test_name, None) is None:
                        chart_data[test_name] = OrderedDict()
                    try:
                        chart_data[test_name][int(index)] = \
                            test[u"result"][u"receive-rate"]
                        chart_tags[test_name] = test.get(u"tags", None)
                    except (KeyError, TypeError):
                        # Tests without a receive-rate result are skipped.
                        pass

        # Add items to the csv table:
        for tst_name, tst_data in chart_data.items():
            tst_lst = list()
            for bld in builds_dict[job_name]:
                # Empty string for builds with no sample for this test.
                itm = tst_data.get(int(bld), u'')
                # CSIT-1180: Itm will be list, compute stats.
                tst_lst.append(str(itm))
            csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')

        # Generate traces:
        traces = list()
        index = 0  # Running index into COLORS, shared across all tests.
        groups = graph.get(u"groups", None)
        visibility = list()

        if groups:
            # Grouped chart: one visibility list per group, selectable via
            # a dropdown added to the layout below.
            for group in groups:
                visible = list()
                for tag in group:
                    for tst_name, test_data in chart_data.items():
                        if not test_data:
                            logs.append(
                                (u"WARNING", f"No data for the test {tst_name}")
                            )
                            continue
                        if tag not in chart_tags[tst_name]:
                            continue
                        message = f"index: {index}, test: {tst_name}"
                        try:
                            trace, rslt = _generate_trending_traces(
                                test_data,
                                job_name=job_name,
                                build_info=build_info,
                                name=u'-'.join(tst_name.split(u'.')[-1].
                                               split(u'-')[2:-1]),
                                color=COLORS[index])
                        except IndexError:
                            # Ran past the end of the COLORS palette.
                            logs.append(
                                (u"ERROR", f"Out of colors: {message}")
                            )
                            logging.error(f"Out of colors: {message}")
                            index += 1
                            continue
                        traces.extend(trace)
                        visible.extend([True for _ in range(len(trace))])
                        res[tst_name] = rslt
                        index += 1
                        # Each tag matches at most one test per group.
                        break
                visibility.append(visible)
        else:
            # Ungrouped chart: one set of traces per test.
            for tst_name, test_data in chart_data.items():
                if not test_data:
                    logs.append(
                        (u"WARNING", f"No data for the test {tst_name}")
                    )
                    continue
                message = f"index: {index}, test: {tst_name}"
                try:
                    trace, rslt = _generate_trending_traces(
                        test_data,
                        job_name=job_name,
                        build_info=build_info,
                        name=u'-'.join(
                            tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                        color=COLORS[index])
                except IndexError:
                    # Ran past the end of the COLORS palette.
                    logs.append((u"ERROR", f"Out of colors: {message}"))
                    logging.error(f"Out of colors: {message}")
                    index += 1
                    continue
                traces.extend(trace)
                res[tst_name] = rslt
                index += 1

        if traces:
            # Generate the chart:
            try:
                layout = deepcopy(graph[u"layout"])
            except KeyError as err:
                logging.error(u"Finished with error: No layout defined")
                logging.error(repr(err))
                return dict()
            if groups:
                # Build per-group visibility masks: show[i] is True exactly
                # for the traces belonging to group i.
                show = list()
                for i in range(len(visibility)):
                    visible = list()
                    for vis_idx, _ in enumerate(visibility):
                        for _ in range(len(visibility[vis_idx])):
                            visible.append(i == vis_idx)
                    show.append(visible)

                # Dropdown buttons: u"All" plus one button per group.
                buttons = list()
                buttons.append(dict(
                    label=u"All",
                    method=u"update",
                    args=[{u"visible": [True for _ in range(len(show[0]))]}, ]
                ))
                for i in range(len(groups)):
                    try:
                        label = graph[u"group-names"][i]
                    except (IndexError, KeyError):
                        label = f"Group {i + 1}"
                    buttons.append(dict(
                        label=label,
                        method=u"update",
                        args=[{u"visible": show[i]}, ]
                    ))

                layout[u"updatemenus"] = list([
                    dict(
                        active=0,
                        type=u"dropdown",
                        direction=u"down",
                        xanchor=u"left",
                        yanchor=u"bottom",
                        x=-0.12,
                        y=1.0,
                        buttons=buttons
                    )
                ])

            name_file = (
                f"{spec.cpta[u'output-file']}/{graph[u'output-file-name']}"
                f"{spec.cpta[u'output-file-type']}")

            logs.append((u"INFO", f"    Writing the file {name_file} ..."))
            plpl = plgo.Figure(data=traces, layout=layout)
            try:
                ploff.plot(plpl, show_link=False, auto_open=False,
                           filename=name_file)
            except plerr.PlotlyEmptyDataError:
                logs.append((u"WARNING", u"No data for the plot. Skipped."))

        # Flush the buffered log messages at their recorded levels.
        for level, line in logs:
            if level == u"INFO":
                logging.info(line)
            elif level == u"ERROR":
                logging.error(line)
            elif level == u"DEBUG":
                logging.debug(line)
            elif level == u"CRITICAL":
                logging.critical(line)
            elif level == u"WARNING":
                logging.warning(line)

        return {u"job_name": job_name, u"csv_table": csv_tbl, u"results": res}

    # builds_dict: job name -> list of usable build numbers (as strings).
    builds_dict = dict()
    for job in spec.input[u"builds"].keys():
        if builds_dict.get(job, None) is None:
            builds_dict[job] = list()
        for build in spec.input[u"builds"][job]:
            status = build[u"status"]
            if status not in (u"failed", u"not found", u"removed"):
                builds_dict[job].append(str(build[u"build"]))

    # Create "build ID": "date" dict:
    # build_info: job name -> build nr -> (date, version, testbed).
    build_info = dict()
    tb_tbl = spec.environment.get(u"testbeds", None)
    for job_name, job_data in builds_dict.items():
        if build_info.get(job_name, None) is None:
            build_info[job_name] = OrderedDict()
        for build in job_data:
            testbed = u""
            tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                # Map the testbed IP to its human-readable name.
                testbed = tb_tbl.get(tb_ip, u"")
            build_info[job_name][build] = (
                input_data.metadata(job_name, build).get(u"generated", u""),
                input_data.metadata(job_name, build).get(u"version", u""),
                testbed
            )

    # anomaly_classifications: job name -> test name -> classification.
    anomaly_classifications = dict()

    # Create the table header:
    csv_tables = dict()
    for job_name in builds_dict:
        if csv_tables.get(job_name, None) is None:
            csv_tables[job_name] = list()
        header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
        csv_tables[job_name].append(header)
        build_dates = [x[0] for x in build_info[job_name].values()]
        header = f"Build Date:,{u','.join(build_dates)}\n"
        csv_tables[job_name].append(header)
        versions = [x[1] for x in build_info[job_name].values()]
        header = f"Version:,{u','.join(versions)}\n"
        csv_tables[job_name].append(header)

    # Generate the charts and collect csv rows and classifications.
    for chart in spec.cpta[u"plots"]:
        result = _generate_chart(chart)
        if not result:
            continue

        csv_tables[result[u"job_name"]].extend(result[u"csv_table"])

        if anomaly_classifications.get(result[u"job_name"], None) is None:
            anomaly_classifications[result[u"job_name"]] = dict()
        anomaly_classifications[result[u"job_name"]].update(result[u"results"])

    # Write the tables:
    for job_name, csv_table in csv_tables.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
        with open(f"{file_name}.csv", u"w") as file_handler:
            file_handler.writelines(csv_table)

        # Re-read the csv and render it as a pretty text table.
        txt_table = None
        with open(f"{file_name}.csv", u"rt") as csv_file:
            csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
            line_nr = 0
            for row in csv_content:
                if txt_table is None:
                    # First row becomes the table header.
                    txt_table = prettytable.PrettyTable(row)
                else:
                    if line_nr > 1:
                        # Data rows (after date/version rows): convert the
                        # values from pps to Mpps where they are numeric.
                        for idx, item in enumerate(row):
                            try:
                                row[idx] = str(round(float(item) / 1000000, 2))
                            except ValueError:
                                pass
                    try:
                        txt_table.add_row(row)
                    # PrettyTable raises Exception
                    except Exception as err:
                        logging.warning(
                            f"Error occurred while generating TXT table:\n{err}"
                        )
                line_nr += 1
            # NOTE(review): txt_table stays None if the csv is empty, which
            # would raise AttributeError here — confirm headers are always
            # written above.
            txt_table.align[u"Build Number:"] = u"l"
        with open(f"{file_name}.txt", u"w") as txt_file:
            txt_file.write(str(txt_table))

    # Evaluate result:
    if anomaly_classifications:
        result = u"PASS"
        for job_name, job_data in anomaly_classifications.items():
            # Regressions (and outliers) fail the run; list regressions.
            file_name = \
                f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"regression":
                        txt_file.write(test_name + u'\n')
                    if classification in (u"regression", u"outlier"):
                        result = u"FAIL"
            # Progressions are informational only.
            file_name = \
                f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"progression":
                        txt_file.write(test_name + u'\n')
    else:
        result = u"FAIL"

    logging.info(f"Partial results: {anomaly_classifications}")
    logging.info(f"Result: {result}")

    return result

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.