Trending: Add Latency trending
[csit.git] / resources / tools / presentation / generator_cpta.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Generation of Continuous Performance Trending and Analysis.
15 """
16
17 import re
18 import logging
19 import csv
20
21 from collections import OrderedDict
22 from datetime import datetime
23 from copy import deepcopy
24
25 import prettytable
26 import plotly.offline as ploff
27 import plotly.graph_objs as plgo
28 import plotly.exceptions as plerr
29
30 from pal_utils import archive_input_data, execute_command, classify_anomalies
31
32
# Command to build the html format of the report
HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \
               u'-b html -E ' \
               u'-t html ' \
               u'-D version="{date}" ' \
               u'{working_dir} ' \
               u'{build_dir}/'

# .css file for the html format of the report
THEME_OVERRIDES = u"""/* override table width restrictions */
.wy-nav-content {
    max-width: 1200px !important;
}
.rst-content blockquote {
    margin-left: 0px;
    line-height: 18px;
    margin-bottom: 0px;
}
.wy-menu-vertical a {
    display: inline-block;
    line-height: 18px;
    padding: 0 2em;
    display: block;
    position: relative;
    font-size: 90%;
    color: #d9d9d9
}
.wy-menu-vertical li.current a {
    color: gray;
    border-right: solid 1px #c9c9c9;
    padding: 0 3em;
}
.wy-menu-vertical li.toctree-l2.current > a {
    background: #c9c9c9;
    padding: 0 3em;
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
    display: block;
    background: #c9c9c9;
    padding: 0 4em;
}
.wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
    display: block;
    background: #bdbdbd;
    padding: 0 5em;
}
.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
    color: #404040;
    padding: 0 2em;
    font-weight: bold;
    position: relative;
    background: #fcfcfc;
    border: none;
        border-top-width: medium;
        border-bottom-width: medium;
        border-top-style: none;
        border-bottom-style: none;
        border-top-color: currentcolor;
        border-bottom-color: currentcolor;
    padding-left: 2em -4px;
}
"""

# Color palette for the trending traces; each plotted test is assigned one
# color by its processing index (running out of colors raises IndexError,
# which the chart generator handles).
COLORS = (
    u"#1A1110",
    u"#DA2647",
    u"#214FC6",
    u"#01786F",
    u"#BD8260",
    u"#FFD12A",
    u"#A6E7FF",
    u"#738276",
    u"#C95A49",
    u"#FC5A8D",
    u"#CEC8EF",
    u"#391285",
    u"#6F2DA8",
    u"#FF878D",
    u"#45A27D",
    u"#FFD0B9",
    u"#FD5240",
    u"#DB91EF",
    u"#44D7A8",
    u"#4F86F7",
    u"#84DE02",
    u"#FFCFF1",
    u"#614051"
)
121
122
def generate_cpta(spec, data):
    """Generate all formats and versions of the Continuous Performance Trending
    and Analysis.

    :param spec: Specification read from the specification file.
    :param data: Full data set.
    :type spec: Specification
    :type data: InputData
    :returns: Result of the chart generation, u"PASS" or u"FAIL".
    :rtype: str
    """

    logging.info(u"Generating the Continuous Performance Trending and Analysis "
                 u"...")

    ret_code = _generate_all_charts(spec, data)

    # Build the html pages from the generated sources.
    cmd = HTML_BUILDER.format(
        date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
        working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
        build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
    execute_command(cmd)

    # Both css patch destinations receive the same content; loop instead of
    # duplicating the write block.
    for css_path_key in (u'DIR[CSS_PATCH_FILE]', u'DIR[CSS_PATCH_FILE2]'):
        with open(spec.environment[u'paths'][css_path_key], u'w') as css_file:
            css_file.write(THEME_OVERRIDES)

    if spec.environment.get(u"archive-inputs", False):
        archive_input_data(spec)

    logging.info(u"Done.")

    return ret_code
158
159
def _generate_trending_traces(in_data, job_name, build_info,
                              name=u"", color=u"", incl_tests=u"mrr"):
    """Generate the trending traces:
     - samples,
     - outliers, regress, progress
     - average of normal samples (trending line)

    :param in_data: Full data set.
    :param job_name: The name of job which generated the data.
    :param build_info: Information about the builds.
    :param name: Name of the plot
    :param color: Name of the color for the plot.
    :param incl_tests: Included tests, accepted values: mrr, ndr, pdr, pdr-lat
    :type in_data: OrderedDict
    :type job_name: str
    :type build_info: dict
    :type name: str
    :type color: str
    :type incl_tests: str
    :returns: Generated traces (list) and the evaluated result.
    :rtype: tuple(traces, result)
    """

    if incl_tests not in (u"mrr", u"ndr", u"pdr", u"pdr-lat"):
        return list(), None

    data_x = list(in_data.keys())
    data_y_pps = list()
    data_y_mpps = list()
    data_y_stdev = list()
    if incl_tests == u"pdr-lat":
        # Latency trending: plot direction1 PDR50 average latency; there is
        # no stdev for latency samples.
        for item in in_data.values():
            data_y_pps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
            data_y_stdev.append(float(u"nan"))
            data_y_mpps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
        multi = 1.0
    else:
        for item in in_data.values():
            data_y_pps.append(float(item[u"receive-rate"]))
            data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
            data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
        multi = 1e6
    hover_text = list()
    xaxis = list()
    for index, key in enumerate(data_x):
        str_key = str(key)
        date = build_info[job_name][str_key][0]
        hover_str = (u"date: {date}<br>"
                     u"{property} [Mpps]: <val><br>"
                     u"<stdev>"
                     u"{sut}-ref: {build}<br>"
                     u"csit-ref: {test}-{period}-build-{build_nr}<br>"
                     u"testbed: {testbed}")
        if incl_tests == u"mrr":
            hover_str = hover_str.replace(
                u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
            )
        else:
            hover_str = hover_str.replace(u"<stdev>", u"")
        if incl_tests == u"pdr-lat":
            # Bug fix: the template holds the unformatted u"{property}"
            # placeholder, not the literal u"throughput", so the whole
            # placeholder with its units must be replaced, otherwise the
            # latency hover keeps showing u"[Mpps]".
            hover_str = hover_str.replace(
                u"{property} [Mpps]", u"latency [s]"
            )
            hover_str = hover_str.replace(u"<val>", u"{value:.1e}")
        else:
            hover_str = hover_str.replace(u"<val>", u"{value:.3f}")
        if u"-cps" in name:
            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]")
        # Bug fix: replacing u"throughput" in the template never matched
        # (the template only contains u"{property}"); supply the intended
        # quantity name via the format argument instead.
        if incl_tests == u"mrr":
            hover_prop = u"average"
        elif u"-cps" in name:
            hover_prop = u"connection rate"
        else:
            hover_prop = u"throughput"
        if u"dpdk" in job_name:
            hover_text.append(hover_str.format(
                date=date,
                property=hover_prop,
                value=data_y_mpps[index],
                sut=u"dpdk",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2]))
        elif u"vpp" in job_name:
            hover_text.append(hover_str.format(
                date=date,
                property=hover_prop,
                value=data_y_mpps[index],
                sut=u"vpp",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2]))

        # The u"generated" metadata is a fixed-width timestamp string;
        # parse it into a datetime for the x axis.
        xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                              int(date[9:11]), int(date[12:])))

    data_pd = OrderedDict()
    for key, value in zip(xaxis, data_y_pps):
        data_pd[key] = value

    try:
        anomaly_classification, avgs_pps, stdevs_pps = \
            classify_anomalies(data_pd)
    except ValueError as err:
        logging.info(f"{err} Skipping")
        return list(), None
    avgs_mpps = [avg_pps / multi for avg_pps in avgs_pps]
    stdevs_mpps = [stdev_pps / multi for stdev_pps in stdevs_pps]

    anomalies = OrderedDict()
    anomalies_colors = list()
    anomalies_avgs = list()
    # Colorscale positions of the three classifications.
    anomaly_color = {
        u"regression": 0.0,
        u"normal": 0.5,
        u"progression": 1.0
    }
    if anomaly_classification:
        for index, (key, value) in enumerate(data_pd.items()):
            if anomaly_classification[index] in (u"regression", u"progression"):
                anomalies[key] = value / multi
                anomalies_colors.append(
                    anomaly_color[anomaly_classification[index]])
                anomalies_avgs.append(avgs_mpps[index])
        # Make sure the whole colorscale is always present in the colorbar.
        anomalies_colors.extend([0.0, 0.5, 1.0])

    # Create traces

    trace_samples = plgo.Scatter(
        x=xaxis,
        y=data_y_mpps,
        mode=u"markers",
        line={
            u"width": 1
        },
        showlegend=True,
        legendgroup=name,
        name=f"{name}",
        marker={
            u"size": 5,
            u"color": color,
            u"symbol": u"circle",
        },
        text=hover_text,
        hoverinfo=u"text+name"
    )
    traces = [trace_samples, ]

    trend_hover_text = list()
    for idx in range(len(data_x)):
        if incl_tests == u"pdr-lat":
            trend_hover_str = (
                f"trend [s]: {avgs_mpps[idx]:.1e}<br>"
            )
        else:
            trend_hover_str = (
                f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
                f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
            )
        trend_hover_text.append(trend_hover_str)

    trace_trend = plgo.Scatter(
        x=xaxis,
        y=avgs_mpps,
        mode=u"lines",
        line={
            u"shape": u"linear",
            u"width": 1,
            u"color": color,
        },
        showlegend=False,
        legendgroup=name,
        name=f"{name}",
        text=trend_hover_text,
        hoverinfo=u"text+name"
    )
    traces.append(trace_trend)

    # For latency lower is better, so the colorscale is inverted compared
    # to throughput trending.
    if incl_tests == u"pdr-lat":
        colorscale = [
            [0.00, u"green"],
            [0.33, u"green"],
            [0.33, u"white"],
            [0.66, u"white"],
            [0.66, u"red"],
            [1.00, u"red"]
        ]
        ticktext = [u"Progression", u"Normal", u"Regression"]
    else:
        colorscale = [
            [0.00, u"red"],
            [0.33, u"red"],
            [0.33, u"white"],
            [0.66, u"white"],
            [0.66, u"green"],
            [1.00, u"green"]
        ]
        ticktext = [u"Regression", u"Normal", u"Progression"]
    trace_anomalies = plgo.Scatter(
        x=list(anomalies.keys()),
        y=anomalies_avgs,
        mode=u"markers",
        hoverinfo=u"none",
        showlegend=False,
        legendgroup=name,
        name=f"{name}-anomalies",
        marker={
            u"size": 15,
            u"symbol": u"circle-open",
            u"color": anomalies_colors,
            u"colorscale": colorscale,
            u"showscale": True,
            u"line": {
                u"width": 2
            },
            u"colorbar": {
                u"y": 0.5,
                u"len": 0.8,
                u"title": u"Circles Marking Data Classification",
                u"titleside": u"right",
                u"titlefont": {
                    u"size": 14
                },
                u"tickmode": u"array",
                u"tickvals": [0.167, 0.500, 0.833],
                u"ticktext": ticktext,
                u"ticks": u"",
                u"ticklen": 0,
                u"tickangle": -90,
                u"thickness": 10
            }
        }
    )
    traces.append(trace_anomalies)

    if anomaly_classification:
        return traces, anomaly_classification[-1]

    return traces, None
398
399
def _generate_all_charts(spec, input_data):
    """Generate all charts specified in the specification file.

    :param spec: Specification.
    :param input_data: Full data set.
    :type spec: Specification
    :type input_data: InputData
    :returns: Overall result, u"PASS" or u"FAIL" (FAIL when a regression or
        outlier was classified, or when no classification was produced).
    :rtype: str
    """

    def _generate_chart(graph):
        """Generates the chart.

        :param graph: The graph to be generated
        :type graph: dict
        :returns: Dictionary with the job name, csv table with results and
            list of tests classification results.
        :rtype: dict
        """

        logging.info(f"  Generating the chart {graph.get(u'title', u'')} ...")

        job_name = list(graph[u"data"].keys())[0]

        # Transform the data
        logging.info(
            f"    Creating the data set for the {graph.get(u'type', u'')} "
            f"{graph.get(u'title', u'')}."
        )

        data = input_data.filter_tests_by_name(
            graph,
            params=[u"type", u"result", u"throughput", u"latency", u"tags"],
            continue_on_error=True
        )

        if data is None or data.empty:
            logging.error(u"No data.")
            return dict()

        return_lst = list()

        # One chart per (test type, nr of cores) combination.
        for ttype in graph.get(u"test-type", (u"mrr", )):
            for core in graph.get(u"core", tuple()):
                csv_tbl = list()
                csv_tbl_lat_1 = list()
                csv_tbl_lat_2 = list()
                res = dict()
                chart_data = dict()
                chart_tags = dict()
                # Collect per-build samples for every test matching the
                # "include" regular expressions.
                for item in graph.get(u"include", tuple()):
                    reg_ex = re.compile(str(item.format(core=core)).lower())
                    for job, job_data in data.items():
                        if job != job_name:
                            continue
                        for index, bld in job_data.items():
                            for test_id, test in bld.items():
                                if not re.match(reg_ex, str(test_id).lower()):
                                    continue
                                if chart_data.get(test_id, None) is None:
                                    chart_data[test_id] = OrderedDict()
                                try:
                                    lat_1 = u""
                                    lat_2 = u""
                                    if ttype == u"mrr":
                                        rate = test[u"result"][u"receive-rate"]
                                        stdev = \
                                            test[u"result"][u"receive-stdev"]
                                    elif ttype == u"ndr":
                                        rate = \
                                            test["throughput"][u"NDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                    elif ttype == u"pdr":
                                        rate = \
                                            test["throughput"][u"PDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                        lat_1 = test[u"latency"][u"PDR50"]\
                                            [u"direction1"][u"avg"]
                                        lat_2 = test[u"latency"][u"PDR50"]\
                                            [u"direction2"][u"avg"]
                                    else:
                                        continue
                                    chart_data[test_id][int(index)] = {
                                        u"receive-rate": rate,
                                        u"receive-stdev": stdev
                                    }
                                    if ttype == u"pdr":
                                        chart_data[test_id][int(index)].update(
                                            {
                                                u"lat_1": lat_1,
                                                u"lat_2": lat_2
                                            }
                                        )
                                    chart_tags[test_id] = \
                                        test.get(u"tags", None)
                                except (KeyError, TypeError):
                                    # Incomplete test results, skip sample.
                                    pass

                # Add items to the csv table:
                for tst_name, tst_data in chart_data.items():
                    tst_lst = list()
                    tst_lst_lat_1 = list()
                    tst_lst_lat_2 = list()
                    for bld in builds_dict[job_name]:
                        itm = tst_data.get(int(bld), dict())
                        # CSIT-1180: Itm will be list, compute stats.
                        try:
                            tst_lst.append(str(itm.get(u"receive-rate", u"")))
                            if ttype == u"pdr":
                                tst_lst_lat_1.append(
                                    str(itm.get(u"lat_1", u""))
                                )
                                tst_lst_lat_2.append(
                                    str(itm.get(u"lat_2", u""))
                                )
                        except AttributeError:
                            tst_lst.append(u"")
                            if ttype == u"pdr":
                                tst_lst_lat_1.append(u"")
                                tst_lst_lat_2.append(u"")
                    csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
                    csv_tbl_lat_1.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n"
                    )
                    csv_tbl_lat_2.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n"
                    )

                # Generate traces:
                traces = list()
                traces_lat = list()
                index = 0
                groups = graph.get(u"groups", None)
                visibility = list()

                if groups:
                    # Grouped chart: one visibility mask per group, used by
                    # the dropdown menu below.
                    for group in groups:
                        visible = list()
                        for tag in group:
                            for tst_name, test_data in chart_data.items():
                                if not test_data:
                                    logging.warning(
                                        f"No data for the test {tst_name}"
                                    )
                                    continue
                                if tag not in chart_tags[tst_name]:
                                    continue
                                try:
                                    trace, rslt = _generate_trending_traces(
                                        test_data,
                                        job_name=job_name,
                                        build_info=build_info,
                                        name=u'-'.join(tst_name.split(u'.')[-1].
                                                       split(u'-')[2:-1]),
                                        color=COLORS[index],
                                        incl_tests=ttype
                                    )
                                except IndexError:
                                    logging.error(f"Out of colors: index: "
                                                  f"{index}, test: {tst_name}")
                                    index += 1
                                    continue
                                traces.extend(trace)
                                visible.extend(
                                    [True for _ in range(len(trace))]
                                )
                                res[tst_name] = rslt
                                index += 1
                                break
                        visibility.append(visible)
                else:
                    for tst_name, test_data in chart_data.items():
                        if not test_data:
                            logging.warning(f"No data for the test {tst_name}")
                            continue
                        try:
                            trace, rslt = _generate_trending_traces(
                                test_data,
                                job_name=job_name,
                                build_info=build_info,
                                name=u'-'.join(
                                    tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                                color=COLORS[index],
                                incl_tests=ttype
                            )
                            # For pdr also generate the latency trending
                            # traces for a separate chart.
                            if ttype == u"pdr":
                                trace_lat, _ = _generate_trending_traces(
                                    test_data,
                                    job_name=job_name,
                                    build_info=build_info,
                                    name=u'-'.join(
                                        tst_name.split(u'.')[-1].split(
                                            u'-')[2:-1]),
                                    color=COLORS[index],
                                    incl_tests=u"pdr-lat"
                                )
                                traces_lat.extend(trace_lat)
                        except IndexError:
                            logging.error(
                                f"Out of colors: index: "
                                f"{index}, test: {tst_name}"
                            )
                            index += 1
                            continue
                        traces.extend(trace)
                        res[tst_name] = rslt
                        index += 1

                if traces:
                    # Generate the chart:
                    try:
                        layout = deepcopy(graph[u"layout"])
                    except KeyError as err:
                        logging.error(u"Finished with error: No layout defined")
                        logging.error(repr(err))
                        return dict()
                    if groups:
                        # Build one show-mask per group: only the traces of
                        # the selected group are visible.
                        show = list()
                        for i in range(len(visibility)):
                            visible = list()
                            for vis_idx, _ in enumerate(visibility):
                                for _ in range(len(visibility[vis_idx])):
                                    visible.append(i == vis_idx)
                            show.append(visible)

                        buttons = list()
                        buttons.append(dict(
                            label=u"All",
                            method=u"update",
                            args=[{u"visible":
                                       [True for _ in range(len(show[0]))]}, ]
                        ))
                        for i in range(len(groups)):
                            try:
                                label = graph[u"group-names"][i]
                            except (IndexError, KeyError):
                                label = f"Group {i + 1}"
                            buttons.append(dict(
                                label=label,
                                method=u"update",
                                args=[{u"visible": show[i]}, ]
                            ))

                        layout[u"updatemenus"] = list([
                            dict(
                                active=0,
                                type=u"dropdown",
                                direction=u"down",
                                xanchor=u"left",
                                yanchor=u"bottom",
                                x=-0.12,
                                y=1.0,
                                buttons=buttons
                            )
                        ])

                    name_file = (
                        f"{spec.cpta[u'output-file']}/"
                        f"{graph[u'output-file-name']}.html"
                    )
                    name_file = name_file.format(core=core, test_type=ttype)

                    logging.info(f"    Writing the file {name_file}")
                    plpl = plgo.Figure(data=traces, layout=layout)
                    try:
                        ploff.plot(
                            plpl,
                            show_link=False,
                            auto_open=False,
                            filename=name_file
                        )
                    except plerr.PlotlyEmptyDataError:
                        logging.warning(u"No data for the plot. Skipped.")

                if traces_lat:
                    # Generate the latency chart with its own y axis.
                    try:
                        layout = deepcopy(graph[u"layout"])
                        layout[u"yaxis"][u"title"] = u"Latency [s]"
                        layout[u"yaxis"][u"tickformat"] = u".3s"
                    except KeyError as err:
                        logging.error(u"Finished with error: No layout defined")
                        logging.error(repr(err))
                        return dict()
                    name_file = (
                        f"{spec.cpta[u'output-file']}/"
                        f"{graph[u'output-file-name']}-lat.html"
                    )
                    name_file = name_file.format(core=core, test_type=ttype)

                    logging.info(f"    Writing the file {name_file}")
                    plpl = plgo.Figure(data=traces_lat, layout=layout)
                    try:
                        ploff.plot(
                            plpl,
                            show_link=False,
                            auto_open=False,
                            filename=name_file
                        )
                    except plerr.PlotlyEmptyDataError:
                        logging.warning(u"No data for the plot. Skipped.")

                return_lst.append(
                    {
                        u"job_name": job_name,
                        u"csv_table": csv_tbl,
                        u"csv_lat_1": csv_tbl_lat_1,
                        u"csv_lat_2": csv_tbl_lat_2,
                        u"results": res
                    }
                )

        return return_lst

    # Collect the builds to process, skipping unusable ones.
    builds_dict = dict()
    for job, builds in spec.input.items():
        if builds_dict.get(job, None) is None:
            builds_dict[job] = list()
        for build in builds:
            if build[u"status"] not in (u"failed", u"not found", u"removed",
                                        None):
                builds_dict[job].append(str(build[u"build"]))

    # Create "build ID": "date" dict:
    build_info = dict()
    tb_tbl = spec.environment.get(u"testbeds", None)
    for job_name, job_data in builds_dict.items():
        if build_info.get(job_name, None) is None:
            build_info[job_name] = OrderedDict()
        for build in job_data:
            testbed = u""
            tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                # Map the testbed IP to its human readable name.
                testbed = tb_tbl.get(tb_ip, u"")
            # (generated date, version, testbed) per build.
            build_info[job_name][build] = (
                input_data.metadata(job_name, build).get(u"generated", u""),
                input_data.metadata(job_name, build).get(u"version", u""),
                testbed
            )

    anomaly_classifications = dict()

    # Create the table header:
    csv_tables = dict()
    csv_tables_l1 = dict()
    csv_tables_l2 = dict()
    for job_name in builds_dict:
        if csv_tables.get(job_name, None) is None:
            csv_tables[job_name] = list()
        if csv_tables_l1.get(job_name, None) is None:
            csv_tables_l1[job_name] = list()
        if csv_tables_l2.get(job_name, None) is None:
            csv_tables_l2[job_name] = list()
        header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        build_dates = [x[0] for x in build_info[job_name].values()]
        header = f"Build Date:,{u','.join(build_dates)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        versions = [x[1] for x in build_info[job_name].values()]
        header = f"Version:,{u','.join(versions)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        testbed = [x[2] for x in build_info[job_name].values()]
        header = f"Test bed:,{u','.join(testbed)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)

    for chart in spec.cpta[u"plots"]:
        results = _generate_chart(chart)
        if not results:
            continue

        for result in results:
            csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
            csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
            csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])

            if anomaly_classifications.get(result[u"job_name"], None) is None:
                anomaly_classifications[result[u"job_name"]] = dict()
            anomaly_classifications[result[u"job_name"]].\
                update(result[u"results"])

    # Write the tables:
    for job_name, csv_table in csv_tables.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)

        # Re-read the csv and render it as a txt table; the first row
        # becomes the table header.
        txt_table = None
        with open(f"{file_name}.csv", u"rt") as csv_file:
            csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
            line_nr = 0
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    if line_nr > 1:
                        # Convert numeric cells from pps to Mpps; cells
                        # which do not parse as floats are kept as-is.
                        for idx, item in enumerate(row):
                            try:
                                row[idx] = str(round(float(item) / 1000000, 2))
                            except ValueError:
                                pass
                    try:
                        txt_table.add_row(row)
                    # PrettyTable raises Exception
                    except Exception as err:
                        logging.warning(
                            f"Error occurred while generating TXT table:\n{err}"
                        )
                line_nr += 1
            txt_table.align[u"Build Number:"] = u"l"
        with open(f"{file_name}.txt", u"wt") as txt_file:
            txt_file.write(str(txt_table))

    # Latency csv tables, one per direction.
    for job_name, csv_table in csv_tables_l1.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)
    for job_name, csv_table in csv_tables_l2.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)

    # Evaluate result:
    if anomaly_classifications:
        result = u"PASS"
        for job_name, job_data in anomaly_classifications.items():
            file_name = \
                f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"regression":
                        txt_file.write(test_name + u'\n')
                    if classification in (u"regression", u"outlier"):
                        result = u"FAIL"
            file_name = \
                f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"progression":
                        txt_file.write(test_name + u'\n')
    else:
        result = u"FAIL"

    logging.info(f"Partial results: {anomaly_classifications}")
    logging.info(f"Result: {result}")

    return result