Trending: Add TRex ndrpdr tests
[csit.git] / resources / tools / presentation / generator_cpta.py
# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Generation of Continuous Performance Trending and Analysis.
"""
16
17 import re
18 import logging
19 import csv
20
21 from collections import OrderedDict
22 from datetime import datetime
23 from copy import deepcopy
24
25 import prettytable
26 import plotly.offline as ploff
27 import plotly.graph_objs as plgo
28 import plotly.exceptions as plerr
29
30 from pal_utils import archive_input_data, execute_command, classify_anomalies
31
32
# Command to build the html format of the report.
# The {date}, {working_dir} and {build_dir} placeholders are filled in by
# generate_cpta() before the command is executed.
HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \
               u'-b html -E ' \
               u'-t html ' \
               u'-D version="{date}" ' \
               u'{working_dir} ' \
               u'{build_dir}/'
40
# .css file for the html format of the report.
# Written verbatim over the sphinx theme's css patch files by generate_cpta().
THEME_OVERRIDES = u"""/* override table width restrictions */
.wy-nav-content {
    max-width: 1200px !important;
}
.rst-content blockquote {
    margin-left: 0px;
    line-height: 18px;
    margin-bottom: 0px;
}
.wy-menu-vertical a {
    display: inline-block;
    line-height: 18px;
    padding: 0 2em;
    display: block;
    position: relative;
    font-size: 90%;
    color: #d9d9d9
}
.wy-menu-vertical li.current a {
    color: gray;
    border-right: solid 1px #c9c9c9;
    padding: 0 3em;
}
.wy-menu-vertical li.toctree-l2.current > a {
    background: #c9c9c9;
    padding: 0 3em;
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
    display: block;
    background: #c9c9c9;
    padding: 0 4em;
}
.wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
    display: block;
    background: #bdbdbd;
    padding: 0 5em;
}
.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
    color: #404040;
    padding: 0 2em;
    font-weight: bold;
    position: relative;
    background: #fcfcfc;
    border: none;
        border-top-width: medium;
        border-bottom-width: medium;
        border-top-style: none;
        border-bottom-style: none;
        border-top-color: currentcolor;
        border-bottom-color: currentcolor;
    padding-left: 2em -4px;
}
"""
95
# Color palette for the per-test traces; _generate_chart() indexes into this
# tuple sequentially and logs "Out of colors" when a chart has more tests
# than there are entries here.
COLORS = (
    u"#1A1110",
    u"#DA2647",
    u"#214FC6",
    u"#01786F",
    u"#BD8260",
    u"#FFD12A",
    u"#A6E7FF",
    u"#738276",
    u"#C95A49",
    u"#FC5A8D",
    u"#CEC8EF",
    u"#391285",
    u"#6F2DA8",
    u"#FF878D",
    u"#45A27D",
    u"#FFD0B9",
    u"#FD5240",
    u"#DB91EF",
    u"#44D7A8",
    u"#4F86F7",
    u"#84DE02",
    u"#FFCFF1",
    u"#614051"
)
121
122
def generate_cpta(spec, data):
    """Generate all formats and versions of the Continuous Performance Trending
    and Analysis.

    Generates the charts and csv tables, builds the html report via sphinx,
    patches the theme css and optionally archives the input data.

    :param spec: Specification read from the specification file.
    :param data: Full data set.
    :type spec: Specification
    :type data: InputData
    :returns: Result of the chart generation, u"PASS" or u"FAIL".
    :rtype: str
    """

    logging.info(u"Generating the Continuous Performance Trending and Analysis "
                 u"...")

    ret_code = _generate_all_charts(spec, data)

    cmd = HTML_BUILDER.format(
        date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
        working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
        build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
    execute_command(cmd)

    # The same css override is written to both locations expected by the
    # sphinx theme (previously two duplicated with-blocks).
    for css_key in (u'DIR[CSS_PATCH_FILE]', u'DIR[CSS_PATCH_FILE2]'):
        with open(spec.environment[u'paths'][css_key], u'w') as css_file:
            css_file.write(THEME_OVERRIDES)

    if spec.environment.get(u"archive-inputs", False):
        archive_input_data(spec)

    logging.info(u"Done.")

    return ret_code
158
159
def _generate_trending_traces(in_data, job_name, build_info,
                              name=u"", color=u"", incl_tests=u"mrr"):
    """Generate the trending traces:
     - samples,
     - outliers, regress, progress
     - average of normal samples (trending line)

    :param in_data: Full data set.
    :param job_name: The name of job which generated the data.
    :param build_info: Information about the builds.
    :param name: Name of the plot
    :param color: Name of the color for the plot.
    :param incl_tests: Included tests, accepted values: mrr, ndr, pdr, pdr-lat
    :type in_data: OrderedDict
    :type job_name: str
    :type build_info: dict
    :type name: str
    :type color: str
    :type incl_tests: str
    :returns: Generated traces (list) and the evaluated result (classification
        of the last sample), or (empty list, None) on invalid input or when
        the anomaly classification fails.
    :rtype: tuple(traces, result)
    """

    if incl_tests not in (u"mrr", u"ndr", u"pdr", u"pdr-lat"):
        return list(), None

    data_x = list(in_data.keys())
    data_y_pps = list()
    data_y_mpps = list()
    data_y_stdev = list()
    if incl_tests == u"pdr-lat":
        # Latency samples are divided by 1e6 for display; there is no stdev.
        for item in in_data.values():
            data_y_pps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
            data_y_stdev.append(float(u"nan"))
            data_y_mpps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
        multi = 1.0
    else:
        # Throughput samples are in pps; the charts display Mpps.
        for item in in_data.values():
            data_y_pps.append(float(item[u"receive-rate"]))
            data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
            data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
        multi = 1e6

    # Base hover template, constant over all samples so it is hoisted out of
    # the loop. <val> and <stdev> are resolved with str.replace() per sample,
    # the {}-placeholders with str.format().
    hover_tmpl = (
        u"date: {date}<br>"
        u"{property} [Mpps]: <val><br>"
        u"<stdev>"
        u"{sut}-ref: {build}<br>"
        u"csit-ref: {test}-{period}-build-{build_nr}<br>"
        u"testbed: {testbed}"
    )
    hover_text = list()
    xaxis = list()
    for index, key in enumerate(data_x):
        str_key = str(key)
        date = build_info[job_name][str_key][0]
        hover_str = hover_tmpl
        if incl_tests == u"mrr":
            hover_str = hover_str.replace(
                u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
            )
        else:
            hover_str = hover_str.replace(u"<stdev>", u"")
        if incl_tests == u"pdr-lat":
            hover_str = hover_str.replace(u"<val>", u"{value:.1e}")
        else:
            hover_str = hover_str.replace(u"<val>", u"{value:.3f}")
        if u"-cps" in name:
            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]").\
                replace(u"throughput", u"connection rate")
        if u"dpdk" in job_name:
            hover_str = hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"dpdk",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2])
        elif u"vpp" in job_name:
            hover_str = hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"vpp",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2])
        elif u"trex" in job_name:
            # TRex ndrpdr jobs run weekly. Without this branch, trex job
            # names (matching neither "dpdk" nor "vpp") would leave the
            # template unformatted and show raw {placeholders} in the hover.
            hover_str = hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"trex",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2])
        if incl_tests == u"pdr-lat":
            hover_str = hover_str.replace(
                u"throughput [Mpps]", u"latency [s]"
            )
        hover_text.append(hover_str)
        # "generated" metadata format is "YYYYMMDD HH:MM" — parse it into a
        # datetime for the x axis.
        xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                              int(date[9:11]), int(date[12:])))

    data_pd = OrderedDict()
    for key, value in zip(xaxis, data_y_pps):
        data_pd[key] = value

    try:
        anomaly_classification, avgs_pps, stdevs_pps = \
            classify_anomalies(data_pd)
    except ValueError as err:
        logging.info(f"{err} Skipping")
        return list(), None
    avgs_mpps = [avg_pps / multi for avg_pps in avgs_pps]
    stdevs_mpps = [stdev_pps / multi for stdev_pps in stdevs_pps]

    anomalies = OrderedDict()
    anomalies_colors = list()
    anomalies_avgs = list()
    anomaly_color = {
        u"regression": 0.0,
        u"normal": 0.5,
        u"progression": 1.0
    }
    if anomaly_classification:
        for index, (key, value) in enumerate(data_pd.items()):
            if anomaly_classification[index] in (u"regression", u"progression"):
                anomalies[key] = value / multi
                anomalies_colors.append(
                    anomaly_color[anomaly_classification[index]])
                anomalies_avgs.append(avgs_mpps[index])
        # Always append the full scale so marker colors map consistently even
        # when only one anomaly class occurred.
        anomalies_colors.extend([0.0, 0.5, 1.0])

    # Create traces

    trace_samples = plgo.Scatter(
        x=xaxis,
        y=data_y_mpps,
        mode=u"markers",
        line={
            u"width": 1
        },
        showlegend=True,
        legendgroup=name,
        name=f"{name}",
        marker={
            u"size": 5,
            u"color": color,
            u"symbol": u"circle",
        },
        text=hover_text,
        hoverinfo=u"text+name"
    )
    traces = [trace_samples, ]

    trend_hover_text = list()
    for idx in range(len(data_x)):
        if incl_tests == u"pdr-lat":
            trend_hover_str = (
                f"trend [s]: {avgs_mpps[idx]:.1e}<br>"
            )
        else:
            trend_hover_str = (
                f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
                f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
            )
        trend_hover_text.append(trend_hover_str)

    trace_trend = plgo.Scatter(
        x=xaxis,
        y=avgs_mpps,
        mode=u"lines",
        line={
            u"shape": u"linear",
            u"width": 1,
            u"color": color,
        },
        showlegend=False,
        legendgroup=name,
        name=f"{name}",
        text=trend_hover_text,
        hoverinfo=u"text+name"
    )
    traces.append(trace_trend)

    # For latency, lower is better, so the color scale is inverted
    # (green = progression = decrease).
    if incl_tests == u"pdr-lat":
        colorscale = [
            [0.00, u"green"],
            [0.33, u"green"],
            [0.33, u"white"],
            [0.66, u"white"],
            [0.66, u"red"],
            [1.00, u"red"]
        ]
        ticktext = [u"Progression", u"Normal", u"Regression"]
    else:
        colorscale = [
            [0.00, u"red"],
            [0.33, u"red"],
            [0.33, u"white"],
            [0.66, u"white"],
            [0.66, u"green"],
            [1.00, u"green"]
        ]
        ticktext = [u"Regression", u"Normal", u"Progression"]
    trace_anomalies = plgo.Scatter(
        x=list(anomalies.keys()),
        y=anomalies_avgs,
        mode=u"markers",
        hoverinfo=u"none",
        showlegend=False,
        legendgroup=name,
        name=f"{name}-anomalies",
        marker={
            u"size": 15,
            u"symbol": u"circle-open",
            u"color": anomalies_colors,
            u"colorscale": colorscale,
            u"showscale": True,
            u"line": {
                u"width": 2
            },
            u"colorbar": {
                u"y": 0.5,
                u"len": 0.8,
                u"title": u"Circles Marking Data Classification",
                u"titleside": u"right",
                u"titlefont": {
                    u"size": 14
                },
                u"tickmode": u"array",
                u"tickvals": [0.167, 0.500, 0.833],
                u"ticktext": ticktext,
                u"ticks": u"",
                u"ticklen": 0,
                u"tickangle": -90,
                u"thickness": 10
            }
        }
    )
    traces.append(trace_anomalies)

    if anomaly_classification:
        return traces, anomaly_classification[-1]

    return traces, None
399
400
def _generate_all_charts(spec, input_data):
    """Generate all charts specified in the specification file.

    Besides the html charts, also writes per-job trending csv/txt tables,
    latency csv tables and regression/progression summary files.

    :param spec: Specification.
    :param input_data: Full data set.
    :type spec: Specification
    :type input_data: InputData
    :returns: u"PASS" if no test was classified as regression or outlier,
        u"FAIL" otherwise (also when there are no classifications at all).
    :rtype: str
    """

    def _generate_chart(graph):
        """Generates the chart.

        Uses builds_dict and build_info from the enclosing scope.

        :param graph: The graph to be generated
        :type graph: dict
        :returns: Dictionary with the job name, csv table with results and
            list of tests classification results.
        :rtype: dict
        """

        logging.info(f"  Generating the chart {graph.get(u'title', u'')} ...")

        # Only the first job listed in the plot specification is used.
        job_name = list(graph[u"data"].keys())[0]

        # Transform the data
        logging.info(
            f"    Creating the data set for the {graph.get(u'type', u'')} "
            f"{graph.get(u'title', u'')}."
        )

        data = input_data.filter_tests_by_name(
            graph,
            params=[u"type", u"result", u"throughput", u"latency", u"tags"],
            continue_on_error=True
        )

        if data is None or data.empty:
            logging.error(u"No data.")
            return dict()

        return_lst = list()

        # One chart (plus an optional latency chart for pdr) is generated for
        # each combination of test type and core count.
        for ttype in graph.get(u"test-type", (u"mrr", )):
            for core in graph.get(u"core", tuple()):
                csv_tbl = list()
                csv_tbl_lat_1 = list()  # PDR50 latency, direction 1
                csv_tbl_lat_2 = list()  # PDR50 latency, direction 2
                res = dict()
                chart_data = dict()
                chart_tags = dict()
                # Collect per-build samples for every test matching any of
                # the "include" regular expressions.
                for item in graph.get(u"include", tuple()):
                    reg_ex = re.compile(str(item.format(core=core)).lower())
                    for job, job_data in data.items():
                        if job != job_name:
                            continue
                        for index, bld in job_data.items():
                            for test_id, test in bld.items():
                                if not re.match(reg_ex, str(test_id).lower()):
                                    continue
                                if chart_data.get(test_id, None) is None:
                                    chart_data[test_id] = OrderedDict()
                                try:
                                    lat_1 = u""
                                    lat_2 = u""
                                    if ttype == u"mrr":
                                        rate = test[u"result"][u"receive-rate"]
                                        stdev = \
                                            test[u"result"][u"receive-stdev"]
                                    elif ttype == u"ndr":
                                        rate = \
                                            test["throughput"][u"NDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                    elif ttype == u"pdr":
                                        rate = \
                                            test["throughput"][u"PDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                        lat_1 = test[u"latency"][u"PDR50"]\
                                            [u"direction1"][u"avg"]
                                        lat_2 = test[u"latency"][u"PDR50"]\
                                            [u"direction2"][u"avg"]
                                    else:
                                        continue
                                    chart_data[test_id][int(index)] = {
                                        u"receive-rate": rate,
                                        u"receive-stdev": stdev
                                    }
                                    if ttype == u"pdr":
                                        chart_data[test_id][int(index)].update(
                                            {
                                                u"lat_1": lat_1,
                                                u"lat_2": lat_2
                                            }
                                        )
                                    chart_tags[test_id] = \
                                        test.get(u"tags", None)
                                except (KeyError, TypeError):
                                    # Best-effort: tests with incomplete
                                    # results are silently skipped.
                                    pass

                # Add items to the csv table:
                for tst_name, tst_data in chart_data.items():
                    tst_lst = list()
                    tst_lst_lat_1 = list()
                    tst_lst_lat_2 = list()
                    for bld in builds_dict[job_name]:
                        itm = tst_data.get(int(bld), dict())
                        # CSIT-1180: Itm will be list, compute stats.
                        try:
                            tst_lst.append(str(itm.get(u"receive-rate", u"")))
                            if ttype == u"pdr":
                                tst_lst_lat_1.append(
                                    str(itm.get(u"lat_1", u""))
                                )
                                tst_lst_lat_2.append(
                                    str(itm.get(u"lat_2", u""))
                                )
                        except AttributeError:
                            # itm without dict interface yields empty cells.
                            tst_lst.append(u"")
                            if ttype == u"pdr":
                                tst_lst_lat_1.append(u"")
                                tst_lst_lat_2.append(u"")
                    csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
                    csv_tbl_lat_1.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n"
                    )
                    csv_tbl_lat_2.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n"
                    )

                # Generate traces:
                traces = list()
                traces_lat = list()
                index = 0
                groups = graph.get(u"groups", None)
                visibility = list()

                if groups:
                    # Grouped charts: one trace set per group, switched via
                    # a dropdown menu; only the first matching tag per test
                    # is used ("break" below).
                    for group in groups:
                        visible = list()
                        for tag in group:
                            for tst_name, test_data in chart_data.items():
                                if not test_data:
                                    logging.warning(
                                        f"No data for the test {tst_name}"
                                    )
                                    continue
                                if tag not in chart_tags[tst_name]:
                                    continue
                                try:
                                    trace, rslt = _generate_trending_traces(
                                        test_data,
                                        job_name=job_name,
                                        build_info=build_info,
                                        name=u'-'.join(tst_name.split(u'.')[-1].
                                                       split(u'-')[2:-1]),
                                        color=COLORS[index],
                                        incl_tests=ttype
                                    )
                                except IndexError:
                                    logging.error(f"Out of colors: index: "
                                                  f"{index}, test: {tst_name}")
                                    index += 1
                                    continue
                                traces.extend(trace)
                                visible.extend(
                                    [True for _ in range(len(trace))]
                                )
                                res[tst_name] = rslt
                                index += 1
                                break
                        visibility.append(visible)
                else:
                    # Flat (ungrouped) chart: one trace set per test; for pdr
                    # an extra latency trace set is collected as well.
                    for tst_name, test_data in chart_data.items():
                        if not test_data:
                            logging.warning(f"No data for the test {tst_name}")
                            continue
                        try:
                            trace, rslt = _generate_trending_traces(
                                test_data,
                                job_name=job_name,
                                build_info=build_info,
                                name=u'-'.join(
                                    tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                                color=COLORS[index],
                                incl_tests=ttype
                            )
                            if ttype == u"pdr":
                                trace_lat, _ = _generate_trending_traces(
                                    test_data,
                                    job_name=job_name,
                                    build_info=build_info,
                                    name=u'-'.join(
                                        tst_name.split(u'.')[-1].split(
                                            u'-')[2:-1]),
                                    color=COLORS[index],
                                    incl_tests=u"pdr-lat"
                                )
                                traces_lat.extend(trace_lat)
                        except IndexError:
                            logging.error(
                                f"Out of colors: index: "
                                f"{index}, test: {tst_name}"
                            )
                            index += 1
                            continue
                        traces.extend(trace)
                        res[tst_name] = rslt
                        index += 1

                if traces:
                    # Generate the chart:
                    try:
                        layout = deepcopy(graph[u"layout"])
                    except KeyError as err:
                        logging.error(u"Finished with error: No layout defined")
                        logging.error(repr(err))
                        return dict()
                    if groups:
                        # Build one visibility mask per group ("show" selects
                        # only that group's traces) plus an "All" button.
                        show = list()
                        for i in range(len(visibility)):
                            visible = list()
                            for vis_idx, _ in enumerate(visibility):
                                for _ in range(len(visibility[vis_idx])):
                                    visible.append(i == vis_idx)
                            show.append(visible)

                        buttons = list()
                        buttons.append(dict(
                            label=u"All",
                            method=u"update",
                            args=[{u"visible":
                                       [True for _ in range(len(show[0]))]}, ]
                        ))
                        for i in range(len(groups)):
                            try:
                                label = graph[u"group-names"][i]
                            except (IndexError, KeyError):
                                label = f"Group {i + 1}"
                            buttons.append(dict(
                                label=label,
                                method=u"update",
                                args=[{u"visible": show[i]}, ]
                            ))

                        layout[u"updatemenus"] = list([
                            dict(
                                active=0,
                                type=u"dropdown",
                                direction=u"down",
                                xanchor=u"left",
                                yanchor=u"bottom",
                                x=-0.12,
                                y=1.0,
                                buttons=buttons
                            )
                        ])

                    name_file = (
                        f"{spec.cpta[u'output-file']}/"
                        f"{graph[u'output-file-name']}.html"
                    )
                    name_file = name_file.format(core=core, test_type=ttype)

                    logging.info(f"    Writing the file {name_file}")
                    plpl = plgo.Figure(data=traces, layout=layout)
                    try:
                        ploff.plot(
                            plpl,
                            show_link=False,
                            auto_open=False,
                            filename=name_file
                        )
                    except plerr.PlotlyEmptyDataError:
                        logging.warning(u"No data for the plot. Skipped.")

                if traces_lat:
                    # Companion latency chart (pdr only); same layout with the
                    # y axis re-labelled for latency.
                    try:
                        layout = deepcopy(graph[u"layout"])
                        layout[u"yaxis"][u"title"] = u"Latency [s]"
                        layout[u"yaxis"][u"tickformat"] = u".3s"
                    except KeyError as err:
                        logging.error(u"Finished with error: No layout defined")
                        logging.error(repr(err))
                        return dict()
                    name_file = (
                        f"{spec.cpta[u'output-file']}/"
                        f"{graph[u'output-file-name']}-lat.html"
                    )
                    name_file = name_file.format(core=core, test_type=ttype)

                    logging.info(f"    Writing the file {name_file}")
                    plpl = plgo.Figure(data=traces_lat, layout=layout)
                    try:
                        ploff.plot(
                            plpl,
                            show_link=False,
                            auto_open=False,
                            filename=name_file
                        )
                    except plerr.PlotlyEmptyDataError:
                        logging.warning(u"No data for the plot. Skipped.")

                return_lst.append(
                    {
                        u"job_name": job_name,
                        u"csv_table": csv_tbl,
                        u"csv_lat_1": csv_tbl_lat_1,
                        u"csv_lat_2": csv_tbl_lat_2,
                        u"results": res
                    }
                )

        return return_lst

    # Builds usable for trending, per job (failed/missing builds excluded).
    builds_dict = dict()
    for job, builds in spec.input.items():
        if builds_dict.get(job, None) is None:
            builds_dict[job] = list()
        for build in builds:
            if build[u"status"] not in (u"failed", u"not found", u"removed",
                                        None):
                builds_dict[job].append(str(build[u"build"]))

    # Create "build ID": "date" dict:
    # value is a tuple (generated date, version, testbed name).
    build_info = dict()
    tb_tbl = spec.environment.get(u"testbeds", None)
    for job_name, job_data in builds_dict.items():
        if build_info.get(job_name, None) is None:
            build_info[job_name] = OrderedDict()
        for build in job_data:
            testbed = u""
            tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            build_info[job_name][build] = (
                input_data.metadata(job_name, build).get(u"generated", u""),
                input_data.metadata(job_name, build).get(u"version", u""),
                testbed
            )

    anomaly_classifications = dict()

    # Create the table header:
    # (same four header rows for the trending table and both latency tables)
    csv_tables = dict()
    csv_tables_l1 = dict()
    csv_tables_l2 = dict()
    for job_name in builds_dict:
        if csv_tables.get(job_name, None) is None:
            csv_tables[job_name] = list()
        if csv_tables_l1.get(job_name, None) is None:
            csv_tables_l1[job_name] = list()
        if csv_tables_l2.get(job_name, None) is None:
            csv_tables_l2[job_name] = list()
        header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        build_dates = [x[0] for x in build_info[job_name].values()]
        header = f"Build Date:,{u','.join(build_dates)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        versions = [x[1] for x in build_info[job_name].values()]
        header = f"Version:,{u','.join(versions)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        testbed = [x[2] for x in build_info[job_name].values()]
        header = f"Test bed:,{u','.join(testbed)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)

    for chart in spec.cpta[u"plots"]:
        results = _generate_chart(chart)
        if not results:
            continue

        for result in results:
            csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
            csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
            csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])

            if anomaly_classifications.get(result[u"job_name"], None) is None:
                anomaly_classifications[result[u"job_name"]] = dict()
            anomaly_classifications[result[u"job_name"]].\
                update(result[u"results"])

    # Write the tables:
    for job_name, csv_table in csv_tables.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)

        # Re-read the csv and render it as a txt table; data rows (after the
        # two header rows) are converted from pps to Mpps.
        txt_table = None
        with open(f"{file_name}.csv", u"rt") as csv_file:
            csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
            line_nr = 0
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    if line_nr > 1:
                        for idx, item in enumerate(row):
                            try:
                                row[idx] = str(round(float(item) / 1000000, 2))
                            except ValueError:
                                # Non-numeric cells (names, empty) kept as-is.
                                pass
                    try:
                        txt_table.add_row(row)
                    # PrettyTable raises Exception
                    except Exception as err:
                        logging.warning(
                            f"Error occurred while generating TXT table:\n{err}"
                        )
                line_nr += 1
            # NOTE(review): txt_table stays None if the csv is empty, which
            # would raise AttributeError here — presumably the header rows
            # written above guarantee at least one row; confirm.
            txt_table.align[u"Build Number:"] = u"l"
        with open(f"{file_name}.txt", u"wt") as txt_file:
            txt_file.write(str(txt_table))

    # Latency tables are written as csv only (no txt rendering).
    for job_name, csv_table in csv_tables_l1.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)
    for job_name, csv_table in csv_tables_l2.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)

    # Evaluate result:
    # any "regression" or "outlier" classification fails the whole run;
    # regression and progression test names are also dumped to txt files.
    if anomaly_classifications:
        result = u"PASS"
        for job_name, job_data in anomaly_classifications.items():
            file_name = \
                f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"regression":
                        txt_file.write(test_name + u'\n')
                    if classification in (u"regression", u"outlier"):
                        result = u"FAIL"
            file_name = \
                f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
            with open(file_name, u'w') as txt_file:
                for test_name, classification in job_data.items():
                    if classification == u"progression":
                        txt_file.write(test_name + u'\n')
    else:
        result = u"FAIL"

    logging.info(f"Partial results: {anomaly_classifications}")
    logging.info(f"Result: {result}")

    return result