Report: Configure Report 2202
[csit.git] / resources / tools / presentation / generator_cpta.py
1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Generation of Continuous Performance Trending and Analysis.
15 """
16 import re
17 import logging
18 import csv
19
20 from collections import OrderedDict
21 from datetime import datetime
22 from copy import deepcopy
23 from os import listdir
24
25 import prettytable
26 import plotly.offline as ploff
27 import plotly.graph_objs as plgo
28 import plotly.exceptions as plerr
29
30 from pal_utils import archive_input_data, execute_command, classify_anomalies
31
32
# Command template to build the html format of the report; filled in by
# generate_cpta() with the build date and the working/build directories.
HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \
               u'-b html -E ' \
               u'-t html ' \
               u'-D version="{date}" ' \
               u'{working_dir} ' \
               u'{build_dir}/'

# .css file for the html format of the report; written verbatim over the
# theme's css patch files after the sphinx build (see generate_cpta()).
THEME_OVERRIDES = u"""/* override table width restrictions */
.wy-nav-content {
    max-width: 1200px !important;
}
.rst-content blockquote {
    margin-left: 0px;
    line-height: 18px;
    margin-bottom: 0px;
}
.wy-menu-vertical a {
    display: inline-block;
    line-height: 18px;
    padding: 0 2em;
    display: block;
    position: relative;
    font-size: 90%;
    color: #d9d9d9
}
.wy-menu-vertical li.current a {
    color: gray;
    border-right: solid 1px #c9c9c9;
    padding: 0 3em;
}
.wy-menu-vertical li.toctree-l2.current > a {
    background: #c9c9c9;
    padding: 0 3em;
}
.wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
    display: block;
    background: #c9c9c9;
    padding: 0 4em;
}
.wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
    display: block;
    background: #bdbdbd;
    padding: 0 5em;
}
.wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
    color: #404040;
    padding: 0 2em;
    font-weight: bold;
    position: relative;
    background: #fcfcfc;
    border: none;
        border-top-width: medium;
        border-bottom-width: medium;
        border-top-style: none;
        border-bottom-style: none;
        border-top-color: currentcolor;
        border-bottom-color: currentcolor;
    padding-left: 2em -4px;
}
"""

# Fixed palette of trace colors, one per test in a chart. The chart
# generator indexes this tuple sequentially and logs an "Out of colors"
# error (IndexError) when a chart contains more tests than colors.
COLORS = (
    u"#1A1110",
    u"#DA2647",
    u"#214FC6",
    u"#01786F",
    u"#BD8260",
    u"#FFD12A",
    u"#A6E7FF",
    u"#738276",
    u"#C95A49",
    u"#FC5A8D",
    u"#CEC8EF",
    u"#391285",
    u"#6F2DA8",
    u"#FF878D",
    u"#45A27D",
    u"#FFD0B9",
    u"#FD5240",
    u"#DB91EF",
    u"#44D7A8",
    u"#4F86F7",
    u"#84DE02",
    u"#FFCFF1",
    u"#614051"
)
121
122
def generate_cpta(spec, data):
    """Generate all formats and versions of the Continuous Performance Trending
    and Analysis.

    Generates the charts, runs the sphinx html build, patches the theme css
    and optionally archives the input data.

    :param spec: Specification read from the specification file.
    :param data: Full data set.
    :type spec: Specification
    :type data: InputData
    """

    logging.info(
        u"Generating the Continuous Performance Trending and Analysis ..."
    )

    ret_code = _generate_all_charts(spec, data)

    paths = spec.environment[u'paths']

    # Build the html pages with sphinx, stamping the report with the
    # current UTC date and time.
    execute_command(
        HTML_BUILDER.format(
            date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
            working_dir=paths[u'DIR[WORKING,SRC]'],
            build_dir=paths[u'DIR[BUILD,HTML]']
        )
    )

    # Overwrite both theme css patch files with the same overrides.
    for css_key in (u'DIR[CSS_PATCH_FILE]', u'DIR[CSS_PATCH_FILE2]'):
        with open(paths[css_key], u'w') as css_file:
            css_file.write(THEME_OVERRIDES)

    if spec.environment.get(u"archive-inputs", False):
        archive_input_data(spec)

    logging.info(u"Done.")

    return ret_code
158
159
def _generate_trending_traces(in_data, job_name, build_info,
                              name=u"", color=u"", incl_tests=u"mrr"):
    """Generate the trending traces:
     - samples,
     - outliers, regress, progress
     - average of normal samples (trending line)

    :param in_data: Full data set, mapping build number to per-build results.
    :param job_name: The name of job which generated the data.
    :param build_info: Information about the builds; maps job name to a dict
        of build number -> (date, version, testbed).
    :param name: Name of the plot
    :param color: Name of the color for the plot.
    :param incl_tests: Included tests, accepted values: mrr, ndr, pdr, pdr-lat
    :type in_data: OrderedDict
    :type job_name: str
    :type build_info: dict
    :type name: str
    :type color: str
    :type incl_tests: str
    :returns: Generated traces (list) and the evaluated result (the anomaly
        classification of the most recent sample, or None).
    :rtype: tuple(traces, result)
    """

    # Reject any unsupported test type early.
    if incl_tests not in (u"mrr", u"ndr", u"pdr", u"pdr-lat"):
        return list(), None

    data_x = list(in_data.keys())
    data_y_pps = list()
    data_y_mpps = list()
    data_y_stdev = list()
    if incl_tests == u"pdr-lat":
        # Latency charts plot the "lat_1" value; missing samples become NaN.
        # The /1e6 scaling presumably converts microseconds to seconds —
        # TODO confirm against the data source.
        for item in in_data.values():
            data_y_pps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
            data_y_stdev.append(float(u"nan"))
            data_y_mpps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
        # Data is already in its final unit, so the trend averages need no
        # further scaling.
        multi = 1.0
    else:
        # Throughput charts: keep raw pps for classification, Mpps for
        # plotting; stdev is scaled to Mpps as well.
        for item in in_data.values():
            data_y_pps.append(float(item[u"receive-rate"]))
            data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
            data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
        multi = 1e6
    hover_text = list()
    xaxis = list()
    # Build one hover string per sample. <val> and <stdev> are private
    # placeholders resolved with str.replace() before str.format() fills in
    # the named fields.
    for index, key in enumerate(data_x):
        str_key = str(key)
        date = build_info[job_name][str_key][0]
        hover_str = (u"date: {date}<br>"
                     u"{property} [Mpps]: <val><br>"
                     u"<stdev>"
                     u"{sut}-ref: {build}<br>"
                     u"csit-ref: {test}-{period}-build-{build_nr}<br>"
                     u"testbed: {testbed}")
        if incl_tests == u"mrr":
            hover_str = hover_str.replace(
                u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
            )
        else:
            hover_str = hover_str.replace(u"<stdev>", u"")
        if incl_tests == u"pdr-lat":
            hover_str = hover_str.replace(u"<val>", u"{value:.1e}")
        else:
            hover_str = hover_str.replace(u"<val>", u"{value:.3f}")
        if u"-cps" in name:
            # NOTE(review): this replace runs on the template before
            # .format(), where "throughput" only appears later as the
            # {property} value — so "connection rate" never shows; verify
            # intended behavior.
            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]").\
                replace(u"throughput", u"connection rate")
        # Fill in SUT-specific fields; jobs matching none of vpp/dpdk/trex
        # leave the template unformatted.
        if u"vpp" in job_name:
            hover_str = hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"vpp",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2])
        elif u"dpdk" in job_name:
            hover_str = hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"dpdk",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2])
        elif u"trex" in job_name:
            hover_str = hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"trex",
                build=u"",
                test=incl_tests,
                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2])
        if incl_tests == u"pdr-lat":
            hover_str = hover_str.replace(
                u"throughput [Mpps]", u"latency [s]"
            )
        hover_text.append(hover_str)
        # Parse the build date string by fixed offsets; assumes a
        # "YYYYMMDD?HHMM"-style stamp with separators at positions 8 and
        # 11 — TODO confirm against input_data.metadata "generated" format.
        xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                              int(date[9:11]), int(date[12:])))

    # Map each sample's timestamp to its raw (pps / latency) value for
    # anomaly classification.
    data_pd = OrderedDict()
    for key, value in zip(xaxis, data_y_pps):
        data_pd[key] = value

    try:
        anomaly_classification, avgs_pps, stdevs_pps = \
            classify_anomalies(data_pd)
    except ValueError as err:
        logging.info(f"{err} Skipping")
        return list(), None
    # Scale trend statistics to the plotted unit (Mpps, or unchanged for
    # latency where multi is 1.0).
    avgs_mpps = [avg_pps / multi for avg_pps in avgs_pps]
    stdevs_mpps = [stdev_pps / multi for stdev_pps in stdevs_pps]

    anomalies = OrderedDict()
    anomalies_colors = list()
    anomalies_avgs = list()
    # Each classification maps to a position on the marker colorscale.
    anomaly_color = {
        u"regression": 0.0,
        u"normal": 0.5,
        u"progression": 1.0
    }
    if anomaly_classification:
        for index, (key, value) in enumerate(data_pd.items()):
            if anomaly_classification[index] in (u"regression", u"progression"):
                anomalies[key] = value / multi
                anomalies_colors.append(
                    anomaly_color[anomaly_classification[index]])
                anomalies_avgs.append(avgs_mpps[index])
        # Append all three colorscale stops so the colorbar always spans the
        # full range even when only one class of anomaly is present.
        anomalies_colors.extend([0.0, 0.5, 1.0])

    # Create traces

    # Trace 1: the raw samples as colored markers.
    trace_samples = plgo.Scatter(
        x=xaxis,
        y=data_y_mpps,
        mode=u"markers",
        line={
            u"width": 1
        },
        showlegend=True,
        legendgroup=name,
        name=f"{name}",
        marker={
            u"size": 5,
            u"color": color,
            u"symbol": u"circle",
        },
        text=hover_text,
        hoverinfo=u"text+name"
    )
    traces = [trace_samples, ]

    trend_hover_text = list()
    for idx in range(len(data_x)):
        if incl_tests == u"pdr-lat":
            trend_hover_str = (
                f"trend [s]: {avgs_mpps[idx]:.1e}<br>"
            )
        else:
            trend_hover_str = (
                f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
                f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
            )
        trend_hover_text.append(trend_hover_str)

    # Trace 2: the trend line (average of normal samples).
    trace_trend = plgo.Scatter(
        x=xaxis,
        y=avgs_mpps,
        mode=u"lines",
        line={
            u"shape": u"linear",
            u"width": 1,
            u"color": color,
        },
        showlegend=False,
        legendgroup=name,
        name=f"{name}",
        text=trend_hover_text,
        hoverinfo=u"text+name"
    )
    traces.append(trace_trend)

    # For latency, lower is better, so the red/green ends of the colorscale
    # (and the tick labels) are swapped relative to throughput.
    if incl_tests == u"pdr-lat":
        colorscale = [
            [0.00, u"green"],
            [0.33, u"green"],
            [0.33, u"white"],
            [0.66, u"white"],
            [0.66, u"red"],
            [1.00, u"red"]
        ]
        ticktext = [u"Progression", u"Normal", u"Regression"]
    else:
        colorscale = [
            [0.00, u"red"],
            [0.33, u"red"],
            [0.33, u"white"],
            [0.66, u"white"],
            [0.66, u"green"],
            [1.00, u"green"]
        ]
        ticktext = [u"Regression", u"Normal", u"Progression"]
    # Trace 3: open circles highlighting the anomalous samples, plus the
    # classification colorbar.
    trace_anomalies = plgo.Scatter(
        x=list(anomalies.keys()),
        y=anomalies_avgs,
        mode=u"markers",
        hoverinfo=u"none",
        showlegend=False,
        legendgroup=name,
        name=f"{name}-anomalies",
        marker={
            u"size": 15,
            u"symbol": u"circle-open",
            u"color": anomalies_colors,
            u"colorscale": colorscale,
            u"showscale": True,
            u"line": {
                u"width": 2
            },
            u"colorbar": {
                u"y": 0.5,
                u"len": 0.8,
                u"title": u"Circles Marking Data Classification",
                u"titleside": u"right",
                u"titlefont": {
                    u"size": 14
                },
                u"tickmode": u"array",
                u"tickvals": [0.167, 0.500, 0.833],
                u"ticktext": ticktext,
                u"ticks": u"",
                u"ticklen": 0,
                u"tickangle": -90,
                u"thickness": 10
            }
        }
    )
    traces.append(trace_anomalies)

    # The evaluated result is the classification of the newest sample.
    if anomaly_classification:
        return traces, anomaly_classification[-1]

    return traces, None
410
411
412 def _generate_all_charts(spec, input_data):
413     """Generate all charts specified in the specification file.
414
415     :param spec: Specification.
416     :param input_data: Full data set.
417     :type spec: Specification
418     :type input_data: InputData
419     """
420
    def _generate_chart(graph):
        """Generates the chart.

        Closure over ``input_data``, ``spec``, ``builds_dict`` and
        ``build_info`` from the enclosing function.

        :param graph: The graph to be generated
        :type graph: dict
        :returns: List of dictionaries (one per test-type/core combination)
            with the job name, csv tables with results and the tests
            classification results; an empty dict on error.
        :rtype: list or dict
        """

        logging.info(f"  Generating the chart {graph.get(u'title', u'')} ...")

        # Only the first job listed in the graph specification is charted.
        job_name = list(graph[u"data"].keys())[0]

        # Transform the data
        logging.info(
            f"    Creating the data set for the {graph.get(u'type', u'')} "
            f"{graph.get(u'title', u'')}."
        )

        data = input_data.filter_tests_by_name(
            graph,
            params=[u"type", u"result", u"throughput", u"latency", u"tags"],
            continue_on_error=True
        )

        if data is None or data.empty:
            logging.error(u"No data.")
            return dict()

        return_lst = list()

        # One chart (and one result entry) per test type and core count.
        for ttype in graph.get(u"test-type", (u"mrr", )):
            for core in graph.get(u"core", tuple()):
                csv_tbl = list()
                csv_tbl_lat_1 = list()
                csv_tbl_lat_2 = list()
                res = dict()
                chart_data = dict()
                chart_tags = dict()
                # Collect per-build rate/stdev (and pdr latency) for every
                # test matching the "include" regular expressions.
                for item in graph.get(u"include", tuple()):
                    reg_ex = re.compile(str(item.format(core=core)).lower())
                    for job, job_data in data.items():
                        if job != job_name:
                            continue
                        for index, bld in job_data.items():
                            for test_id, test in bld.items():
                                if not re.match(reg_ex, str(test_id).lower()):
                                    continue
                                if chart_data.get(test_id, None) is None:
                                    chart_data[test_id] = OrderedDict()
                                try:
                                    lat_1 = u""
                                    lat_2 = u""
                                    if ttype == u"mrr":
                                        rate = test[u"result"][u"receive-rate"]
                                        stdev = \
                                            test[u"result"][u"receive-stdev"]
                                    elif ttype == u"ndr":
                                        rate = \
                                            test["throughput"][u"NDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                    elif ttype == u"pdr":
                                        rate = \
                                            test["throughput"][u"PDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                        lat_1 = test[u"latency"][u"PDR50"]\
                                            [u"direction1"][u"avg"]
                                        lat_2 = test[u"latency"][u"PDR50"]\
                                            [u"direction2"][u"avg"]
                                    else:
                                        continue
                                    chart_data[test_id][int(index)] = {
                                        u"receive-rate": rate,
                                        u"receive-stdev": stdev
                                    }
                                    if ttype == u"pdr":
                                        chart_data[test_id][int(index)].update(
                                            {
                                                u"lat_1": lat_1,
                                                u"lat_2": lat_2
                                            }
                                        )
                                    chart_tags[test_id] = \
                                        test.get(u"tags", None)
                                except (KeyError, TypeError):
                                    # Incomplete test result; skip silently.
                                    pass

                # Add items to the csv table:
                for tst_name, tst_data in chart_data.items():
                    tst_lst = list()
                    tst_lst_lat_1 = list()
                    tst_lst_lat_2 = list()
                    for bld in builds_dict[job_name]:
                        itm = tst_data.get(int(bld), dict())
                        # CSIT-1180: Itm will be list, compute stats.
                        try:
                            tst_lst.append(str(itm.get(u"receive-rate", u"")))
                            if ttype == u"pdr":
                                tst_lst_lat_1.append(
                                    str(itm.get(u"lat_1", u""))
                                )
                                tst_lst_lat_2.append(
                                    str(itm.get(u"lat_2", u""))
                                )
                        except AttributeError:
                            tst_lst.append(u"")
                            if ttype == u"pdr":
                                tst_lst_lat_1.append(u"")
                                tst_lst_lat_2.append(u"")
                    csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
                    csv_tbl_lat_1.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n"
                    )
                    csv_tbl_lat_2.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n"
                    )

                # Generate traces:
                traces = list()
                traces_lat = list()
                index = 0
                groups = graph.get(u"groups", None)
                visibility = list()

                if groups:
                    # Grouped chart: tests are bucketed by tag, and a
                    # visibility mask per group drives the dropdown below.
                    for group in groups:
                        visible = list()
                        for tag in group:
                            for tst_name, test_data in chart_data.items():
                                if not test_data:
                                    logging.warning(
                                        f"No data for the test {tst_name}"
                                    )
                                    continue
                                if tag not in chart_tags[tst_name]:
                                    continue
                                try:
                                    trace, rslt = _generate_trending_traces(
                                        test_data,
                                        job_name=job_name,
                                        build_info=build_info,
                                        name=u'-'.join(tst_name.split(u'.')[-1].
                                                       split(u'-')[2:-1]),
                                        color=COLORS[index],
                                        incl_tests=ttype
                                    )
                                except IndexError:
                                    logging.error(f"Out of colors: index: "
                                                  f"{index}, test: {tst_name}")
                                    index += 1
                                    continue
                                traces.extend(trace)
                                visible.extend(
                                    [True for _ in range(len(trace))]
                                )
                                res[tst_name] = rslt
                                index += 1
                                # Each test belongs to one group at most.
                                break
                        visibility.append(visible)
                else:
                    # Flat chart: one set of traces per test; pdr also gets
                    # a parallel latency chart.
                    for tst_name, test_data in chart_data.items():
                        if not test_data:
                            logging.warning(f"No data for the test {tst_name}")
                            continue
                        try:
                            trace, rslt = _generate_trending_traces(
                                test_data,
                                job_name=job_name,
                                build_info=build_info,
                                name=u'-'.join(
                                    tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                                color=COLORS[index],
                                incl_tests=ttype
                            )
                            if ttype == u"pdr":
                                trace_lat, _ = _generate_trending_traces(
                                    test_data,
                                    job_name=job_name,
                                    build_info=build_info,
                                    name=u'-'.join(
                                        tst_name.split(u'.')[-1].split(
                                            u'-')[2:-1]),
                                    color=COLORS[index],
                                    incl_tests=u"pdr-lat"
                                )
                                traces_lat.extend(trace_lat)
                        except IndexError:
                            logging.error(
                                f"Out of colors: index: "
                                f"{index}, test: {tst_name}"
                            )
                            index += 1
                            continue
                        traces.extend(trace)
                        res[tst_name] = rslt
                        index += 1

                if traces:
                    # Generate the chart:
                    try:
                        layout = deepcopy(graph[u"layout"])
                    except KeyError as err:
                        logging.error(u"Finished with error: No layout defined")
                        logging.error(repr(err))
                        return dict()
                    if groups:
                        # Build one boolean mask per group: only that
                        # group's traces are visible when it is selected.
                        show = list()
                        for i in range(len(visibility)):
                            visible = list()
                            for vis_idx, _ in enumerate(visibility):
                                for _ in range(len(visibility[vis_idx])):
                                    visible.append(i == vis_idx)
                            show.append(visible)

                        buttons = list()
                        buttons.append(dict(
                            label=u"All",
                            method=u"update",
                            args=[{u"visible":
                                       [True for _ in range(len(show[0]))]}, ]
                        ))
                        for i in range(len(groups)):
                            try:
                                label = graph[u"group-names"][i]
                            except (IndexError, KeyError):
                                label = f"Group {i + 1}"
                            buttons.append(dict(
                                label=label,
                                method=u"update",
                                args=[{u"visible": show[i]}, ]
                            ))

                        layout[u"updatemenus"] = list([
                            dict(
                                active=0,
                                type=u"dropdown",
                                direction=u"down",
                                xanchor=u"left",
                                yanchor=u"bottom",
                                x=-0.12,
                                y=1.0,
                                buttons=buttons
                            )
                        ])

                    name_file = (
                        f"{spec.cpta[u'output-file']}/"
                        f"{graph[u'output-file-name']}.html"
                    )
                    name_file = name_file.format(core=core, test_type=ttype)

                    logging.info(f"    Writing the file {name_file}")
                    plpl = plgo.Figure(data=traces, layout=layout)
                    try:
                        ploff.plot(
                            plpl,
                            show_link=False,
                            auto_open=False,
                            filename=name_file
                        )
                    except plerr.PlotlyEmptyDataError:
                        logging.warning(u"No data for the plot. Skipped.")

                if traces_lat:
                    # Latency chart reuses the layout with a relabeled y axis.
                    try:
                        layout = deepcopy(graph[u"layout"])
                        layout[u"yaxis"][u"title"] = u"Latency [s]"
                        layout[u"yaxis"][u"tickformat"] = u".3s"
                    except KeyError as err:
                        logging.error(u"Finished with error: No layout defined")
                        logging.error(repr(err))
                        return dict()
                    name_file = (
                        f"{spec.cpta[u'output-file']}/"
                        f"{graph[u'output-file-name']}-lat.html"
                    )
                    name_file = name_file.format(core=core, test_type=ttype)

                    logging.info(f"    Writing the file {name_file}")
                    plpl = plgo.Figure(data=traces_lat, layout=layout)
                    try:
                        ploff.plot(
                            plpl,
                            show_link=False,
                            auto_open=False,
                            filename=name_file
                        )
                    except plerr.PlotlyEmptyDataError:
                        logging.warning(u"No data for the plot. Skipped.")

                return_lst.append(
                    {
                        u"job_name": job_name,
                        u"csv_table": csv_tbl,
                        u"csv_lat_1": csv_tbl_lat_1,
                        u"csv_lat_2": csv_tbl_lat_2,
                        u"results": res
                    }
                )

        return return_lst
723
724     builds_dict = dict()
725     for job, builds in spec.input.items():
726         if builds_dict.get(job, None) is None:
727             builds_dict[job] = list()
728         for build in builds:
729             if build[u"status"] not in (u"failed", u"not found", u"removed",
730                                         None):
731                 builds_dict[job].append(str(build[u"build"]))
732
733     # Create "build ID": "date" dict:
734     build_info = dict()
735     tb_tbl = spec.environment.get(u"testbeds", None)
736     for job_name, job_data in builds_dict.items():
737         if build_info.get(job_name, None) is None:
738             build_info[job_name] = OrderedDict()
739         for build in job_data:
740             testbed = u""
741             tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
742             if tb_ip and tb_tbl:
743                 testbed = tb_tbl.get(tb_ip, u"")
744             build_info[job_name][build] = (
745                 input_data.metadata(job_name, build).get(u"generated", u""),
746                 input_data.metadata(job_name, build).get(u"version", u""),
747                 testbed
748             )
749
750     anomaly_classifications = dict()
751
752     # Create the table header:
753     csv_tables = dict()
754     csv_tables_l1 = dict()
755     csv_tables_l2 = dict()
756     for job_name in builds_dict:
757         if csv_tables.get(job_name, None) is None:
758             csv_tables[job_name] = list()
759         if csv_tables_l1.get(job_name, None) is None:
760             csv_tables_l1[job_name] = list()
761         if csv_tables_l2.get(job_name, None) is None:
762             csv_tables_l2[job_name] = list()
763         header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
764         csv_tables[job_name].append(header)
765         csv_tables_l1[job_name].append(header)
766         csv_tables_l2[job_name].append(header)
767         build_dates = [x[0] for x in build_info[job_name].values()]
768         header = f"Build Date:,{u','.join(build_dates)}\n"
769         csv_tables[job_name].append(header)
770         csv_tables_l1[job_name].append(header)
771         csv_tables_l2[job_name].append(header)
772         versions = [x[1] for x in build_info[job_name].values()]
773         header = f"Version:,{u','.join(versions)}\n"
774         csv_tables[job_name].append(header)
775         csv_tables_l1[job_name].append(header)
776         csv_tables_l2[job_name].append(header)
777         testbed = [x[2] for x in build_info[job_name].values()]
778         header = f"Test bed:,{u','.join(testbed)}\n"
779         csv_tables[job_name].append(header)
780         csv_tables_l1[job_name].append(header)
781         csv_tables_l2[job_name].append(header)
782
783     for chart in spec.cpta[u"plots"]:
784         results = _generate_chart(chart)
785         if not results:
786             continue
787
788         for result in results:
789             csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
790             csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
791             csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])
792
793             if anomaly_classifications.get(result[u"job_name"], None) is None:
794                 anomaly_classifications[result[u"job_name"]] = dict()
795             anomaly_classifications[result[u"job_name"]].\
796                 update(result[u"results"])
797
798     # Write the tables:
799     for job_name, csv_table in csv_tables.items():
800         file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
801         with open(f"{file_name}.csv", u"wt") as file_handler:
802             file_handler.writelines(csv_table)
803
804         txt_table = None
805         with open(f"{file_name}.csv", u"rt") as csv_file:
806             csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
807             line_nr = 0
808             for row in csv_content:
809                 if txt_table is None:
810                     txt_table = prettytable.PrettyTable(row)
811                 else:
812                     if line_nr > 1:
813                         for idx, item in enumerate(row):
814                             try:
815                                 row[idx] = str(round(float(item) / 1000000, 2))
816                             except ValueError:
817                                 pass
818                     try:
819                         txt_table.add_row(row)
820                     # PrettyTable raises Exception
821                     except Exception as err:
822                         logging.warning(
823                             f"Error occurred while generating TXT table:\n{err}"
824                         )
825                 line_nr += 1
826             txt_table.align[u"Build Number:"] = u"l"
827         with open(f"{file_name}.txt", u"wt") as txt_file:
828             txt_file.write(str(txt_table))
829
830     for job_name, csv_table in csv_tables_l1.items():
831         file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
832         with open(f"{file_name}.csv", u"wt") as file_handler:
833             file_handler.writelines(csv_table)
834     for job_name, csv_table in csv_tables_l2.items():
835         file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
836         with open(f"{file_name}.csv", u"wt") as file_handler:
837             file_handler.writelines(csv_table)
838
839     # Evaluate result:
840     if anomaly_classifications:
841         test_reg_lst = []
842         nic_reg_lst = []
843         frmsize_reg_lst = []
844         trend_reg_lst = []
845         number_reg_lst = []
846         ltc_reg_lst = []
847         test_prog_lst = []
848         nic_prog_lst = []
849         frmsize_prog_lst = []
850         trend_prog_lst = []
851         number_prog_lst = []
852         ltc_prog_lst = []
853         result = u"PASS"
854
855         class MaxLens():
856             """Class to store the max lengths of strings displayed in
857             regressions and progressions.
858             """
859
860             def __init__(self, tst, nic, frmsize, trend, run, ltc):
861                 """Initialisation.
862
863                 :param tst: Name of the test.
864                 :param nic: NIC used in the test.
865                 :param frmsize: Frame size used in the test.
866                 :param trend: Trend Change.
867                 :param run: Number of runs for last trend.
868                 :param ltc: Regression or Progression
869                 """
870                 self.tst = tst
871                 self.nic = nic
872                 self.frmsize = frmsize
873                 self.trend = trend
874                 self.run = run
875                 self.ltc = ltc
876
877         max_len = MaxLens(0, 0, 0, 0, 0, 0)
878
879         for job_name, job_data in anomaly_classifications.items():
880             data = []
881             tb = u"-".join(job_name.split(u"-")[-2:])
882             for file in listdir(f"{spec.cpta[u'output-file']}"):
883                 if tb in file and u"performance-trending-dashboard" in \
884                         file and u"txt" in file:
885                     file_to_read = f"{spec.cpta[u'output-file']}/{file}"
886                     with open(f"{file_to_read}", u"rt") as f_in:
887                         data = data + f_in.readlines()
888
889             for test_name, classification in job_data.items():
890                 if classification != u"normal":
891                     if u"2n" in test_name:
892                         test_name = test_name.split("-", 2)
893                         tst = test_name[2].split(".")[-1]
894                         nic = test_name[1]
895                     else:
896                         test_name = test_name.split("-", 1)
897                         tst = test_name[1].split(".")[-1]
898                         nic = test_name[0].split(".")[-1]
899                     frmsize = tst.split("-")[0]
900                     tst = u"-".join(tst.split("-")[1:])
901                     tst_name = f"{nic}-{frmsize}-{tst}"
902                     if len(tst) > max_len.tst:
903                         max_len.tst = len(tst)
904                     if len(nic) > max_len.nic:
905                         max_len.nic = len(nic)
906                     if len(frmsize) > max_len.frmsize:
907                         max_len.frmsize = len(frmsize)
908
909                     for line in data:
910                         if tst_name in line:
911                             line = line.replace(" ", "")
912                             trend = line.split("|")[2]
913                             if len(str(trend)) > max_len.trend:
914                                 max_len.trend = len(str(trend))
915                             number = line.split("|")[3]
916                             if len(str(number)) > max_len.run:
917                                 max_len.run = len(str(number))
918                             ltc = line.split("|")[4]
919                             if len(str(ltc)) > max_len.ltc:
920                                 max_len.ltc = len(str(ltc))
921                             if classification == u'regression':
922                                 test_reg_lst.append(tst)
923                                 nic_reg_lst.append(nic)
924                                 frmsize_reg_lst.append(frmsize)
925                                 trend_reg_lst.append(trend)
926                                 number_reg_lst.append(number)
927                                 ltc_reg_lst.append(ltc)
928                             elif classification == u'progression':
929                                 test_prog_lst.append(tst)
930                                 nic_prog_lst.append(nic)
931                                 frmsize_prog_lst.append(frmsize)
932                                 trend_prog_lst.append(trend)
933                                 number_prog_lst.append(number)
934                                 ltc_prog_lst.append(ltc)
935
936                     if classification in (u"regression", u"outlier"):
937                         result = u"FAIL"
938
939             text = u""
940             for idx in range(len(test_reg_lst)):
941                 text += (
942                     f"{test_reg_lst[idx]}"
943                     f"{u' ' * (max_len.tst - len(test_reg_lst[idx]))}  "
944                     f"{nic_reg_lst[idx]}"
945                     f"{u' ' * (max_len.nic - len(nic_reg_lst[idx]))}  "
946                     f"{frmsize_reg_lst[idx].upper()}"
947                     f"{u' ' * (max_len.frmsize - len(frmsize_reg_lst[idx]))}  "
948                     f"{trend_reg_lst[idx]}"
949                     f"{u' ' * (max_len.trend - len(str(trend_reg_lst[idx])))}  "
950                     f"{number_reg_lst[idx]}"
951                     f"{u' ' * (max_len.run - len(str(number_reg_lst[idx])))}  "
952                     f"{ltc_reg_lst[idx]}"
953                     f"{u' ' * (max_len.ltc - len(str(ltc_reg_lst[idx])))}  "
954                     f"\n"
955                 )
956
957             file_name = \
958                 f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
959
960             try:
961                 with open(f"{file_name}", u'w') as txt_file:
962                     txt_file.write(text)
963             except IOError:
964                 logging.error(
965                     f"Not possible to write the file {file_name}.")
966
967             text = u""
968             for idx in range(len(test_prog_lst)):
969                 text += (
970                     f"{test_prog_lst[idx]}"
971                     f"{u' ' * (max_len.tst - len(test_prog_lst[idx]))}  "
972                     f"{nic_prog_lst[idx]}"
973                     f"{u' ' * (max_len.nic - len(nic_prog_lst[idx]))}  "
974                     f"{frmsize_prog_lst[idx].upper()}"
975                     f"{u' ' * (max_len.frmsize - len(frmsize_prog_lst[idx]))}  "
976                     f"{trend_prog_lst[idx]}"
977                     f"{u' ' * (max_len.trend -len(str(trend_prog_lst[idx])))}  "
978                     f"{number_prog_lst[idx]}"
979                     f"{u' ' * (max_len.run - len(str(number_prog_lst[idx])))}  "
980                     f"{ltc_prog_lst[idx]}"
981                     f"{u' ' * (max_len.ltc - len(str(ltc_prog_lst[idx])))}  "
982                     f"\n"
983                 )
984
985             file_name = \
986                 f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
987             try:
988                 with open(f"{file_name}", u'w') as txt_file:
989                     txt_file.write(text)
990             except IOError:
991                 logging.error(f"Not possible to write the file {file_name}.")
992
993     else:
994         result = u"FAIL"
995
996     logging.info(f"Partial results: {anomaly_classifications}")
997     logging.info(f"Result: {result}")
998
999     return result