Report: Detailed test results
[csit.git] / resources / tools / presentation / generator_files.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate files.
15 """
16
17 import re
18
19 from os.path import join
20 from collections import OrderedDict
21
22 import logging
23
24 from pal_utils import get_files, get_rst_title_char
25
26
# Sphinx snippet embedding one generated table, filled in via str.format with
# the u"file_html" and u"file_latex" keys: the html build includes the csv
# file through the csv-table directive, the latex build through raw
# \csvautolongtable (a csv table which may break across pages).
RST_INCLUDE_TABLE = (u"\n.. only:: html\n\n"
                     u"    .. csv-table::\n"
                     u"        :header-rows: 1\n"
                     u"        :widths: auto\n"
                     u"        :align: center\n"
                     u"        :file: {file_html}\n"
                     u"\n.. only:: latex\n\n"
                     u"\n  .. raw:: latex\n\n"
                     u"      \\csvautolongtable{{{file_latex}}}\n\n")

# Matches the NIC part of a suite name, e.g. u"10ge2p1x710-":
# group 1 is the speed/port prefix (u"10ge2p1"), group 2 the NIC short name
# (u"x710") used as the level-3 chapter key below.
REGEX_NIC_SHORT = re.compile(r'(\d*ge\dp\d)(\D*\d*[a-z]*)-')
38
39
def generate_files(spec, data):
    """Generate all files specified in the specification file.

    Each file specification names an algorithm; the matching generator
    function is looked up in a dispatch table and invoked. A failing file
    is logged and skipped so the remaining files are still generated.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification -> generator.
    generator = {
        u"file_details_split": file_details_split,
        u"file_details_split_html": file_details_split_html,
        u"file_test_results": file_test_results,
        u"file_test_results_html": file_test_results_html
    }

    logging.info(u"Generating the files ...")
    for file_spec in spec.files:
        algorithm = file_spec[u"algorithm"]
        generate = generator.get(algorithm, None)
        if generate is None:
            # Fix: look the algorithm up explicitly instead of catching
            # KeyError around the call, which also swallowed KeyErrors
            # raised *inside* a generator and misreported them.
            logging.error(f"Algorithm {algorithm} is not defined.")
            continue
        try:
            generate(file_spec, data)
        except (NameError, KeyError) as err:
            # Keep the original best-effort behavior: log and go on with
            # the next file instead of aborting the whole run.
            logging.error(
                f"Probably algorithm {algorithm} is not defined: {repr(err)}"
            )
    logging.info(u"Done.")
66
67
68 def _tests_in_suite(suite_name, tests):
69     """Check if the suite includes tests.
70
71     :param suite_name: Name of the suite to be checked.
72     :param tests: Set of tests
73     :type suite_name: str
74     :type tests: pandas.Series
75     :returns: True if the suite includes tests.
76     :rtype: bool
77     """
78
79     for key in tests.keys():
80         if suite_name == tests[key][u"parent"]:
81             return True
82     return False
83
84
def file_details_split(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_details_split
    specified in the specification file.

    The output is split into chapters on three levels:
    level 1 - the top level suite name (at u"data-start-level"),
    level 2 - derived from the table directory name,
    level 3 - the NIC short name extracted from the suite name.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format can be: rst or html
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    fileset_file_name = f"{file_spec[u'output-file']}"
    # Substitution definitions (|br|, |prein|, |preout|) written at the top
    # of every generated chapter file.
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    # Dot-separated suite-name depth treated as the top level chapter.
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file set {fileset_file_name} ...")

    data_sets = file_spec.get(u"data", None)
    if not data_sets:
        logging.error(
            f"  No data sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    table_sets = file_spec.get(u"dir-tables", None)
    if not table_sets:
        logging.error(
            f"  No table sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    # Data sets and table directories are paired positionally below.
    if len(data_sets) != len(table_sets):
        logging.error(
            f"  The number of data sets and the number of table sets for "
            f"{file_spec[u'output-file']} are not equal, exit."
        )
        return

    chapters = OrderedDict()
    for data_set, table_set in zip(data_sets, table_sets):

        logging.info(f"   Processing the table set {table_set}...")

        # html output embeds pre-rendered .rst tables, rst output the raw
        # .csv tables.
        table_lst = None
        if frmt == u"html":
            table_lst = get_files(table_set, u".rst", full_path=True)
        elif frmt == u"rst":
            table_lst = get_files(table_set, u".csv", full_path=True)

        if not table_lst:
            logging.error(
                f"    No tables to include in {table_set}. Skipping."
            )
            return

        logging.info(u"    Creating the test data set...")
        tests = input_data.filter_data(
            element=file_spec,
            params=[u"name", u"parent", u"doc", u"type", u"level"],
            data=data_set,
            data_set=u"tests",
            continue_on_error=True
        )
        if tests.empty:
            return
        tests = input_data.merge_data(tests)
        tests.sort_index(inplace=True)

        logging.info(u"    Creating the suite data set...")
        suites = input_data.filter_data(
            element=file_spec,
            data=data_set,
            continue_on_error=True,
            data_set=u"suites"
        )
        if suites.empty:
            return
        suites = input_data.merge_data(suites)
        suites.sort_index(inplace=True)

        logging.info(u"    Generating files...")

        chapter_l1 = u""
        # Level 2 chapter key comes from the last two u"_"-separated parts
        # of the table directory name.
        chapter_l2 = u"-".join(table_set.split(u"_")[-2:])
        for suite_longname, suite in suites.items():

            suite_lvl = len(suite_longname.split(u"."))
            if suite_lvl < start_lvl:
                # Not interested in this suite
                continue

            if suite_lvl == start_lvl:
                # Our top-level suite
                chapter_l1 = suite_longname.split(u'.')[-1]
                if chapters.get(chapter_l1, None) is None:
                    chapters[chapter_l1] = OrderedDict()
                if chapters[chapter_l1].get(chapter_l2, None) is None:
                    chapters[chapter_l1][chapter_l2] = OrderedDict()
                continue

            if _tests_in_suite(suite[u"name"], tests):
                groups = re.search(REGEX_NIC_SHORT, suite[u"name"])
                nic = groups.group(2) if groups else None
                if nic is None:
                    continue
                if chapters[chapter_l1][chapter_l2].get(nic, None) is None:
                    chapters[chapter_l1][chapter_l2][nic] = dict(
                        rst_file=f"{join(table_set, chapter_l1)}_{nic}.rst".
                        replace(u"2n1l-", u""),
                        tables=list()
                    )
                # Pair the first matching table with the suite doc; pop it
                # so the same table file is never used twice.
                for idx, tbl_file in enumerate(table_lst):
                    if suite[u"name"] in tbl_file:
                        chapters[chapter_l1][chapter_l2][nic][u"tables"].append(
                            (
                                table_lst.pop(idx),
                                suite[u"doc"].replace(u'|br|', u'\n\n -')
                            )
                        )
                        break

    # Fix: was a stray print() left over from debugging; keep the dump
    # available at debug level only.
    logging.debug(chapters)

    titles = {
        # VPP Perf, MRR
        u"container_memif": u"LXC/DRC Container Memif",
        u"crypto": u"IPsec IPv4 Routing",
        u"hoststack": u"Hoststack Testing",
        u"ip4": u"IPv4 Routing",
        u"ip4_tunnels": u"IPv4 Tunnels",
        u"ip6": u"IPv6 Routing",
        u"ip6_tunnels": u"IPv6 Tunnels",
        u"l2": u"L2 Ethernet Switching",
        u"lb": u"LoadBalancer",
        u"nfv_density": u"NFV Service Density",
        u"srv6": u"SRv6 Routing",
        u"vm_vhost": u"KVM VMs vhost-user",
        u"vts": u"Virtual Topology System",
        # VPP Device
        u"interfaces": u"Interfaces",
        u"l2bd": u"L2 Bridge-domain",
        u"l2patch": u"L2 Patch",
        u"l2xc": u"L2 Cross-connect",
    }

    order_chapters = file_spec.get(u"order-chapters", None)

    if order_chapters:
        order_1 = order_chapters.get(u"level-1", None)
        order_2 = order_chapters.get(u"level-2", None)
        order_3 = order_chapters.get(u"level-3", None)
    else:
        order_1 = None
        order_2 = None
        order_3 = None
    if not order_1:
        # Fix: the original left order_1 as None when u"order-chapters" was
        # not specified at all, raising TypeError in the loop below.
        order_1 = chapters.keys()

    for chapter_l1 in order_1:
        content_l1 = chapters.get(chapter_l1, None)
        if not content_l1:
            continue
        with open(f"{fileset_file_name}/index.rst", u"a") as file_handler:
            file_handler.write(f"    {chapter_l1}\n")
        l1_file_name = f"{join(fileset_file_name, chapter_l1)}.rst"
        title = titles.get(chapter_l1, chapter_l1)
        logging.info(f"   Generating {title} ...")
        with open(l1_file_name, u"w") as file_handler:
            file_handler.write(
                f"{title}\n"
                f"{get_rst_title_char(1) * len(title)}\n\n"
                f".. toctree::\n\n"
            )

        # Fix: compute the fallback ordering into per-iteration locals; the
        # original assigned it back to order_2 / order_3, so the keys of
        # the first chapter leaked into all the following chapters.
        l2_order = order_2 if order_2 else content_l1.keys()
        for chapter_l2 in l2_order:
            content_l2 = content_l1.get(chapter_l2, None)
            if not content_l2:
                continue
            l3_order = order_3 if order_3 else content_l2.keys()
            for chapter_l3 in l3_order:
                content_l3 = content_l2.get(chapter_l3, None)
                if not content_l3:
                    continue
                with open(l1_file_name, u"a") as file_handler:
                    item = u"/".join(content_l3[u'rst_file'].split(u'/')[-2:])
                    file_handler.write(f"    ../{item}\n")
                logging.info(f"    Writing the file {content_l3[u'rst_file']}")
                with open(content_l3[u'rst_file'], u"w+") as file_handler:
                    title = f"{chapter_l2}-{chapter_l3}"
                    file_handler.write(
                        f"{rst_header}\n"
                        f"{title}\n"
                        f"{get_rst_title_char(2) * len(title)}\n"
                    )
                    for table in content_l3[u'tables']:
                        title = table[0].split(u"/")[-1].split(u".")[0]
                        file_handler.write(
                            f"\n{title}\n"
                            f"{get_rst_title_char(3) * len(title)}\n"
                        )
                        file_handler.write(f"\n{table[1]}\n")
                        if frmt == u"html":
                            file_handler.write(
                                f"\n.. include:: {table[0].split(u'/')[-1]}"
                                f"\n"
                            )
                        elif frmt == u"rst":
                            file_handler.write(
                                RST_INCLUDE_TABLE.format(
                                    file_latex=table[0],
                                    file_html=table[0].split(u"/")[-1])
                            )
307
308
def file_details_split_html(file_spec, input_data):
    """Generate the file(s) with algorithms
    - file_details_split_html
    specified in the specification file.

    Convenience wrapper: delegates everything to file_details_split() with
    the output format set to html.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_details_split(file_spec, input_data, u"html")
320
321
def file_test_results(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_test_results
    specified in the specification file.

    One .rst file is created (appended to) per top-level suite found at
    u"data-start-level"; every deeper suite contributes a heading and, when
    it directly contains tests, its doc string and detailed-results table.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format can be: rst or html
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    base_file_name = f"{file_spec[u'output-file']}"
    # Substitution definitions (|br|, |prein|, |preout|) written once at the
    # top of each generated chapter file.
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    # Dot-separated suite-name depth treated as the top level chapter.
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file {base_file_name} ...")

    # html output embeds pre-rendered .rst tables, rst output the raw .csv
    # tables; any other format is silently ignored.
    if frmt == u"html":
        table_lst = get_files(file_spec[u"dir-tables"], u".rst", full_path=True)
    elif frmt == u"rst":
        table_lst = get_files(file_spec[u"dir-tables"], u".csv", full_path=True)
    else:
        return
    if not table_lst:
        logging.error(
            f"  No tables to include in {file_spec[u'dir-tables']}. Skipping."
        )
        return

    logging.info(
        f"    Creating the tests data set for the "
        f"{file_spec.get(u'type', u'')} {file_spec.get(u'title', u'')}."
    )

    tests = input_data.filter_data(
        file_spec,
        params=[u"name", u"parent", u"doc", u"type", u"level"],
        continue_on_error=True
    )
    if tests.empty:
        return
    tests = input_data.merge_data(tests)

    suites = input_data.filter_data(
        file_spec,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)
    # Sorted by long name so a top level suite should precede its children,
    # i.e. file_name below is set before it is opened for appending.
    suites.sort_index(inplace=True)

    file_name = u""
    for suite_longname, suite in suites.items():

        suite_lvl = len(suite_longname.split(u"."))
        if suite_lvl < start_lvl:
            # Not interested in this suite
            continue

        if suite_lvl == start_lvl:
            # Our top-level suite
            chapter = suite_longname.split(u'.')[-1]
            file_name = f"{base_file_name}/{chapter}.rst"
            logging.info(f"    Writing file {file_name}")
            # Register the chapter in the toctree kept in index.rst.
            with open(f"{base_file_name}/index.rst", u"a") as file_handler:
                file_handler.write(f"    {chapter}\n")
            with open(file_name, u"a") as file_handler:
                file_handler.write(rst_header)

        # Underline for the rst title; its character is chosen from the
        # suite depth relative to the start level.
        title_line = get_rst_title_char(suite[u"level"] - start_lvl + 2) * \
            len(suite[u"name"])
        with open(file_name, u"a") as file_handler:
            # Grouping suites get only a heading here; test suites
            # (-ndrpdr / -mrr / -dev) are written below with their table.
            if not (u"-ndrpdr" in suite[u"name"] or
                    u"-mrr" in suite[u"name"] or
                    u"-dev" in suite[u"name"]):
                file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")

            if _tests_in_suite(suite[u"name"], tests):
                # Embed the first table whose file name contains the suite
                # name, together with the suite doc string.
                for tbl_file in table_lst:
                    if suite[u"name"] in tbl_file:
                        file_handler.write(
                            f"\n{suite[u'name']}\n{title_line}\n"
                        )
                        file_handler.write(
                            f"\n{suite[u'doc']}\n".replace(u'|br|', u'\n\n -')
                        )
                        if frmt == u"html":
                            file_handler.write(
                                f"\n.. include:: {tbl_file.split(u'/')[-1]}\n"
                            )
                        elif frmt == u"rst":
                            file_handler.write(
                                RST_INCLUDE_TABLE.format(
                                    file_latex=tbl_file,
                                    file_html=tbl_file.split(u"/")[-1])
                            )
                        break

    logging.info(u"  Done.")
430
431
def file_test_results_html(file_spec, input_data):
    """Generate the file(s) with algorithms
    - file_test_results_html
    specified in the specification file.

    Convenience wrapper: delegates everything to file_test_results() with
    the output format set to html.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_test_results(file_spec, input_data, u"html")