feat(api): Use newest API messages after rls2402
[csit.git] / resources / tools / presentation / generator_files.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate files.
15 """
16
17 import re
18
19 from os.path import join
20 from collections import OrderedDict
21
22 import logging
23
24 from pal_utils import get_files, get_rst_title_char
25
26
# RST template embedding one generated CSV table into the output document:
# the HTML build includes the CSV via the "csv-table" directive, while the
# LaTeX (PDF) build pulls the same file in through \csvautolongtable.
RST_INCLUDE_TABLE = (u"\n.. only:: html\n\n"
                     u"    .. csv-table::\n"
                     u"        :header-rows: 1\n"
                     u"        :widths: auto\n"
                     u"        :align: center\n"
                     u"        :file: {file_html}\n"
                     u"\n.. only:: latex\n\n"
                     u"\n  .. raw:: latex\n\n"
                     u"      \\csvautolongtable{{{file_latex}}}\n\n")

# Extracts the short NIC token from a suite name: group(1) is the port/speed
# prefix (e.g. "10ge2p1"), group(2) the NIC model code (e.g. "x710") --
# presumably matching names like "...10ge2p1x710-..."; confirm against data.
REGEX_NIC_SHORT = re.compile(r'(\d*ge\dp\d)([a-z]*\d*[a-z]*)-')
38
39
def generate_files(spec, data):
    """Generate all files specified in the specification file.

    Each file specification is dispatched to the generator function named by
    its "algorithm" key. Failures are logged and the remaining file
    specifications are still processed (best effort).

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"file_details_split": file_details_split,
        u"file_details_split_html": file_details_split_html,
        u"file_test_results": file_test_results,
        u"file_test_results_html": file_test_results_html
    }

    logging.info(u"Generating the files ...")
    for file_spec in spec.files:
        # Resolve the algorithm up front so a missing/unknown algorithm is
        # reported accurately, and so a KeyError/NameError raised inside the
        # generator itself is not misattributed to the algorithm lookup.
        alg_name = file_spec.get(u"algorithm", None)
        generate = generator.get(alg_name, None)
        if generate is None:
            logging.error(f"The algorithm {alg_name} is not defined.")
            continue
        try:
            generate(file_spec, data)
        except (NameError, KeyError) as err:
            logging.error(
                f"Error while generating files by {alg_name}: {repr(err)}"
            )
    logging.info(u"Done.")
66
67
68 def _tests_in_suite(suite_name, tests):
69     """Check if the suite includes tests.
70
71     :param suite_name: Name of the suite to be checked.
72     :param tests: Set of tests
73     :type suite_name: str
74     :type tests: pandas.Series
75     :returns: True if the suite includes tests.
76     :rtype: bool
77     """
78
79     for key in tests.keys():
80         if suite_name == tests[key][u"parent"]:
81             return True
82     return False
83
84
def file_details_split(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_details_split
    specified in the specification file.

    Tables from each directory in "dir-tables" are grouped into a
    three-level structure (top-level suite / table-set suffix / NIC code)
    and written out as: an entry in the fileset "index.rst", one level-1 RST
    file per chapter with a toctree, and one RST file per NIC containing the
    included tables.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format can be: rst or html
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    fileset_file_name = f"{file_spec[u'output-file']}"
    # RST substitutions prepended to every generated per-NIC file (line
    # break and preformatted-text markers, rendered only in HTML output).
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    # Depth (number of dot-separated name parts) of the top-level suites.
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file set {fileset_file_name} ...")

    # "data" and "dir-tables" must be parallel lists of the same length.
    data_sets = file_spec.get(u"data", None)
    if not data_sets:
        logging.error(
            f"  No data sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    table_sets = file_spec.get(u"dir-tables", None)
    if not table_sets:
        logging.error(
            f"  No table sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    if len(data_sets) != len(table_sets):
        logging.error(
            f"  The number of data sets and the number of table sets for "
            f"{file_spec[u'output-file']} are not equal, exit."
        )
        return

    # chapters[level-1 chapter][level-2 chapter][nic] ->
    #     {u"rst_file": <output path>, u"tables": [(table file, doc), ...]}
    chapters = OrderedDict()
    for data_set, table_set in zip(data_sets, table_sets):

        logging.info(f"   Processing the table set {table_set}...")

        # HTML output includes generated .rst tables; plain RST output
        # includes the .csv source tables.
        table_lst = None
        if frmt == u"html":
            table_lst = get_files(table_set, u".rst", full_path=True)
        elif frmt == u"rst":
            table_lst = get_files(table_set, u".csv", full_path=True)

        if not table_lst:
            logging.error(
                f"    No tables to include in {table_set}. Skipping."
            )
            continue

        logging.info(u"    Creating the test data set...")
        tests = input_data.filter_data(
            element=file_spec,
            params=[u"name", u"parent", u"doc", u"type", u"level"],
            data=data_set,
            data_set=u"tests",
            continue_on_error=True
        )
        if tests.empty:
            continue
        tests = input_data.merge_data(tests)
        tests.sort_index(inplace=True)

        logging.info(u"    Creating the suite data set...")
        suites = input_data.filter_data(
            element=file_spec,
            data=data_set,
            continue_on_error=True,
            data_set=u"suites"
        )
        if suites.empty:
            continue
        suites = input_data.merge_data(suites)
        suites.sort_index(inplace=True)

        logging.info(u"    Generating files...")

        chapter_l1 = u""
        # Level-2 chapter name comes from the last two underscore-separated
        # parts of the table-set directory name.
        chapter_l2 = u"-".join(table_set.split(u"_")[-2:])
        for suite_longname, suite in suites.items():

            suite_lvl = len(suite_longname.split(u"."))
            if suite_lvl < start_lvl:
                # Not interested in this suite
                continue

            if suite_lvl == start_lvl:
                # Our top-level suite
                chapter_l1 = suite_longname.split(u'.')[-1]
                if chapters.get(chapter_l1, None) is None:
                    chapters[chapter_l1] = OrderedDict()
                if chapters[chapter_l1].get(chapter_l2, None) is None:
                    chapters[chapter_l1][chapter_l2] = OrderedDict()
                continue

            if _tests_in_suite(suite[u"name"], tests):
                # The NIC code is the second regex group of the suite name;
                # suites without a recognizable NIC are skipped.
                groups = re.search(REGEX_NIC_SHORT, suite[u"name"])
                nic = groups.group(2) if groups else None
                if nic is None:
                    continue
                if chapters[chapter_l1][chapter_l2].get(nic, None) is None:
                    chapters[chapter_l1][chapter_l2][nic] = dict(
                        rst_file=f"{join(table_set, chapter_l1)}_{nic}.rst".
                        replace(u"2n1l-", u"").replace(u"1n1l-", u""),
                        tables=list()
                    )
                # Attach the first matching table and remove it from the
                # candidate list; popping while enumerating is safe here
                # because the loop breaks immediately afterwards.  The suite
                # doc string is re-formatted into RST on the fly.
                for idx, tbl_file in enumerate(table_lst):
                    if suite[u"name"] in tbl_file:
                        chapters[chapter_l1][chapter_l2][nic][u"tables"].append(
                            (
                                table_lst.pop(idx),
                                suite[u"doc"].replace(u'"', u"'").
                                replace(u'\n', u' ').
                                replace(u'\r', u'').
                                replace(u'*[', u'\n\n - *[').
                                replace(u"*", u"**").
                                replace(u'\n\n - *[', u' - *[', 1)
                            )
                        )
                        break
    # Human-readable titles for the known level-1 chapters; unknown
    # chapters fall back to their raw name (see titles.get below).
    titles = {
        # VPP Perf, MRR
        u"container_memif": u"LXC/DRC Container Memif",
        u"crypto": u"IPsec IPv4 Routing",
        u"hoststack": u"Hoststack Testing",
        u"ip4": u"IPv4 Routing",
        u"ip4_tunnels": u"IPv4 Tunnels",
        u"ip6": u"IPv6 Routing",
        u"ip6_tunnels": u"IPv6 Tunnels",
        u"l2": u"L2 Ethernet Switching",
        u"lb": u"LoadBalancer",
        u"nfv_density": u"NFV Service Density",
        u"srv6": u"SRv6 Routing",
        u"vm_vhost": u"KVM VMs vhost-user",
        u"vts": u"Virtual Topology System",
        # VPP Device
        u"interfaces": u"Interfaces",
        u"l2bd": u"L2 Bridge-domain",
        u"l2patch": u"L2 Patch",
        u"l2xc": u"L2 Cross-connect",
    }

    # Optional explicit chapter ordering per level from the specification.
    order_chapters = file_spec.get(u"order-chapters", None)

    if order_chapters:
        order_1 = order_chapters.get(u"level-1", None)
        order_2 = order_chapters.get(u"level-2", None)
        order_3 = order_chapters.get(u"level-3", None)
        if not order_1:
            order_1 = chapters.keys()
    else:
        order_1 = None
        order_2 = None
        order_3 = None

    # NOTE(review): if "order-chapters" is absent, order_1 stays None and
    # the loop below raises TypeError -- presumably the specification always
    # provides it; confirm before relying on the None branch.
    for chapter_l1 in order_1:
        content_l1 = chapters.get(chapter_l1, None)
        if not content_l1:
            continue
        # Register the chapter in the fileset index and (re)write its
        # level-1 RST file with a toctree of per-NIC files.
        with open(f"{fileset_file_name}/index.rst", u"a") as file_handler:
            file_handler.write(f"    {chapter_l1}\n")
        l1_file_name = f"{join(fileset_file_name, chapter_l1)}.rst"
        title = titles.get(chapter_l1, chapter_l1)
        logging.info(f"   Generating {title} ...")
        with open(l1_file_name, u"w") as file_handler:
            file_handler.write(
                f"{title}\n"
                f"{get_rst_title_char(1) * len(title)}\n\n"
                f".. toctree::\n\n"
            )

        # NOTE(review): without an explicit level-2/3 order, order_2 and
        # order_3 are filled from the FIRST matching chapter's keys and then
        # reused for every subsequent chapter -- verify this is intended.
        if not order_2:
            order_2 = chapters[chapter_l1].keys()
        for chapter_l2 in order_2:
            content_l2 = content_l1.get(chapter_l2, None)
            if not content_l2:
                continue
            if not order_3:
                order_3 = chapters[chapter_l1][chapter_l2].keys()
            for chapter_l3 in order_3:
                content_l3 = content_l2.get(chapter_l3, None)
                if not content_l3:
                    continue
                # Toctree entry path is relative to the level-1 file,
                # hence the leading "../".
                with open(l1_file_name, u"a") as file_handler:
                    item = u"/".join(content_l3[u'rst_file'].split(u'/')[-2:])
                    file_handler.write(f"    ../{item}\n")
                logging.info(f"    Writing the file {content_l3[u'rst_file']}")
                with open(content_l3[u'rst_file'], u"w+") as file_handler:
                    title = f"{chapter_l2}-{chapter_l3}"
                    file_handler.write(
                        f"{rst_header}\n"
                        f"{title}\n"
                        f"{get_rst_title_char(2) * len(title)}\n"
                    )
                    # One section per table: title, reformatted suite doc,
                    # then the table itself (".. include::" for HTML,
                    # csv-table/LaTeX template for RST).
                    for table in content_l3[u'tables']:
                        title = table[0].split(u"/")[-1].split(u".")[0]
                        file_handler.write(
                            f"\n{title}\n"
                            f"{get_rst_title_char(3) * len(title)}\n"
                        )
                        file_handler.write(f"\n{table[1]}\n")
                        if frmt == u"html":
                            file_handler.write(
                                f"\n.. include:: {table[0].split(u'/')[-1]}"
                                f"\n"
                            )
                        elif frmt == u"rst":
                            file_handler.write(
                                RST_INCLUDE_TABLE.format(
                                    file_latex=table[0],
                                    file_html=table[0].split(u"/")[-1])
                            )
310
def file_details_split_html(file_spec, input_data):
    """Generate the file(s) with algorithms
    - file_details_split_html
    specified in the specification file.

    Thin wrapper delegating to ``file_details_split`` with HTML output.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    return file_details_split(file_spec, input_data, frmt=u"html")
322
323
def file_test_results(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_test_results
    specified in the specification file.

    One RST file per top-level suite is written into the "output-file"
    directory and registered in its "index.rst"; each suite section embeds
    the matching table from "dir-tables".

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format can be: rst or html
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    base_file_name = f"{file_spec[u'output-file']}"
    # RST substitutions prepended to every generated chapter file (line
    # break and preformatted-text markers, rendered only in HTML output).
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    # Depth (number of dot-separated name parts) of the top-level suites.
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file {base_file_name} ...")

    # HTML output includes generated .rst tables, RST output the .csv
    # sources; any other format is silently a no-op.
    if frmt == u"html":
        table_lst = get_files(file_spec[u"dir-tables"], u".rst", full_path=True)
    elif frmt == u"rst":
        table_lst = get_files(file_spec[u"dir-tables"], u".csv", full_path=True)
    else:
        return
    if not table_lst:
        logging.error(
            f"  No tables to include in {file_spec[u'dir-tables']}. Skipping."
        )
        return

    logging.info(
        f"    Creating the tests data set for the "
        f"{file_spec.get(u'type', u'')} {file_spec.get(u'title', u'')}."
    )

    tests = input_data.filter_data(
        file_spec,
        params=[u"name", u"parent", u"doc", u"type", u"level"],
        continue_on_error=True
    )
    if tests.empty:
        return
    tests = input_data.merge_data(tests)

    suites = input_data.filter_data(
        file_spec,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)
    suites.sort_index(inplace=True)

    file_name = u""
    for suite_longname, suite in suites.items():

        suite_lvl = len(suite_longname.split(u"."))
        if suite_lvl < start_lvl:
            # Not interested in this suite
            continue

        if suite_lvl == start_lvl:
            # Our top-level suite
            chapter = suite_longname.split(u'.')[-1]
            file_name = f"{base_file_name}/{chapter}.rst"
            logging.info(f"    Writing file {file_name}")
            # NOTE(review): both files are opened in append mode, so
            # re-running over an existing output directory accumulates
            # content -- presumably the directory starts empty; confirm.
            with open(f"{base_file_name}/index.rst", u"a") as file_handler:
                file_handler.write(f"    {chapter}\n")
            with open(file_name, u"a") as file_handler:
                file_handler.write(rst_header)

        # Underline character chosen by suite depth relative to the
        # top-level suite.
        title_line = get_rst_title_char(suite[u"level"] - start_lvl + 2) * \
            len(suite[u"name"])
        with open(file_name, u"a") as file_handler:
            # Grouping suites get a bare section title; test suites
            # (-ndrpdr/-mrr/-dev) are titled together with their table
            # in the branch below.
            if not (u"-ndrpdr" in suite[u"name"] or
                    u"-mrr" in suite[u"name"] or
                    u"-dev" in suite[u"name"]):
                file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")

            if _tests_in_suite(suite[u"name"], tests):
                # Embed only the first table whose path contains the suite
                # name: title, reformatted doc, then the table reference.
                for tbl_file in table_lst:
                    if suite[u"name"] in tbl_file:
                        file_handler.write(
                            f"\n{suite[u'name']}\n{title_line}\n"
                        )
                        file_handler.write(
                            f"\n{suite[u'doc']}\n".replace(u'|br|', u'\n\n -')
                        )
                        if frmt == u"html":
                            file_handler.write(
                                f"\n.. include:: {tbl_file.split(u'/')[-1]}\n"
                            )
                        elif frmt == u"rst":
                            file_handler.write(
                                RST_INCLUDE_TABLE.format(
                                    file_latex=tbl_file,
                                    file_html=tbl_file.split(u"/")[-1])
                            )
                        break

    logging.info(u"  Done.")
432
433
def file_test_results_html(file_spec, input_data):
    """Generate the file(s) with algorithms
    - file_test_results_html
    specified in the specification file.

    Thin wrapper delegating to ``file_test_results`` with HTML output.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    return file_test_results(file_spec, input_data, frmt=u"html")