Report: Suite doc format
[csit.git] / resources / tools / presentation / generator_files.py
1 # Copyright (c) 2021 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate files.
15 """
16
17 import re
18
19 from os.path import join
20 from collections import OrderedDict
21
22 import logging
23
24 from pal_utils import get_files, get_rst_title_char
25
26
# rST boilerplate appended after each generated table:
# - the html build pulls the table in as a ``csv-table`` directive,
# - the latex build includes it via ``\csvautolongtable``.
# Filled in with .format(file_html=..., file_latex=...); the doubled braces
# around {file_latex} survive .format() as literal braces in the LaTeX macro.
RST_INCLUDE_TABLE = (u"\n.. only:: html\n\n"
                     u"    .. csv-table::\n"
                     u"        :header-rows: 1\n"
                     u"        :widths: auto\n"
                     u"        :align: center\n"
                     u"        :file: {file_html}\n"
                     u"\n.. only:: latex\n\n"
                     u"\n  .. raw:: latex\n\n"
                     u"      \\csvautolongtable{{{file_latex}}}\n\n")

# Matches the short NIC token inside a suite name, e.g. "10ge2p1x710-":
# group(1) is the speed/port prefix ("10ge2p1"), group(2) the NIC model
# ("x710") -- presumably always followed by a dash in suite names; the
# trailing "-" is required by the pattern but not captured.
REGEX_NIC_SHORT = re.compile(r'(\d*ge\dp\d)([a-z]*\d*[a-z]*)-')
38
39
def generate_files(spec, data):
    """Generate all files specified in the specification file.

    Each file specification selects its generator by the u"algorithm" key;
    unknown or missing algorithms are logged and skipped, they do not abort
    the remaining files.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name from the specification --> generator.
    generator = {
        u"file_details_split": file_details_split,
        u"file_details_split_html": file_details_split_html,
        u"file_test_results": file_test_results,
        u"file_test_results_html": file_test_results_html
    }

    logging.info(u"Generating the files ...")
    for file_spec in spec.files:
        # Fetch the algorithm once with .get(): the original code indexed
        # file_spec[u'algorithm'] again inside the error message, so a
        # missing u"algorithm" key raised a second, uncaught KeyError while
        # reporting the first one.
        algorithm = file_spec.get(u"algorithm", None)
        try:
            generator[algorithm](file_spec, data)
        except (NameError, KeyError) as err:
            logging.error(
                f"Probably algorithm {algorithm} is not defined: {repr(err)}"
            )
    logging.info(u"Done.")
66
67
68 def _tests_in_suite(suite_name, tests):
69     """Check if the suite includes tests.
70
71     :param suite_name: Name of the suite to be checked.
72     :param tests: Set of tests
73     :type suite_name: str
74     :type tests: pandas.Series
75     :returns: True if the suite includes tests.
76     :rtype: bool
77     """
78
79     for key in tests.keys():
80         if suite_name == tests[key][u"parent"]:
81             return True
82     return False
83
84
def file_details_split(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_details_split
    specified in the specification file.

    First pass collects, per level-1 chapter / level-2 chapter / NIC, the
    table files matching each suite; second pass writes the index, the
    per-chapter toctree files and the per-NIC rst files.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format can be: rst or html
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    fileset_file_name = f"{file_spec[u'output-file']}"
    # rST substitution definitions prepended to every generated file; suite
    # "doc" strings rely on the |br|, |prein| and |preout| markers.
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    # Depth (number of dot-separated name components) of the top-level
    # (chapter) suites.
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file set {fileset_file_name} ...")

    data_sets = file_spec.get(u"data", None)
    if not data_sets:
        logging.error(
            f"  No data sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    table_sets = file_spec.get(u"dir-tables", None)
    if not table_sets:
        logging.error(
            f"  No table sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    # Data sets and table directories are paired positionally below.
    if len(data_sets) != len(table_sets):
        logging.error(
            f"  The number of data sets and the number of table sets for "
            f"{file_spec[u'output-file']} are not equal, exit."
        )
        return

    # chapters[level-1][level-2][nic] --> {rst_file, tables}
    chapters = OrderedDict()
    for data_set, table_set in zip(data_sets, table_sets):

        logging.info(f"   Processing the table set {table_set}...")

        # html includes the generated .rst tables, rst includes raw .csv.
        table_lst = None
        if frmt == u"html":
            table_lst = get_files(table_set, u".rst", full_path=True)
        elif frmt == u"rst":
            table_lst = get_files(table_set, u".csv", full_path=True)

        if not table_lst:
            logging.error(
                f"    No tables to include in {table_set}. Skipping."
            )
            continue

        logging.info(u"    Creating the test data set...")
        tests = input_data.filter_data(
            element=file_spec,
            params=[u"name", u"parent", u"doc", u"type", u"level"],
            data=data_set,
            data_set=u"tests",
            continue_on_error=True
        )
        if tests.empty:
            continue
        tests = input_data.merge_data(tests)
        tests.sort_index(inplace=True)

        logging.info(u"    Creating the suite data set...")
        suites = input_data.filter_data(
            element=file_spec,
            data=data_set,
            continue_on_error=True,
            data_set=u"suites"
        )
        if suites.empty:
            continue
        suites = input_data.merge_data(suites)
        # Sorting makes each top-level suite precede its child suites.
        suites.sort_index(inplace=True)

        logging.info(u"    Generating files...")

        chapter_l1 = u""
        # Level-2 chapter name derived from the last two "_"-separated
        # components of the table directory, e.g. ".../foo_2n_skx" -> "2n-skx".
        chapter_l2 = u"-".join(table_set.split(u"_")[-2:])
        for suite_longname, suite in suites.items():

            suite_lvl = len(suite_longname.split(u"."))
            if suite_lvl < start_lvl:
                # Not interested in this suite
                continue

            if suite_lvl == start_lvl:
                # Our top-level suite: open the chapter buckets and move on.
                chapter_l1 = suite_longname.split(u'.')[-1]
                if chapters.get(chapter_l1, None) is None:
                    chapters[chapter_l1] = OrderedDict()
                if chapters[chapter_l1].get(chapter_l2, None) is None:
                    chapters[chapter_l1][chapter_l2] = OrderedDict()
                continue

            if _tests_in_suite(suite[u"name"], tests):
                # Suites without a recognizable NIC token are skipped.
                groups = re.search(REGEX_NIC_SHORT, suite[u"name"])
                nic = groups.group(2) if groups else None
                if nic is None:
                    continue
                if chapters[chapter_l1][chapter_l2].get(nic, None) is None:
                    chapters[chapter_l1][chapter_l2][nic] = dict(
                        rst_file=f"{join(table_set, chapter_l1)}_{nic}.rst".
                        replace(u"2n1l-", u"").replace(u"1n1l-", u""),
                        tables=list()
                    )
                # Move the first matching table out of table_lst so it can
                # not be matched again by another suite (pop + break keeps
                # the enumerate safe).
                for idx, tbl_file in enumerate(table_lst):
                    if suite[u"name"] in tbl_file:
                        chapters[chapter_l1][chapter_l2][nic][u"tables"].append(
                            (table_lst.pop(idx), suite[u"doc"])
                        )
                        break
    # Human-readable titles for the known level-1 chapters; unknown
    # chapters fall back to their raw name.
    titles = {
        # VPP Perf, MRR
        u"container_memif": u"LXC/DRC Container Memif",
        u"crypto": u"IPsec IPv4 Routing",
        u"hoststack": u"Hoststack Testing",
        u"ip4": u"IPv4 Routing",
        u"ip4_tunnels": u"IPv4 Tunnels",
        u"ip6": u"IPv6 Routing",
        u"ip6_tunnels": u"IPv6 Tunnels",
        u"l2": u"L2 Ethernet Switching",
        u"lb": u"LoadBalancer",
        u"nfv_density": u"NFV Service Density",
        u"srv6": u"SRv6 Routing",
        u"vm_vhost": u"KVM VMs vhost-user",
        u"vts": u"Virtual Topology System",
        # VPP Device
        u"interfaces": u"Interfaces",
        u"l2bd": u"L2 Bridge-domain",
        u"l2patch": u"L2 Patch",
        u"l2xc": u"L2 Cross-connect",
    }

    order_chapters = file_spec.get(u"order-chapters", None)

    if order_chapters:
        order_1 = order_chapters.get(u"level-1", None)
        order_2 = order_chapters.get(u"level-2", None)
        order_3 = order_chapters.get(u"level-3", None)
        if not order_1:
            order_1 = chapters.keys()
    else:
        # Fix: the original assigned None here, so a specification without
        # u"order-chapters" crashed with TypeError on the for-loop below.
        # Default to insertion order of the collected chapters, matching
        # the partial-specification branch above.
        order_1 = chapters.keys()
        order_2 = None
        order_3 = None

    for chapter_l1 in order_1:
        content_l1 = chapters.get(chapter_l1, None)
        if not content_l1:
            continue
        with open(f"{fileset_file_name}/index.rst", u"a") as file_handler:
            file_handler.write(f"    {chapter_l1}\n")
        l1_file_name = f"{join(fileset_file_name, chapter_l1)}.rst"
        title = titles.get(chapter_l1, chapter_l1)
        logging.info(f"   Generating {title} ...")
        with open(l1_file_name, u"w") as file_handler:
            file_handler.write(
                f"{title}\n"
                f"{get_rst_title_char(1) * len(title)}\n\n"
                f".. toctree::\n\n"
            )

        # NOTE(review): once order_2 / order_3 fall back to a chapter's own
        # keys they stay bound for all following chapters, so later chapters
        # are iterated with the first chapter's key order -- behavior kept
        # as-is; confirm whether per-chapter defaults were intended.
        if not order_2:
            order_2 = chapters[chapter_l1].keys()
        for chapter_l2 in order_2:
            content_l2 = content_l1.get(chapter_l2, None)
            if not content_l2:
                continue
            if not order_3:
                order_3 = chapters[chapter_l1][chapter_l2].keys()
            for chapter_l3 in order_3:
                content_l3 = content_l2.get(chapter_l3, None)
                if not content_l3:
                    continue
                # Add the NIC file to the chapter's toctree (path relative
                # to the chapter file, hence the "../" prefix).
                with open(l1_file_name, u"a") as file_handler:
                    item = u"/".join(content_l3[u'rst_file'].split(u'/')[-2:])
                    file_handler.write(f"    ../{item}\n")
                logging.info(f"    Writing the file {content_l3[u'rst_file']}")
                with open(content_l3[u'rst_file'], u"w+") as file_handler:
                    title = f"{chapter_l2}-{chapter_l3}"
                    file_handler.write(
                        f"{rst_header}\n"
                        f"{title}\n"
                        f"{get_rst_title_char(2) * len(title)}\n"
                    )
                    for table in content_l3[u'tables']:
                        # Table title = file name without directory/suffix.
                        title = table[0].split(u"/")[-1].split(u".")[0]
                        file_handler.write(
                            f"\n{title}\n"
                            f"{get_rst_title_char(3) * len(title)}\n"
                        )
                        file_handler.write(f"\n{table[1]}\n")
                        if frmt == u"html":
                            file_handler.write(
                                f"\n.. include:: {table[0].split(u'/')[-1]}"
                                f"\n"
                            )
                        elif frmt == u"rst":
                            file_handler.write(
                                RST_INCLUDE_TABLE.format(
                                    file_latex=table[0],
                                    file_html=table[0].split(u"/")[-1])
                            )
301
302
def file_details_split_html(file_spec, input_data):
    """Generate the file(s) with algorithms
    - file_details_split_html
    specified in the specification file.

    Thin wrapper around file_details_split, forcing the html format.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_details_split(file_spec, input_data, frmt=u"html")
314
315
def file_test_results(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_test_results
    specified in the specification file.

    Writes one rst file per top-level suite (chapter) under the output
    directory, appending each chapter to index.rst, and includes the table
    matching every test suite.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format can be: rst or html
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    base_file_name = f"{file_spec[u'output-file']}"
    # rST substitution definitions prepended to every chapter file; suite
    # "doc" strings use the |br|, |prein| and |preout| markers.
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    # Depth (number of dot-separated name components) of the top-level
    # (chapter) suites.
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file {base_file_name} ...")

    # html includes the generated .rst tables, rst includes the raw .csv
    # tables; any other format is silently ignored.
    if frmt == u"html":
        table_lst = get_files(file_spec[u"dir-tables"], u".rst", full_path=True)
    elif frmt == u"rst":
        table_lst = get_files(file_spec[u"dir-tables"], u".csv", full_path=True)
    else:
        return
    if not table_lst:
        logging.error(
            f"  No tables to include in {file_spec[u'dir-tables']}. Skipping."
        )
        return

    logging.info(
        f"    Creating the tests data set for the "
        f"{file_spec.get(u'type', u'')} {file_spec.get(u'title', u'')}."
    )

    tests = input_data.filter_data(
        file_spec,
        params=[u"name", u"parent", u"doc", u"type", u"level"],
        continue_on_error=True
    )
    if tests.empty:
        return
    tests = input_data.merge_data(tests)

    suites = input_data.filter_data(
        file_spec,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)
    # Sorting makes each top-level suite precede its child suites, so
    # file_name below is set before any child suite is written.
    suites.sort_index(inplace=True)

    file_name = u""
    for suite_longname, suite in suites.items():

        suite_lvl = len(suite_longname.split(u"."))
        if suite_lvl < start_lvl:
            # Not interested in this suite
            continue

        if suite_lvl == start_lvl:
            # Our top-level suite: start its chapter file and register it
            # in the index toctree.
            chapter = suite_longname.split(u'.')[-1]
            file_name = f"{base_file_name}/{chapter}.rst"
            logging.info(f"    Writing file {file_name}")
            with open(f"{base_file_name}/index.rst", u"a") as file_handler:
                file_handler.write(f"    {chapter}\n")
            with open(file_name, u"a") as file_handler:
                file_handler.write(rst_header)

        # Underline length matches the title; underline character encodes
        # the suite's nesting depth relative to the chapter level.
        title_line = get_rst_title_char(suite[u"level"] - start_lvl + 2) * \
            len(suite[u"name"])
        # NOTE(review): if a suite deeper than start_lvl ever preceded its
        # chapter suite, file_name would still be u"" and open() would
        # fail -- presumably prevented by the sort above; verify.
        with open(file_name, u"a") as file_handler:
            # Leaf test suites (-ndrpdr/-mrr/-dev) get their title written
            # together with their table below; only intermediate suites get
            # a bare section title here.
            if not (u"-ndrpdr" in suite[u"name"] or
                    u"-mrr" in suite[u"name"] or
                    u"-dev" in suite[u"name"]):
                file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")

            if _tests_in_suite(suite[u"name"], tests):
                # Include the first table whose path contains the suite
                # name, preceded by the suite title and documentation
                # (|br| markers become list-item line breaks).
                for tbl_file in table_lst:
                    if suite[u"name"] in tbl_file:
                        file_handler.write(
                            f"\n{suite[u'name']}\n{title_line}\n"
                        )
                        file_handler.write(
                            f"\n{suite[u'doc']}\n".replace(u'|br|', u'\n\n -')
                        )
                        if frmt == u"html":
                            file_handler.write(
                                f"\n.. include:: {tbl_file.split(u'/')[-1]}\n"
                            )
                        elif frmt == u"rst":
                            file_handler.write(
                                RST_INCLUDE_TABLE.format(
                                    file_latex=tbl_file,
                                    file_html=tbl_file.split(u"/")[-1])
                            )
                        break

    logging.info(u"  Done.")
424
425
def file_test_results_html(file_spec, input_data):
    """Generate the file(s) with algorithms
    - file_test_results_html
    specified in the specification file.

    Thin wrapper around file_test_results, forcing the html format.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_test_results(file_spec, input_data, frmt=u"html")