Report: Fix title levels in auto generated files
[csit.git] / resources / tools / presentation / generator_files.py
# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Algorithms to generate files.
"""

from os.path import isfile
from collections import OrderedDict

import logging

from pal_utils import get_files, get_rst_title_char

RST_INCLUDE_TABLE = (u"\n.. only:: html\n\n"
                     u"    .. csv-table::\n"
                     u"        :header-rows: 1\n"
                     u"        :widths: auto\n"
                     u"        :align: center\n"
                     u"        :file: {file_html}\n"
                     u"\n.. only:: latex\n\n"
                     u"\n  .. raw:: latex\n\n"
                     u"      \\csvautolongtable{{{file_latex}}}\n\n")
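
# A sketch of what the template above expands to; the file names below are
# purely illustrative, not taken from a real report run:
#
#     RST_INCLUDE_TABLE.format(
#         file_html=u"example-ndrpdr.csv",
#         file_latex=u"_tmp/tables/example-ndrpdr.csv"
#     )
#
# yields a ".. only:: html" block with a csv-table directive reading the
# local CSV file, and a ".. only:: latex" block with a raw
# \csvautolongtable macro reading the full path, so each Sphinx builder
# includes the variant it can render.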


def generate_files(spec, data):
    """Generate all files specified in the specification file.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    generator = {
        u"file_details_split": file_details_split,
        u"file_details_split_html": file_details_split_html,
        u"file_test_results": file_test_results,
        u"file_test_results_html": file_test_results_html
    }

    logging.info(u"Generating the files ...")
    for file_spec in spec.files:
        try:
            generator[file_spec[u"algorithm"]](file_spec, data)
        except (NameError, KeyError) as err:
            logging.error(
                f"Probably algorithm {file_spec[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
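
# A minimal, illustrative sketch of one entry in spec.files that
# generate_files() dispatches on. Only the keys mirror what the code in this
# module reads (u"algorithm", u"output-file", u"dir-tables", u"data",
# u"data-start-level", u"order-chapters", u"order-sub-chapters", u"type",
# u"title"); the values are placeholders, not a real specification:
#
#     -
#       type: "file"
#       title: "Detailed Test Results"
#       algorithm: "file_details_split"
#       output-file: "<output directory>"
#       dir-tables:
#         - "<directory with generated tables>"
#       data:
#         - "<named data set>"
#       data-start-level: 4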


def _tests_in_suite(suite_name, tests):
    """Check if the suite includes tests.

    :param suite_name: Name of the suite to be checked.
    :param tests: Set of tests.
    :type suite_name: str
    :type tests: pandas.Series
    :returns: True if the suite includes at least one test, False otherwise.
    :rtype: bool
    """

    for key in tests.keys():
        if suite_name == tests[key][u"parent"]:
            return True
    return False
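
# Illustrative only: with a merged test set shaped roughly like
#
#     tests = pandas.Series({
#         u"tests.example-suite.64b-2t1c-example-ndrpdr": {
#             u"name": u"64b-2t1c-example-ndrpdr",
#             u"parent": u"example-suite",
#         },
#     })
#
# _tests_in_suite(u"example-suite", tests) returns True, because at least
# one test names that suite as its parent.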


def file_details_split(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with the algorithm file_details_split
    specified in the specification file.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format of the generated file(s); can be rst or html.
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    fileset_file_name = f"{file_spec[u'output-file']}"
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file set {fileset_file_name} ...")

    data_sets = file_spec.get(u"data", None)
    if not data_sets:
        logging.error(
            f"  No data sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    table_sets = file_spec.get(u"dir-tables", None)
    if not table_sets:
        logging.error(
            f"  No table sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    if len(data_sets) != len(table_sets):
        logging.error(
            f"  The number of data sets and the number of table sets for "
            f"{file_spec[u'output-file']} are not equal, exit."
        )
        return
    chapters = OrderedDict()
    for data_set, table_set in zip(data_sets, table_sets):

        logging.info(f"   Processing the table set {table_set}...")

        table_lst = None
        if frmt == u"html":
            table_lst = get_files(table_set, u".rst", full_path=True)
        elif frmt == u"rst":
            table_lst = get_files(table_set, u".csv", full_path=True)

        if not table_lst:
            logging.error(
                f"    No tables to include in {table_set}. Skipping."
            )
            return

        logging.info(u"    Creating the test data set...")
        tests = input_data.filter_data(
            element=file_spec,
            params=[u"name", u"parent", u"doc", u"type", u"level"],
            data=data_set,
            data_set=u"tests",
            continue_on_error=True
        )
        if tests.empty:
            return
        tests = input_data.merge_data(tests)

        logging.info(u"    Creating the suite data set...")
        suites = input_data.filter_data(
            element=file_spec,
            data=data_set,
            continue_on_error=True,
            data_set=u"suites"
        )
        if suites.empty:
            return
        suites = input_data.merge_data(suites)
        suites.sort_index(inplace=True)

        logging.info(u"    Generating files...")

        file_name = u""
        sub_chapter = u"-".join(table_set.split(u"_")[-2:])
        for suite_longname, suite in suites.items():

            suite_lvl = len(suite_longname.split(u"."))
            if suite_lvl < start_lvl:
                # Not interested in this suite
                continue

            if suite_lvl == start_lvl:
                # Our top-level suite
                chapter = suite_longname.split(u'.')[-1]
                file_name = f"{table_set}/{chapter}.rst"
                logging.info(f"    Writing file {file_name}")
                with open(file_name, u"a") as file_handler:
                    file_handler.write(rst_header)
                if chapters.get(chapter, None) is None:
                    chapters[chapter] = OrderedDict()
                chapters[chapter][sub_chapter] = file_name

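            # Underline for the sub-chapter heading: the title character is
            # picked from the suite depth relative to data-start-level,
            # offset by two (level 1 is used for the chapter title written
            # below), and the underline length matches the sub-chapter name.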
            title_line = get_rst_title_char(suite[u"level"] - start_lvl + 2) * \
                len(sub_chapter)
            with open(file_name, u"a") as file_handler:
                if not (u"-ndrpdr" in suite[u"name"] or
                        u"-mrr" in suite[u"name"] or
                        u"-dev" in suite[u"name"]):
                    file_handler.write(f"\n{sub_chapter}\n{title_line}\n")

                if _tests_in_suite(suite[u"name"], tests):
                    for tbl_file in table_lst:
                        if suite[u"name"] in tbl_file:
                            title_line = get_rst_title_char(
                                suite[u"level"] - start_lvl + 2
                            ) * len(suite[u"name"])
                            file_handler.write(
                                f"\n{suite[u'name']}\n{title_line}\n"
                            )
                            file_handler.write(
                                f"\n{suite[u'doc']}\n".
                                replace(u'|br|', u'\n\n -')
                            )
                            if frmt == u"html":
                                file_handler.write(
                                    f"\n.. include:: {tbl_file.split(u'/')[-1]}"
                                    f"\n"
                                )
                            elif frmt == u"rst":
                                file_handler.write(
                                    RST_INCLUDE_TABLE.format(
                                        file_latex=tbl_file,
                                        file_html=tbl_file.split(u"/")[-1])
                                )
                            break
    titles = {
        # VPP Perf, MRR
        u"container_memif": u"LXC/DRC Container Memif",
        u"crypto": u"IPsec IPv4 Routing",
        u"hoststack": u"Hoststack Testing",
        u"ip4": u"IPv4 Routing",
        u"ip4_tunnels": u"IPv4 Tunnels",
        u"ip6": u"IPv6 Routing",
        u"ip6_tunnels": u"IPv6 Tunnels",
        u"l2": u"L2 Ethernet Switching",
        u"lb": u"LoadBalancer",
        u"nfv_density": u"NFV Service Density",
        u"srv6": u"SRv6 Routing",
        u"vm_vhost": u"KVM VMs vhost-user",
        u"vts": u"Virtual Topology System",
        # VPP Device
        u"interfaces": u"Interfaces",
        u"l2bd": u"L2 Bridge-domain",
        u"l2patch": u"L2 Patch",
        u"l2xc": u"L2 Cross-connect",
    }
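    # The keys above are the top-level suite directory names; a chapter
    # without an entry falls back to its raw name via titles.get() below.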

    order_chapters = file_spec.get(u"order-chapters", None)
    if not order_chapters:
        order_chapters = chapters.keys()

    order_sub_chapters = file_spec.get(u"order-sub-chapters", None)

    for chapter in order_chapters:
        sub_chapters = chapters.get(chapter, None)
        if not sub_chapters:
            continue
        with open(f"{fileset_file_name}/index.rst", u"a") as file_handler:
            file_handler.write(f"    {chapter}\n")
        chapter_file_name = f"{fileset_file_name}/{chapter}.rst"
        if not isfile(chapter_file_name):
            with open(chapter_file_name, u"a") as file_handler:
                title = titles.get(chapter, chapter)
                file_handler.write(
                    f"{title}\n"
                    f"{get_rst_title_char(1) * len(title)}\n\n"
                    f".. toctree::\n\n"
                )

        if not order_sub_chapters:
            order_sub_chapters = sub_chapters.keys()
        for sub_chapter in order_sub_chapters:
            testbed = sub_chapters.get(sub_chapter, None)
            if not testbed:
                continue
            with open(chapter_file_name, u"a") as file_handler:
                file_handler.write(
                    f"    ../{u'/'.join(testbed.split(u'/')[-2:])}\n"
                )
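
# Illustrative layout produced by one file_details_split() run; the
# directory names stand in for the paths given in the specification:
#
#     <output-file>/index.rst        - one toctree line appended per chapter
#     <output-file>/<chapter>.rst    - chapter title plus a toctree pointing
#                                      at the per-testbed sub-chapter files
#     <dir-tables>/<chapter>.rst     - sub-chapter heading, suite titles,
#                                      docs and the included result tables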


def file_details_split_html(file_spec, input_data):
    """Generate the file(s) with the algorithm file_details_split_html
    specified in the specification file.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_details_split(file_spec, input_data, frmt=u"html")


def file_test_results(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with the algorithm file_test_results
    specified in the specification file.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format of the generated file(s); can be rst or html.
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    base_file_name = f"{file_spec[u'output-file']}"
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file {base_file_name} ...")

    if frmt == u"html":
        table_lst = get_files(file_spec[u"dir-tables"], u".rst", full_path=True)
    elif frmt == u"rst":
        table_lst = get_files(file_spec[u"dir-tables"], u".csv", full_path=True)
    else:
        return
    if not table_lst:
        logging.error(
            f"  No tables to include in {file_spec[u'dir-tables']}. Skipping."
        )
        return

    logging.info(
        f"    Creating the tests data set for the "
        f"{file_spec.get(u'type', u'')} {file_spec.get(u'title', u'')}."
    )

    tests = input_data.filter_data(
        file_spec,
        params=[u"name", u"parent", u"doc", u"type", u"level"],
        continue_on_error=True
    )
    if tests.empty:
        return
    tests = input_data.merge_data(tests)

    suites = input_data.filter_data(
        file_spec,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)
    suites.sort_index(inplace=True)

    file_name = u""
    for suite_longname, suite in suites.items():

        suite_lvl = len(suite_longname.split(u"."))
        if suite_lvl < start_lvl:
            # Not interested in this suite
            continue

        if suite_lvl == start_lvl:
            # Our top-level suite
            chapter = suite_longname.split(u'.')[-1]
            file_name = f"{base_file_name}/{chapter}.rst"
            logging.info(f"    Writing file {file_name}")
            with open(f"{base_file_name}/index.rst", u"a") as file_handler:
                file_handler.write(f"    {chapter}\n")
            with open(file_name, u"a") as file_handler:
                file_handler.write(rst_header)

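        # Same title-level arithmetic as in file_details_split(): suite
        # depth relative to data-start-level, offset by two, underlined to
        # the length of the suite name.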
        title_line = get_rst_title_char(suite[u"level"] - start_lvl + 2) * \
            len(suite[u"name"])
        with open(file_name, u"a") as file_handler:
            if not (u"-ndrpdr" in suite[u"name"] or
                    u"-mrr" in suite[u"name"] or
                    u"-dev" in suite[u"name"]):
                file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")

            if _tests_in_suite(suite[u"name"], tests):
                for tbl_file in table_lst:
                    if suite[u"name"] in tbl_file:
                        file_handler.write(
                            f"\n{suite[u'name']}\n{title_line}\n"
                        )
                        file_handler.write(
                            f"\n{suite[u'doc']}\n".replace(u'|br|', u'\n\n -')
                        )
                        if frmt == u"html":
                            file_handler.write(
                                f"\n.. include:: {tbl_file.split(u'/')[-1]}\n"
                            )
                        elif frmt == u"rst":
                            file_handler.write(
                                RST_INCLUDE_TABLE.format(
                                    file_latex=tbl_file,
                                    file_html=tbl_file.split(u"/")[-1])
                            )
                        break

    logging.info(u"  Done.")


def file_test_results_html(file_spec, input_data):
    """Generate the file(s) with the algorithm file_test_results_html
    specified in the specification file.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_test_results(file_spec, input_data, frmt=u"html")