5bc4097ca4757e0aa1ae3fa797fc275fbe879e24
[csit.git] / resources / tools / presentation / generator_files.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate files.
15 """
16
17 from os.path import isfile
18 from collections import OrderedDict
19
20 import logging
21
22 from pal_utils import get_files, get_rst_title_char
23
24
# reST template used to embed a generated table into an output file.
# The html build includes the csv file via the ``csv-table`` directive
# (``{file_html}`` is a file name relative to the including document),
# while the latex build pulls the full-path csv via raw LaTeX
# ``\csvautolongtable`` (``{file_latex}``). Filled in via ``str.format``.
RST_INCLUDE_TABLE = (u"\n.. only:: html\n\n"
                     u"    .. csv-table::\n"
                     u"        :header-rows: 1\n"
                     u"        :widths: auto\n"
                     u"        :align: center\n"
                     u"        :file: {file_html}\n"
                     u"\n.. only:: latex\n\n"
                     u"\n  .. raw:: latex\n\n"
                     u"      \\csvautolongtable{{{file_latex}}}\n\n")
34
35
def generate_files(spec, data):
    """Generate all files specified in the specification file.

    Each file specification names its generator algorithm; unknown
    algorithms and generator failures are logged and skipped so one
    broken file spec does not abort the whole run.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    # Dispatch table: algorithm name (from the specification) -> generator.
    generator = {
        u"file_details_split": file_details_split,
        u"file_details_split_html": file_details_split_html,
        u"file_test_results": file_test_results,
        u"file_test_results_html": file_test_results_html
    }

    logging.info(u"Generating the files ...")
    for file_spec in spec.files:
        # Resolve the algorithm first so that a KeyError raised while the
        # generator itself is running is not misreported as an unknown
        # algorithm (the original caught both in one except clause).
        algorithm = generator.get(file_spec[u"algorithm"], None)
        if algorithm is None:
            logging.error(
                f"The algorithm {file_spec[u'algorithm']} is not defined."
            )
            continue
        try:
            algorithm(file_spec, data)
        except (NameError, KeyError) as err:
            # Best-effort: log and continue with the next file spec.
            logging.error(
                f"The algorithm {file_spec[u'algorithm']} failed: {repr(err)}"
            )
    logging.info(u"Done.")
62
63
64 def _tests_in_suite(suite_name, tests):
65     """Check if the suite includes tests.
66
67     :param suite_name: Name of the suite to be checked.
68     :param tests: Set of tests
69     :type suite_name: str
70     :type tests: pandas.Series
71     :returns: True if the suite includes tests.
72     :rtype: bool
73     """
74
75     for key in tests.keys():
76         if suite_name == tests[key][u"parent"]:
77             return True
78     return False
79
80
def file_details_split(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_details_split
    specified in the specification file.

    Walks paired (data set, table directory) entries, writes one .rst file
    per top-level suite ("chapter") per table set ("sub-chapter"), then
    writes the fileset index.rst and per-chapter toctree files.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format can be: rst or html
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    # Output directory of the whole file set (index.rst + chapter files).
    fileset_file_name = f"{file_spec[u'output-file']}"
    # reST substitutions prepended to every generated chapter file so that
    # |br|, |prein| and |preout| resolve in the html build.
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    # Depth (dot-separated components of the suite long name) at which a
    # suite becomes a chapter; shallower suites are skipped.
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file set {fileset_file_name} ...")

    data_sets = file_spec.get(u"data", None)
    if not data_sets:
        logging.error(
            f"  No data sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    table_sets = file_spec.get(u"dir-tables", None)
    if not table_sets:
        logging.error(
            f"  No table sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    # data_sets and table_sets are consumed pairwise via zip() below, so
    # they must be the same length.
    if len(data_sets) != len(table_sets):
        logging.error(
            f"  The number of data sets and the number of table sets for "
            f"{file_spec[u'output-file']} are not equal, exit."
        )
        return

    # chapter name -> OrderedDict(sub_chapter name -> generated file path);
    # filled in the generation loop, consumed by the index/toctree loop.
    chapters = OrderedDict()
    for data_set, table_set in zip(data_sets, table_sets):

        logging.info(f"   Processing the table set {table_set}...")

        # html embeds pre-rendered .rst tables; rst embeds the raw .csv
        # via RST_INCLUDE_TABLE. Any other frmt leaves table_lst None and
        # is rejected just below.
        table_lst = None
        if frmt == u"html":
            table_lst = get_files(table_set, u".rst", full_path=True)
        elif frmt == u"rst":
            table_lst = get_files(table_set, u".csv", full_path=True)

        if not table_lst:
            logging.error(
                f"    No tables to include in {table_set}. Skipping."
            )
            return

        logging.info(u"    Creating the test data set...")
        tests = input_data.filter_data(
            element=file_spec,
            params=[u"name", u"parent", u"doc", u"type", u"level"],
            data=data_set,
            data_set=u"tests",
            continue_on_error=True
        )
        if tests.empty:
            return
        tests = input_data.merge_data(tests)

        logging.info(u"    Creating the suite data set...")
        suites = input_data.filter_data(
            element=file_spec,
            data=data_set,
            continue_on_error=True,
            data_set=u"suites"
        )
        if suites.empty:
            return
        suites = input_data.merge_data(suites)
        suites.sort_index(inplace=True)

        logging.info(u"    Generating files...")

        file_name = u""
        # Sub-chapter label derived from the last two underscore-separated
        # components of the table directory name (e.g. a testbed id).
        sub_chapter = u"-".join(table_set.split(u"_")[-2:])
        for suite_longname, suite in suites.items():

            suite_lvl = len(suite_longname.split(u"."))
            if suite_lvl < start_lvl:
                # Not interested in this suite
                continue

            if suite_lvl == start_lvl:
                # Our top-level suite
                chapter = suite_longname.split(u'.')[-1]
                file_name = f"{table_set}/{chapter}.rst"
                logging.info(f"    Writing file {file_name}")
                # Opened in append mode: re-runs and multiple top-level
                # suites accumulate into the same chapter file.
                with open(file_name, u"a") as file_handler:
                    file_handler.write(rst_header)
                if chapters.get(chapter, None) is None:
                    chapters[chapter] = OrderedDict()
                chapters[chapter][sub_chapter] = file_name

            # Underline character depends on the suite's depth relative to
            # start_lvl; the underline must span the whole heading text.
            title_line = get_rst_title_char(suite[u"level"] - start_lvl + 2) * \
                len(sub_chapter)
            with open(file_name, u"a") as file_handler:
                # Container suites (names without a test-type suffix) get
                # the sub-chapter heading instead of a suite heading.
                if not (u"-ndrpdr" in suite[u"name"] or
                        u"-mrr" in suite[u"name"] or
                        u"-dev" in suite[u"name"]):
                    file_handler.write(f"\n{sub_chapter}\n{title_line}\n")

                if _tests_in_suite(suite[u"name"], tests):
                    # Include only the first table whose path contains the
                    # suite name; break below stops after that match.
                    for tbl_file in table_lst:
                        if suite[u"name"] in tbl_file:
                            file_handler.write(
                                f"\n{suite[u'name']}\n{title_line}\n"
                            )
                            # |br| markers in the suite doc become blank
                            # line + bullet in the reST output.
                            file_handler.write(
                                f"\n{suite[u'doc']}\n".
                                replace(u'|br|', u'\n\n -')
                            )
                            if frmt == u"html":
                                file_handler.write(
                                    f"\n.. include:: {tbl_file.split(u'/')[-1]}"
                                    f"\n"
                                )
                            elif frmt == u"rst":
                                file_handler.write(
                                    RST_INCLUDE_TABLE.format(
                                        file_latex=tbl_file,
                                        file_html=tbl_file.split(u"/")[-1])
                                )
                            break
    # Human-readable chapter titles; chapters not listed here fall back to
    # the raw chapter (directory) name via titles.get(chapter, chapter).
    titles = {
        # VPP Perf, MRR
        u"container_memif": u"LXC/DRC Container Memif",
        u"crypto": u"IPsec IPv4 Routing",
        u"hoststack": u"Hoststack Testing",
        u"ip4": u"IPv4 Routing",
        u"ip4_tunnels": u"IPv4 Tunnels",
        u"ip6": u"IPv6 Routing",
        u"ip6_tunnels": u"IPv6 Tunnels",
        u"l2": u"L2 Ethernet Switching",
        u"lb": u"LoadBalancer",
        u"nfv_density": u"NFV Service Density",
        u"srv6": u"SRv6 Routing",
        u"vm_vhost": u"KVM VMs vhost-user",
        u"vts": u"Virtual Topology System",
        # VPP Device
        u"interfaces": u"Interfaces",
        u"l2bd": u"L2 Bridge-domain",
        u"l2patch": u"L2 Patch",
        u"l2xc": u"L2 Cross-connect",
    }

    order_chapters = file_spec.get(u"order-chapters", None)
    if not order_chapters:
        order_chapters = chapters.keys()

    order_sub_chapters = file_spec.get(u"order-sub-chapters", None)

    for chapter in order_chapters:
        sub_chapters = chapters.get(chapter, None)
        if not sub_chapters:
            continue
        with open(f"{fileset_file_name}/index.rst", u"a") as file_handler:
            file_handler.write(f"    {chapter}\n")
        chapter_file_name = f"{fileset_file_name}/{chapter}.rst"
        # Write the chapter title and toctree header only once per file.
        if not isfile(chapter_file_name):
            with open(chapter_file_name, u"a") as file_handler:
                title = titles.get(chapter, chapter)
                file_handler.write(
                    f"{title}\n"
                    f"{get_rst_title_char(2) * len(title)}\n\n"
                    f".. toctree::\n\n"
                )

        # NOTE(review): when "order-sub-chapters" is not specified, this
        # binds order_sub_chapters to the FIRST processed chapter's key
        # view, and later chapters are then iterated in that first
        # chapter's sub-chapter order — looks unintended; confirm before
        # changing.
        if not order_sub_chapters:
            order_sub_chapters = sub_chapters.keys()
        for sub_chapter in order_sub_chapters:
            testbed = sub_chapters.get(sub_chapter, None)
            if not testbed:
                continue
            with open(chapter_file_name, u"a") as file_handler:
                # toctree entry pointing at <table_set dir>/<chapter>.rst,
                # relative to the fileset directory.
                file_handler.write(
                    f"    ../{u'/'.join(testbed.split(u'/')[-2:])}\n"
                )
273
274
def file_details_split_html(file_spec, input_data):
    """Generate the file(s) for the ``file_details_split_html`` algorithm.

    Thin wrapper which delegates to :func:`file_details_split` with the
    output format forced to html.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_details_split(file_spec=file_spec, input_data=input_data,
                       frmt=u"html")
286
287
def file_test_results(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_test_results
    specified in the specification file.

    Single-table-directory variant of the detailed-results generation:
    writes one .rst file per top-level suite ("chapter") under the output
    directory and appends each chapter to index.rst as it is created.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format can be: rst or html
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    # Output directory for index.rst and the per-chapter files.
    base_file_name = f"{file_spec[u'output-file']}"
    # reST substitutions prepended to every generated chapter file so that
    # |br|, |prein| and |preout| resolve in the html build.
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    # Depth (dot-separated components of the suite long name) at which a
    # suite becomes a chapter; shallower suites are skipped.
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file {base_file_name} ...")

    # html embeds pre-rendered .rst tables; rst embeds the raw .csv via
    # RST_INCLUDE_TABLE; any other format is silently rejected.
    if frmt == u"html":
        table_lst = get_files(file_spec[u"dir-tables"], u".rst", full_path=True)
    elif frmt == u"rst":
        table_lst = get_files(file_spec[u"dir-tables"], u".csv", full_path=True)
    else:
        return
    if not table_lst:
        logging.error(
            f"  No tables to include in {file_spec[u'dir-tables']}. Skipping."
        )
        return

    logging.info(
        f"    Creating the tests data set for the "
        f"{file_spec.get(u'type', u'')} {file_spec.get(u'title', u'')}."
    )

    tests = input_data.filter_data(
        file_spec,
        params=[u"name", u"parent", u"doc", u"type", u"level"],
        continue_on_error=True
    )
    if tests.empty:
        return
    tests = input_data.merge_data(tests)

    suites = input_data.filter_data(
        file_spec,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)
    suites.sort_index(inplace=True)

    file_name = u""
    for suite_longname, suite in suites.items():

        suite_lvl = len(suite_longname.split(u"."))
        if suite_lvl < start_lvl:
            # Not interested in this suite
            continue

        if suite_lvl == start_lvl:
            # Our top-level suite
            chapter = suite_longname.split(u'.')[-1]
            file_name = f"{base_file_name}/{chapter}.rst"
            logging.info(f"    Writing file {file_name}")
            # Register the chapter in the toctree and start its file.
            # Append mode: re-runs and repeated chapters accumulate.
            with open(f"{base_file_name}/index.rst", u"a") as file_handler:
                file_handler.write(f"    {chapter}\n")
            with open(file_name, u"a") as file_handler:
                file_handler.write(rst_header)

        # Underline character depends on the suite's depth relative to
        # start_lvl; the underline must span the whole heading text.
        title_line = get_rst_title_char(suite[u"level"] - start_lvl + 2) * \
            len(suite[u"name"])
        with open(file_name, u"a") as file_handler:
            # Container suites (names without a test-type suffix) get a
            # plain suite heading here.
            if not (u"-ndrpdr" in suite[u"name"] or
                    u"-mrr" in suite[u"name"] or
                    u"-dev" in suite[u"name"]):
                file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")

            if _tests_in_suite(suite[u"name"], tests):
                # Include only the first table whose path contains the
                # suite name; break below stops after that match.
                for tbl_file in table_lst:
                    if suite[u"name"] in tbl_file:
                        file_handler.write(
                            f"\n{suite[u'name']}\n{title_line}\n"
                        )
                        # |br| markers in the suite doc become blank line
                        # + bullet in the reST output.
                        file_handler.write(
                            f"\n{suite[u'doc']}\n".replace(u'|br|', u'\n\n -')
                        )
                        if frmt == u"html":
                            file_handler.write(
                                f"\n.. include:: {tbl_file.split(u'/')[-1]}\n"
                            )
                        elif frmt == u"rst":
                            file_handler.write(
                                RST_INCLUDE_TABLE.format(
                                    file_latex=tbl_file,
                                    file_html=tbl_file.split(u"/")[-1])
                            )
                        break

    logging.info(u"  Done.")
396
397
def file_test_results_html(file_spec, input_data):
    """Generate the file(s) for the ``file_test_results_html`` algorithm.

    Thin wrapper which delegates to :func:`file_test_results` with the
    output format forced to html.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_test_results(file_spec=file_spec, input_data=input_data,
                      frmt=u"html")