Report: Add 2n-clx cx556a Latency graphs
[csit.git] / resources / tools / presentation / generator_files.py
1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Algorithms to generate files.
15 """
16
17 import re
18
19 from os.path import join
20 from collections import OrderedDict
21
22 import logging
23
24 from pal_utils import get_files, get_rst_title_char
25
26
# str.format template used to include one csv table in a generated rst file.
# The table is emitted twice: as a ``csv-table`` directive for the HTML build
# ({file_html}) and as ``\csvautolongtable`` for the LaTeX/PDF build
# ({file_latex}).
RST_INCLUDE_TABLE = (u"\n.. only:: html\n\n"
                     u"    .. csv-table::\n"
                     u"        :header-rows: 1\n"
                     u"        :widths: auto\n"
                     u"        :align: center\n"
                     u"        :file: {file_html}\n"
                     u"\n.. only:: latex\n\n"
                     u"\n  .. raw:: latex\n\n"
                     u"      \\csvautolongtable{{{file_latex}}}\n\n")

# Splits a suite-name prefix such as u"10ge2p1x710-" into the port/speed part
# (group 1, e.g. u"10ge2p1") and the NIC short name (group 2, e.g. u"x710")
# which file_details_split() uses as the level-3 chapter key.
REGEX_NIC_SHORT = re.compile(r'(\d*ge\dp\d)(\D*\d*[a-z]*)-')
38
39
def generate_files(spec, data):
    """Generate all files specified in the specification file.

    Each file specification names its generator via the u"algorithm" field;
    the matching function from the dispatch table below is invoked with the
    file specification and the input data.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """

    dispatch = {
        u"file_details_split": file_details_split,
        u"file_details_split_html": file_details_split_html,
        u"file_test_results": file_test_results,
        u"file_test_results_html": file_test_results_html
    }

    logging.info(u"Generating the files ...")
    for fil in spec.files:
        try:
            dispatch[fil[u"algorithm"]](fil, data)
        except (NameError, KeyError) as err:
            # An unknown algorithm name surfaces as a KeyError on the
            # dispatch lookup; log it and carry on with the next file.
            logging.error(
                f"Probably algorithm {fil[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
66
67
68 def _tests_in_suite(suite_name, tests):
69     """Check if the suite includes tests.
70
71     :param suite_name: Name of the suite to be checked.
72     :param tests: Set of tests
73     :type suite_name: str
74     :type tests: pandas.Series
75     :returns: True if the suite includes tests.
76     :rtype: bool
77     """
78
79     for key in tests.keys():
80         if suite_name == tests[key][u"parent"]:
81             return True
82     return False
83
84
def file_details_split(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_details_split
    specified in the specification file.

    The detailed results are split into a three-level chapter structure:
    level 1 is the top-level suite name, level 2 is derived from the last
    two underscore-separated parts of the table directory name, level 3 is
    the NIC short name extracted from the suite name.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format can be: rst or html
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    fileset_file_name = f"{file_spec[u'output-file']}"
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file set {fileset_file_name} ...")

    data_sets = file_spec.get(u"data", None)
    if not data_sets:
        logging.error(
            f"  No data sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    table_sets = file_spec.get(u"dir-tables", None)
    if not table_sets:
        logging.error(
            f"  No table sets specified for {file_spec[u'output-file']}, exit."
        )
        return

    # Data sets and table sets are paired positionally below.
    if len(data_sets) != len(table_sets):
        logging.error(
            f"  The number of data sets and the number of table sets for "
            f"{file_spec[u'output-file']} are not equal, exit."
        )
        return

    chapters = OrderedDict()
    for data_set, table_set in zip(data_sets, table_sets):

        logging.info(f"   Processing the table set {table_set}...")

        # HTML builds include per-table .rst snippets, rst builds the .csv.
        table_lst = None
        if frmt == u"html":
            table_lst = get_files(table_set, u".rst", full_path=True)
        elif frmt == u"rst":
            table_lst = get_files(table_set, u".csv", full_path=True)

        if not table_lst:
            logging.error(
                f"    No tables to include in {table_set}. Skipping."
            )
            continue

        logging.info(u"    Creating the test data set...")
        tests = input_data.filter_data(
            element=file_spec,
            params=[u"name", u"parent", u"doc", u"type", u"level"],
            data=data_set,
            data_set=u"tests",
            continue_on_error=True
        )
        if tests.empty:
            continue
        tests = input_data.merge_data(tests)
        tests.sort_index(inplace=True)

        logging.info(u"    Creating the suite data set...")
        suites = input_data.filter_data(
            element=file_spec,
            data=data_set,
            continue_on_error=True,
            data_set=u"suites"
        )
        if suites.empty:
            continue
        suites = input_data.merge_data(suites)
        suites.sort_index(inplace=True)

        logging.info(u"    Generating files...")

        chapter_l1 = u""
        # Level-2 chapter key, e.g. the testbed/NIC part of the directory.
        chapter_l2 = u"-".join(table_set.split(u"_")[-2:])
        for suite_longname, suite in suites.items():

            suite_lvl = len(suite_longname.split(u"."))
            if suite_lvl < start_lvl:
                # Not interested in this suite
                continue

            if suite_lvl == start_lvl:
                # Our top-level suite
                chapter_l1 = suite_longname.split(u'.')[-1]
                if chapters.get(chapter_l1, None) is None:
                    chapters[chapter_l1] = OrderedDict()
                if chapters[chapter_l1].get(chapter_l2, None) is None:
                    chapters[chapter_l1][chapter_l2] = OrderedDict()
                continue

            if _tests_in_suite(suite[u"name"], tests):
                groups = re.search(REGEX_NIC_SHORT, suite[u"name"])
                nic = groups.group(2) if groups else None
                if nic is None:
                    continue
                if chapters[chapter_l1][chapter_l2].get(nic, None) is None:
                    chapters[chapter_l1][chapter_l2][nic] = dict(
                        rst_file=f"{join(table_set, chapter_l1)}_{nic}.rst".
                        replace(u"2n1l-", u""),
                        tables=list()
                    )
                for idx, tbl_file in enumerate(table_lst):
                    if suite[u"name"] in tbl_file:
                        chapters[chapter_l1][chapter_l2][nic][u"tables"].append(
                            (
                                # Popping during enumerate is safe here:
                                # we break out immediately afterwards.
                                table_lst.pop(idx),
                                suite[u"doc"].replace(u'|br|', u'\n\n -')
                            )
                        )
                        break
    titles = {
        # VPP Perf, MRR
        u"container_memif": u"LXC/DRC Container Memif",
        u"crypto": u"IPsec IPv4 Routing",
        u"hoststack": u"Hoststack Testing",
        u"ip4": u"IPv4 Routing",
        u"ip4_tunnels": u"IPv4 Tunnels",
        u"ip6": u"IPv6 Routing",
        u"ip6_tunnels": u"IPv6 Tunnels",
        u"l2": u"L2 Ethernet Switching",
        u"lb": u"LoadBalancer",
        u"nfv_density": u"NFV Service Density",
        u"srv6": u"SRv6 Routing",
        u"vm_vhost": u"KVM VMs vhost-user",
        u"vts": u"Virtual Topology System",
        # VPP Device
        u"interfaces": u"Interfaces",
        u"l2bd": u"L2 Bridge-domain",
        u"l2patch": u"L2 Patch",
        u"l2xc": u"L2 Cross-connect",
    }

    order_chapters = file_spec.get(u"order-chapters", None)

    if order_chapters:
        order_1 = order_chapters.get(u"level-1", None)
        order_2 = order_chapters.get(u"level-2", None)
        order_3 = order_chapters.get(u"level-3", None)
    else:
        order_1 = None
        order_2 = None
        order_3 = None
    if not order_1:
        # Bug fix: previously order_1 stayed None when u"order-chapters"
        # was missing from the specification, and the loop below raised
        # TypeError on iterating None.
        order_1 = chapters.keys()

    for chapter_l1 in order_1:
        content_l1 = chapters.get(chapter_l1, None)
        if not content_l1:
            continue
        with open(f"{fileset_file_name}/index.rst", u"a") as file_handler:
            file_handler.write(f"    {chapter_l1}\n")
        l1_file_name = f"{join(fileset_file_name, chapter_l1)}.rst"
        title = titles.get(chapter_l1, chapter_l1)
        logging.info(f"   Generating {title} ...")
        with open(l1_file_name, u"w") as file_handler:
            file_handler.write(
                f"{title}\n"
                f"{get_rst_title_char(1) * len(title)}\n\n"
                f".. toctree::\n\n"
            )

        # Bug fix: evaluate the fallback orders per chapter instead of
        # rebinding order_2/order_3, which leaked the first chapter's key
        # set into every subsequent chapter.
        for chapter_l2 in (order_2 if order_2 else content_l1.keys()):
            content_l2 = content_l1.get(chapter_l2, None)
            if not content_l2:
                continue
            for chapter_l3 in (order_3 if order_3 else content_l2.keys()):
                content_l3 = content_l2.get(chapter_l3, None)
                if not content_l3:
                    continue
                with open(l1_file_name, u"a") as file_handler:
                    item = u"/".join(content_l3[u'rst_file'].split(u'/')[-2:])
                    file_handler.write(f"    ../{item}\n")
                logging.info(f"    Writing the file {content_l3[u'rst_file']}")
                with open(content_l3[u'rst_file'], u"w+") as file_handler:
                    title = f"{chapter_l2}-{chapter_l3}"
                    file_handler.write(
                        f"{rst_header}\n"
                        f"{title}\n"
                        f"{get_rst_title_char(2) * len(title)}\n"
                    )
                    for table in content_l3[u'tables']:
                        title = table[0].split(u"/")[-1].split(u".")[0]
                        file_handler.write(
                            f"\n{title}\n"
                            f"{get_rst_title_char(3) * len(title)}\n"
                        )
                        file_handler.write(f"\n{table[1]}\n")
                        if frmt == u"html":
                            file_handler.write(
                                f"\n.. include:: {table[0].split(u'/')[-1]}"
                                f"\n"
                            )
                        elif frmt == u"rst":
                            file_handler.write(
                                RST_INCLUDE_TABLE.format(
                                    file_latex=table[0],
                                    file_html=table[0].split(u"/")[-1])
                            )
304
305
def file_details_split_html(file_spec, input_data):
    """Generate the file(s) with algorithms
    - file_details_split_html
    specified in the specification file.

    Thin wrapper: delegates to file_details_split with the html format.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_details_split(file_spec, input_data, frmt=u"html")
317
318
def file_test_results(file_spec, input_data, frmt=u"rst"):
    """Generate the file(s) with algorithms
    - file_test_results
    specified in the specification file.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :param frmt: Format can be: rst or html
    :type file_spec: pandas.Series
    :type input_data: InputData
    :type frmt: str
    """

    base_file_name = f"{file_spec[u'output-file']}"
    rst_header = (
        u"\n"
        u".. |br| raw:: html\n\n    <br />\n\n\n"
        u".. |prein| raw:: html\n\n    <pre>\n\n\n"
        u".. |preout| raw:: html\n\n    </pre>\n\n"
    )
    start_lvl = file_spec.get(u"data-start-level", 4)

    logging.info(f"  Generating the file {base_file_name} ...")

    # HTML builds include per-table .rst snippets, plain rst builds the .csv;
    # any other format is silently ignored.
    extension = {u"html": u".rst", u"rst": u".csv"}.get(frmt)
    if extension is None:
        return
    table_lst = get_files(file_spec[u"dir-tables"], extension, full_path=True)
    if not table_lst:
        logging.error(
            f"  No tables to include in {file_spec[u'dir-tables']}. Skipping."
        )
        return

    logging.info(
        f"    Creating the tests data set for the "
        f"{file_spec.get(u'type', u'')} {file_spec.get(u'title', u'')}."
    )

    tests = input_data.filter_data(
        file_spec,
        params=[u"name", u"parent", u"doc", u"type", u"level"],
        continue_on_error=True
    )
    if tests.empty:
        return
    tests = input_data.merge_data(tests)

    suites = input_data.filter_data(
        file_spec,
        continue_on_error=True,
        data_set=u"suites"
    )
    if suites.empty:
        return
    suites = input_data.merge_data(suites)
    suites.sort_index(inplace=True)

    file_name = u""
    for suite_longname, suite in suites.items():

        suite_lvl = len(suite_longname.split(u"."))
        if suite_lvl < start_lvl:
            # Too shallow in the suite tree to be of interest.
            continue

        if suite_lvl == start_lvl:
            # A suite at the start level opens a new chapter file.
            chapter = suite_longname.split(u'.')[-1]
            file_name = f"{base_file_name}/{chapter}.rst"
            logging.info(f"    Writing file {file_name}")
            with open(f"{base_file_name}/index.rst", u"a") as file_handler:
                file_handler.write(f"    {chapter}\n")
            with open(file_name, u"a") as file_handler:
                file_handler.write(rst_header)

        suite_name = suite[u"name"]
        title_line = get_rst_title_char(suite[u"level"] - start_lvl + 2) * \
            len(suite_name)
        with open(file_name, u"a") as file_handler:
            is_test_suite = (
                u"-ndrpdr" in suite_name or
                u"-mrr" in suite_name or
                u"-dev" in suite_name
            )
            if not is_test_suite:
                file_handler.write(f"\n{suite_name}\n{title_line}\n")

            if _tests_in_suite(suite_name, tests):
                for tbl_file in table_lst:
                    if suite_name not in tbl_file:
                        continue
                    file_handler.write(
                        f"\n{suite_name}\n{title_line}\n"
                    )
                    file_handler.write(
                        f"\n{suite[u'doc']}\n".replace(u'|br|', u'\n\n -')
                    )
                    if frmt == u"html":
                        file_handler.write(
                            f"\n.. include:: {tbl_file.split(u'/')[-1]}\n"
                        )
                    elif frmt == u"rst":
                        file_handler.write(
                            RST_INCLUDE_TABLE.format(
                                file_latex=tbl_file,
                                file_html=tbl_file.split(u"/")[-1])
                        )
                    # Only the first matching table is included.
                    break

    logging.info(u"  Done.")
427
428
def file_test_results_html(file_spec, input_data):
    """Generate the file(s) with algorithms
    - file_test_results_html
    specified in the specification file.

    Thin wrapper: delegates to file_test_results with the html format.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_test_results(file_spec, input_data, frmt=u"html")