1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Algorithms to generate files."""
17 from os.path import isfile
18 from collections import OrderedDict
22 from pal_utils import get_files, get_rst_title_char
# Template for the RST markup that pulls a generated table into the output:
# the HTML build includes the file named by {file_html}, the LaTeX build
# includes {file_latex} via \csvautolongtable.
# NOTE(review): this listing has baked-in line numbers and elided lines
# (the csv-table option lines 26-29 are missing); surviving text is kept
# verbatim below.
25 RST_INCLUDE_TABLE = (u"\n.. only:: html\n\n"
30 u"        :file: {file_html}\n"
31 u"\n.. only:: latex\n\n"
32 u"\n  .. raw:: latex\n\n"
33 u"      \\csvautolongtable{{{file_latex}}}\n\n")
def generate_files(spec, data):
    """Generate all files specified in the specification file.

    Dispatches every file specification to the generator function named by
    its u"algorithm" key; unknown algorithms are logged and skipped so one
    bad entry does not abort the whole run.

    :param spec: Specification read from the specification file.
    :param data: Data to process.
    :type spec: Specification
    :type data: InputData
    """
    # Map of algorithm names (from the specification) to generator functions
    # defined in this module.
    generator = {
        u"file_details_split": file_details_split,
        u"file_details_split_html": file_details_split_html,
        u"file_test_results": file_test_results,
        u"file_test_results_html": file_test_results_html
    }

    logging.info(u"Generating the files ...")
    for file_spec in spec.files:
        try:
            generator[file_spec[u"algorithm"]](file_spec, data)
        except (NameError, KeyError) as err:
            # KeyError: algorithm not in the dispatch table (or missing key
            # in file_spec); NameError kept for parity with the original
            # error handling. Log and continue with the next file spec.
            logging.error(
                f"Probably algorithm {file_spec[u'algorithm']} is not defined: "
                f"{repr(err)}"
            )
    logging.info(u"Done.")
64 def _tests_in_suite(suite_name, tests):
65 """Check if the suite includes tests.
67 :param suite_name: Name of the suite to be checked.
68 :param tests: Set of tests
70 :type tests: pandas.Series
71 :returns: True if the suite includes tests.
75 for key in tests.keys():
76 if suite_name == tests[key][u"parent"]:
# NOTE(review): numbered listing with elided lines (gaps in the baked-in
# numbering); the surviving text is preserved verbatim below and only review
# comments are added.
81 def file_details_split(file_spec, input_data, frmt=u"rst"):
82 """Generate the file(s) with algorithms
84 specified in the specification file.
86 :param file_spec: File to generate.
87 :param input_data: Data to process.
88 :param frmt: Format can be: rst or html
89 :type file_spec: pandas.Series
90 :type input_data: InputData
# Output directory/base name for the whole file set, from the specification.
94 fileset_file_name = f"{file_spec[u'output-file']}"
# RST substitution definitions (|br|, |prein|, |preout|); the assignment
# target of these string fragments is elided -- presumably rst_header,
# which is written at the top of every chapter file below. TODO confirm.
97 u".. |br| raw:: html\n\n    <br />\n\n\n"
98 u".. |prein| raw:: html\n\n    <pre>\n\n\n"
99 u".. |preout| raw:: html\n\n    </pre>\n\n"
# Suites shallower than this dot-separated depth are skipped below.
101 start_lvl = file_spec.get(u"data-start-level", 4)
103 logging.info(f"  Generating the file set {fileset_file_name} ...")
# Validation: u"data" and u"dir-tables" must both be present and of equal
# length; the error-handling/return lines are elided in this listing.
105 data_sets = file_spec.get(u"data", None)
108 f"  No data sets specified for {file_spec[u'output-file']}, exit."
112 table_sets = file_spec.get(u"dir-tables", None)
115 f"  No table sets specified for {file_spec[u'output-file']}, exit."
119 if len(data_sets) != len(table_sets):
121 f"  The number of data sets and the number of table sets for "
122 f"{file_spec[u'output-file']} are not equal, exit."
# chapter name -> (sub-chapter name -> generated .rst file path)
126 chapters = OrderedDict()
127 for data_set, table_set in zip(data_sets, table_sets):
129 logging.info(f"  Processing the table set {table_set}...")
# Two get_files calls, .rst then .csv -- presumably a frmt-dependent
# branch; the branch keywords are elided in this listing.
133 table_lst = get_files(table_set, u".rst", full_path=True)
135 table_lst = get_files(table_set, u".csv", full_path=True)
139 f"  No tables to include in {table_set}. Skipping."
143 logging.info(u"    Creating the test data set...")
144 tests = input_data.filter_data(
146 params=[u"name", u"parent", u"doc", u"type", u"level"],
149 continue_on_error=True
153 tests = input_data.merge_data(tests)
155 logging.info(u"    Creating the suite data set...")
156 suites = input_data.filter_data(
159 continue_on_error=True,
164 suites = input_data.merge_data(suites)
165 suites.sort_index(inplace=True)
167 logging.info(u"    Generating files...")
# Sub-chapter label derived from the last two "_"-separated parts of the
# table-set directory name.
170 sub_chapter = u"-".join(table_set.split(u"_")[-2:])
171 for suite_longname, suite in suites.items():
173 suite_lvl = len(suite_longname.split(u"."))
174 if suite_lvl < start_lvl:
175 # Not interested in this suite
178 if suite_lvl == start_lvl:
179 # Our top-level suite
180 chapter = suite_longname.split(u'.')[-1]
181 file_name = f"{table_set}/{chapter}.rst"
182 logging.info(f"    Writing file {file_name}")
# Append mode: one chapter file accumulates output across suites.
183 with open(file_name, u"a") as file_handler:
184 file_handler.write(rst_header)
185 if chapters.get(chapter, None) is None:
186 chapters[chapter] = OrderedDict()
187 chapters[chapter][sub_chapter] = file_name
# RST underline character chosen by relative suite level, repeated to the
# title length (continuation with the length expression is elided).
189 title_line = get_rst_title_char(suite[u"level"] - start_lvl + 2) * \
191 with open(file_name, u"a") as file_handler:
192 if not (u"-ndrpdr" in suite[u"name"] or
193 u"-mrr" in suite[u"name"] or
194 u"-dev" in suite[u"name"]):
195 file_handler.write(f"\n{sub_chapter}\n{title_line}\n")
# Only suites directly containing tests (per _tests_in_suite) get their
# matching tables written out.
197 if _tests_in_suite(suite[u"name"], tests):
198 for tbl_file in table_lst:
199 if suite[u"name"] in tbl_file:
200 title_line = get_rst_title_char(
201 suite[u"level"] - start_lvl + 2) * \
204 f"\n{suite[u'name']}\n{title_line}\n"
207 f"\n{suite[u'doc']}\n".
208 replace(u'|br|', u'\n\n    -')
212 f"\n.. include:: {tbl_file.split(u'/')[-1]}"
# frmt-dependent include: the RST_INCLUDE_TABLE template is formatted with
# the table file name (latex keyword argument line elided).
217 RST_INCLUDE_TABLE.format(
219 file_html=tbl_file.split(u"/")[-1])
# Human-readable chapter titles keyed by chapter (directory) name; the
# dict header (titles = {) and closing brace are elided in this listing.
224 u"container_memif": u"LXC/DRC Container Memif",
225 u"crypto": u"IPsec IPv4 Routing",
226 u"hoststack": u"Hoststack Testing",
227 u"ip4": u"IPv4 Routing",
228 u"ip4_tunnels": u"IPv4 Tunnels",
229 u"ip6": u"IPv6 Routing",
230 u"ip6_tunnels": u"IPv6 Tunnels",
231 u"l2": u"L2 Ethernet Switching",
232 u"lb": u"LoadBalancer",
233 u"nfv_density": u"NFV Service Density",
234 u"srv6": u"SRv6 Routing",
235 u"vm_vhost": u"KVM VMs vhost-user",
236 u"vts": u"Virtual Topology System",
238 u"interfaces": u"Interfaces",
239 u"l2bd": u"L2 Bridge-domain",
240 u"l2patch": u"L2 Patch",
241 u"l2xc": u"L2 Cross-connect",
# Assemble index.rst and per-chapter files, honoring the optional ordering
# keys u"order-chapters" / u"order-sub-chapters" from the specification.
244 order_chapters = file_spec.get(u"order-chapters", None)
245 if not order_chapters:
246 order_chapters = chapters.keys()
248 order_sub_chapters = file_spec.get(u"order-sub-chapters", None)
250 for chapter in order_chapters:
251 sub_chapters = chapters.get(chapter, None)
254 with open(f"{fileset_file_name}/index.rst", u"a") as file_handler:
255 file_handler.write(f"    {chapter}\n")
256 chapter_file_name = f"{fileset_file_name}/{chapter}.rst"
# Write the chapter title only if the file does not exist yet (it may
# already have been created by a previous table set).
257 if not isfile(chapter_file_name):
258 with open(chapter_file_name, u"a") as file_handler:
259 title = titles.get(chapter, chapter)
262 f"{get_rst_title_char(1) * len(title)}\n\n"
266 if not order_sub_chapters:
267 order_sub_chapters = sub_chapters.keys()
268 for sub_chapter in order_sub_chapters:
269 testbed = sub_chapters.get(sub_chapter, None)
272 with open(chapter_file_name, u"a") as file_handler:
274 f"    ../{u'/'.join(testbed.split(u'/')[-2:])}\n"
def file_details_split_html(file_spec, input_data):
    """Generate the file(s) with algorithms
    - file_details_split_html
    specified in the specification file.

    Thin wrapper: delegates to file_details_split with the HTML output
    format selected.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_details_split(file_spec, input_data, frmt=u"html")
# NOTE(review): numbered listing with elided lines (gaps in the baked-in
# numbering); the surviving text is preserved verbatim below and only review
# comments are added.
291 def file_test_results(file_spec, input_data, frmt=u"rst"):
292 """Generate the file(s) with algorithms
294 specified in the specification file.
296 :param file_spec: File to generate.
297 :param input_data: Data to process.
298 :param frmt: Format can be: rst or html
299 :type file_spec: pandas.Series
300 :type input_data: InputData
# Output directory/base name from the specification.
304 base_file_name = f"{file_spec[u'output-file']}"
# RST substitution definitions (|br|, |prein|, |preout|); the assignment
# target is elided -- presumably rst_header, written below. TODO confirm.
307 u".. |br| raw:: html\n\n    <br />\n\n\n"
308 u".. |prein| raw:: html\n\n    <pre>\n\n\n"
309 u".. |preout| raw:: html\n\n    </pre>\n\n"
# Suites shallower than this dot-separated depth are skipped below.
311 start_lvl = file_spec.get(u"data-start-level", 4)
313 logging.info(f"  Generating the file {base_file_name} ...")
# Two get_files calls, .rst then .csv -- presumably a frmt-dependent
# branch; the branch keywords are elided in this listing.
316 table_lst = get_files(file_spec[u"dir-tables"], u".rst", full_path=True)
318 table_lst = get_files(file_spec[u"dir-tables"], u".csv", full_path=True)
323 f"  No tables to include in {file_spec[u'dir-tables']}. Skipping."
328 f"  Creating the tests data set for the "
329 f"{file_spec.get(u'type', u'')} {file_spec.get(u'title', u'')}."
332 tests = input_data.filter_data(
334 params=[u"name", u"parent", u"doc", u"type", u"level"],
335 continue_on_error=True
339 tests = input_data.merge_data(tests)
341 suites = input_data.filter_data(
343 continue_on_error=True,
348 suites = input_data.merge_data(suites)
349 suites.sort_index(inplace=True)
352 for suite_longname, suite in suites.items():
354 suite_lvl = len(suite_longname.split(u"."))
355 if suite_lvl < start_lvl:
356 # Not interested in this suite
359 if suite_lvl == start_lvl:
360 # Our top-level suite
361 chapter = suite_longname.split(u'.')[-1]
362 file_name = f"{base_file_name}/{chapter}.rst"
363 logging.info(f"    Writing file {file_name}")
# Register the chapter in index.rst, then start the chapter file with the
# common header (append mode accumulates output across suites).
364 with open(f"{base_file_name}/index.rst", u"a") as file_handler:
365 file_handler.write(f"    {chapter}\n")
366 with open(file_name, u"a") as file_handler:
367 file_handler.write(rst_header)
# RST underline sized by relative suite level (continuation with the
# length expression is elided).
369 title_line = get_rst_title_char(suite[u"level"] - start_lvl + 2) * \
371 with open(file_name, u"a") as file_handler:
372 if not (u"-ndrpdr" in suite[u"name"] or
373 u"-mrr" in suite[u"name"] or
374 u"-dev" in suite[u"name"]):
375 file_handler.write(f"\n{suite[u'name']}\n{title_line}\n")
# Only suites directly containing tests (per _tests_in_suite) get their
# matching tables written; frmt presumably selects the plain include vs
# the RST_INCLUDE_TABLE template (branch lines elided).
377 if _tests_in_suite(suite[u"name"], tests):
378 for tbl_file in table_lst:
379 if suite[u"name"] in tbl_file:
381 f"\n{suite[u'name']}\n{title_line}\n"
384 f"\n{suite[u'doc']}\n".replace(u'|br|', u'\n\n    -')
388 f"\n.. include:: {tbl_file.split(u'/')[-1]}\n"
392 RST_INCLUDE_TABLE.format(
394 file_html=tbl_file.split(u"/")[-1])
398 logging.info(u"  Done.")
def file_test_results_html(file_spec, input_data):
    """Generate the file(s) with algorithms
    - file_test_results_html
    specified in the specification file.

    Thin wrapper: delegates to file_test_results with the HTML output
    format selected.

    :param file_spec: File to generate.
    :param input_data: Data to process.
    :type file_spec: pandas.Series
    :type input_data: InputData
    """
    file_test_results(file_spec, input_data, frmt=u"html")