1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
"""Module tracking json in-memory data and saving it to files.

Each test case, suite setup (hierarchical) and teardown has its own file pair.

Validation is performed for output files with available JSON schema.
Validation is performed on data deserialized from disk,
as serialization might have introduced subtle errors.
"""
import datetime
import os.path

from dateutil.parser import parse
from robot.api import logger
from robot.libraries.BuiltIn import BuiltIn

from resources.libraries.python.Constants import Constants
from resources.libraries.python.jumpavg.AvgStdevStats import AvgStdevStats
from resources.libraries.python.model.ExportResult import (
    export_dut_type_and_version, export_tg_type_and_version
)
from resources.libraries.python.model.MemDump import write_output
from resources.libraries.python.model.validate import (
    get_validators, validate
)
42 """Class handling the json data setting and export."""
44 ROBOT_LIBRARY_SCOPE = u"GLOBAL"
def __init__(self):
    """Declare required fields, cache output dir.

    Also memorize schema validator instances.
    """
    # Robot output dir; "." is the fallback when the variable is unset.
    self.output_dir = BuiltIn().get_variable_value(u"\\${OUTPUT_DIR}", ".")
    # Path of the next file to write; None means "nothing pending".
    # Must be initialized here, warn_on_bad_export() reads it before any start.
    self.file_path = None
    # In-memory data tree being accumulated; None between exports.
    self.data = None
    # Validator instances are created once, as their creation is not cheap.
    self.validators = get_validators()
def _detect_test_type(self):
    """Return test_type, as inferred from robot test tags.

    :returns: The inferred test type value.
    :rtype: str
    :raises RuntimeError: If the test tags does not contain expected values.
    """
    tags = self.data[u"tags"]
    # First 5 options are specific for VPP tests.
    if u"DEVICETEST" in tags:
        test_type = u"device"
    elif u"LDP_NGINX" in tags:
        test_type = u"vsap"
    elif u"HOSTSTACK" in tags:
        test_type = u"hoststack"
    elif u"GSO_TRUE" in tags or u"GSO_FALSE" in tags:
        test_type = u"gso"
    elif u"RECONF" in tags:
        test_type = u"reconf"
    # The remaining 3 options could also apply to DPDK and TRex tests.
    elif u"SOAK" in tags:
        test_type = u"soak"
    elif u"NDRPDR" in tags:
        test_type = u"ndrpdr"
    elif u"MRR" in tags:
        test_type = u"mrr"
    else:
        raise RuntimeError(f"Unable to infer test type from tags: {tags}")
    return test_type
def export_pending_data(self):
    """Write the accumulated data to disk.

    Create missing directories.
    Reset both file path and data to avoid writing multiple times.

    Functions which finalize content for given file are calling this,
    so make sure each test and non-empty suite setup or teardown
    is calling this as their last keyword.

    If no file path is set, do not write anything,
    as that is the failsafe behavior when called from unexpected place.
    Also do not write anything when EXPORT_JSON constant is false.

    Regardless of whether data was written, it is cleared.
    """
    if not Constants.EXPORT_JSON or not self.file_path:
        # Nothing to write, but still clear state to stay failsafe.
        self.data = None
        self.file_path = None
        return
    new_file_path = write_output(self.file_path, self.data)
    # Data is going to be cleared (as a sign that export succeeded),
    # so this is the last chance to detect if it was for a test case.
    is_testcase = u"result" in self.data
    self.data = None
    # Validation for output goes here when ready.
    self.file_path = None
    if is_testcase:
        # Only test case files have a schema available so far.
        validate(new_file_path, self.validators[u"tc_info"])
def warn_on_bad_export(self):
    """If bad state is detected, log a warning and clean up state."""
    if self.file_path is not None or self.data is not None:
        # A previous start was not followed by a finalize+export.
        logger.warn(f"Previous export not clean, path {self.file_path}")
    # Unconditionally reset both fields so the next start gets a clean slate.
    self.file_path = None
    self.data = None
def start_suite_setup_export(self):
    """Set new file path, initialize data for the suite setup.

    This has to be called explicitly at start of suite setup,
    otherwise Robot likes to postpone initialization
    until first call by a data-adding keyword.

    File path is set based on suite.
    """
    self.warn_on_bad_export()
    start_time = datetime.datetime.utcnow().strftime(
        u"%Y-%m-%dT%H:%M:%S.%fZ"
    )
    suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
    suite_id = suite_name.lower().replace(u" ", u"_")
    # Dots in suite id become directory separators on disk.
    suite_path_part = os.path.join(*suite_id.split(u"."))
    output_dir = self.output_dir
    self.file_path = os.path.join(
        output_dir, suite_path_part, u"setup.info.json"
    )
    self.data = dict()
    self.data[u"version"] = Constants.MODEL_VERSION
    self.data[u"start_time"] = start_time
    self.data[u"suite_name"] = suite_name
    self.data[u"suite_documentation"] = BuiltIn().get_variable_value(
        u"\\${SUITE_DOCUMENTATION}"
    )
    # "end_time" and "duration" are added on flush.
    self.data[u"hosts"] = set()
    self.data[u"telemetry"] = list()
def start_test_export(self):
    """Set new file path, initialize data to minimal tree for the test case.

    It is assumed Robot variables DUT_TYPE and DUT_VERSION
    are already set (in suite setup) to correct values.

    This function has to be called explicitly at the start of test setup,
    otherwise Robot likes to postpone initialization
    until first call by a data-adding keyword.

    File path is set based on suite and test.
    """
    self.warn_on_bad_export()
    start_time = datetime.datetime.utcnow().strftime(
        u"%Y-%m-%dT%H:%M:%S.%fZ"
    )
    suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
    suite_id = suite_name.lower().replace(u" ", u"_")
    # Dots in suite id become directory separators on disk.
    suite_path_part = os.path.join(*suite_id.split(u"."))
    test_name = BuiltIn().get_variable_value(u"\\${TEST_NAME}")
    self.file_path = os.path.join(
        self.output_dir, suite_path_part,
        test_name.lower().replace(u" ", u"_") + u".info.json"
    )
    self.data = dict()
    self.data[u"version"] = Constants.MODEL_VERSION
    self.data[u"start_time"] = start_time
    self.data[u"suite_name"] = suite_name
    self.data[u"test_name"] = test_name
    test_doc = BuiltIn().get_variable_value(u"\\${TEST_DOCUMENTATION}", u"")
    self.data[u"test_documentation"] = test_doc
    # "test_type" is added on flush.
    # "tags" is detected and added on flush.
    # "end_time" and "duration" is added on flush.
    # Robot status and message are added on flush.
    self.data[u"result"] = dict(type=u"unknown")
    self.data[u"hosts"] = BuiltIn().get_variable_value(u"\\${hosts}")
    self.data[u"telemetry"] = list()
    export_dut_type_and_version()
    export_tg_type_and_version()
def start_suite_teardown_export(self):
    """Set new file path, initialize data for the suite teardown.

    This has to be called explicitly at start of suite teardown,
    otherwise Robot likes to postpone initialization
    until first call by a data-adding keyword.

    File path is set based on suite.
    """
    self.warn_on_bad_export()
    start_time = datetime.datetime.utcnow().strftime(
        u"%Y-%m-%dT%H:%M:%S.%fZ"
    )
    suite_name = BuiltIn().get_variable_value(u"\\${SUITE_NAME}")
    suite_id = suite_name.lower().replace(u" ", u"_")
    # Dots in suite id become directory separators on disk.
    suite_path_part = os.path.join(*suite_id.split(u"."))
    self.file_path = os.path.join(
        self.output_dir, suite_path_part, u"teardown.info.json"
    )
    self.data = dict()
    self.data[u"version"] = Constants.MODEL_VERSION
    self.data[u"start_time"] = start_time
    self.data[u"suite_name"] = suite_name
    # "end_time" and "duration" is added on flush.
    self.data[u"hosts"] = BuiltIn().get_variable_value(u"\\${hosts}")
    self.data[u"telemetry"] = list()
def finalize_suite_setup_export(self):
    """Add the missing fields to data. Do not write yet.

    Should be run at the end of suite setup.
    The write is done at next start (or at the end of global teardown).
    """
    end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
    # Overwrite the placeholder set() with the real hosts value from Robot.
    self.data[u"hosts"] = BuiltIn().get_variable_value(u"\\${hosts}")
    self.data[u"end_time"] = end_time
    self.export_pending_data()
def finalize_test_export(self):
    """Add the missing fields to data. Do not write yet.

    Should be at the end of test teardown, as the implementation
    reads various Robot variables, some of them only available at teardown.

    The write is done at next start (or at the end of global teardown).
    """
    end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
    message = BuiltIn().get_variable_value(u"\\${TEST_MESSAGE}")
    test_tags = BuiltIn().get_variable_value(u"\\${TEST_TAGS}")
    self.data[u"end_time"] = end_time
    # Duration is computed from the two stored ISO timestamps.
    start_float = parse(self.data[u"start_time"]).timestamp()
    end_float = parse(self.data[u"end_time"]).timestamp()
    self.data[u"duration"] = end_float - start_float
    self.data[u"tags"] = list(test_tags)
    self.data[u"message"] = message
    # Order matters: passed truncates message, name processing reads tags,
    # result processing edits the result subtree, then the file is flushed.
    self.process_passed()
    self.process_test_name()
    self.process_results()
    self.export_pending_data()
def finalize_suite_teardown_export(self):
    """Add the missing fields to data. Do not write yet.

    Should be run at the end of suite teardown
    (but before the explicit write in the global suite teardown).
    The write is done at next start (or explicitly for global teardown).
    """
    end_time = datetime.datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%fZ")
    self.data[u"end_time"] = end_time
    self.export_pending_data()
def process_test_name(self):
    """Replace raw test name with short and long test name and set test type.

    Perform in-place edits on the data dictionary.
    Remove raw suite_name and test_name, they are not published.
    Return early if the data is not for test case.
    Insert test ID and long and short test name into the data.
    Besides suite_name and test_name, also test tags are read.

    Short test name is basically a suite tag, but with NIC driver prefix,
    if the NIC driver used is not the default one (drv_vfio_pci for VPP
    tests).

    Long test name has the following form:
    {nic_short_name}-{frame_size}-{threads_and_cores}-{suite_part}
    Lookup in test tags is needed to get the threads value.
    The threads_and_cores part may be empty, e.g. for TRex tests.

    Test ID has form {suite_name}.{test_name} where the two names come from
    Robot variables, converted to lower case and spaces replaced by
    underscores.

    Test type is set in an internal function.

    :raises RuntimeError: If the data does not contain expected values.
    """
    suite_part = self.data.pop(u"suite_name").lower().replace(u" ", u"_")
    if u"test_name" not in self.data:
        # There will be no test_id, provide suite_id instead.
        self.data[u"suite_id"] = suite_part
        return
    test_part = self.data.pop(u"test_name").lower().replace(u" ", u"_")
    self.data[u"test_id"] = f"{suite_part}.{test_part}"
    tags = self.data[u"tags"]
    # Test name does not contain thread count.
    subparts = test_part.split(u"c-", 1)
    if len(subparts) < 2 or subparts[0][-2:-1] != u"-":
        # Physical core count not detected, assume it is a TRex test.
        if u"--" not in test_part:
            raise RuntimeError(f"Cores not found for {subparts}")
        short_name = test_part.split(u"--", 1)[1]
    else:
        short_name = subparts[1]
        # Add threads to test_part.
        core_part = subparts[0][-1] + u"c"
        for tag in tags:
            tag = tag.lower()
            if len(tag) == 4 and core_part == tag[2:] and tag[1] == u"t":
                test_part = test_part.replace(f"-{core_part}-", f"-{tag}-")
                break
        else:
            raise RuntimeError(
                f"Threads not found for {test_part} tags {tags}"
            )
    # For long name we need NIC model, which is only in suite name.
    last_suite_part = suite_part.split(u".")[-1]
    # Short name happens to be the suffix we want to ignore.
    prefix_part = last_suite_part.split(short_name)[0]
    # Also remove the trailing dash.
    prefix_part = prefix_part[:-1]
    # Throw away possible link prefix such as "1n1l-".
    nic_code = prefix_part.split(u"-", 1)[-1]
    nic_short = Constants.NIC_CODE_TO_SHORT_NAME[nic_code]
    long_name = f"{nic_short}-{test_part}"
    # Set test type.
    test_type = self._detect_test_type()
    self.data[u"test_type"] = test_type
    # Remove trailing test type from names (if present).
    short_name = short_name.split(f"-{test_type}")[0]
    long_name = long_name.split(f"-{test_type}")[0]
    self.data[u"test_name_short"] = short_name
    self.data[u"test_name_long"] = long_name
def process_passed(self):
    """Process the test status information as boolean.

    Boolean is used to make post processing more efficient.
    In case the test status is PASS, we will truncate the test message.
    """
    status = BuiltIn().get_variable_value(u"\\${TEST_STATUS}")
    if status is not None:
        self.data[u"passed"] = (status == u"PASS")
        if self.data[u"passed"]:
            # Also truncate success test messages.
            self.data[u"message"] = u""
def process_results(self):
    """Process measured results.

    Results are used to avoid future post processing, making it more
    efficient to consume.
    """
    if u"result" not in self.data:
        # Not a test case, nothing to process.
        return
    result_node = self.data[u"result"]
    result_type = result_node[u"type"]
    if result_type == u"unknown":
        # Device or something else not supported.
        return
    # Compute avg and stdev for mrr.
    if result_type == u"mrr":
        rate_node = result_node[u"receive_rate"][u"rate"]
        stats = AvgStdevStats.for_runs(rate_node[u"values"])
        rate_node[u"avg"] = stats.avg
        rate_node[u"stdev"] = stats.stdev
        return
    # Multiple processing steps for ndrpdr.
    if result_type != u"ndrpdr":
        return
    # Filter out invalid latencies.
    for which_key in (u"latency_forward", u"latency_reverse"):
        if which_key not in result_node:
            # Probably just an unidir test.
            continue
        for load in (u"pdr_0", u"pdr_10", u"pdr_50", u"pdr_90"):
            if result_node[which_key][load][u"max"] <= 0:
                # One invalid number is enough to remove all loads.
                break
        else:
            # No break means all numbers are ok, nothing to do here.
            continue
        # Break happened, something is invalid, remove all loads.
        result_node.pop(which_key)