# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """Module facilitating conversion from raw outputs into info outputs."""
20 import dateutil.parser
22 from resources.libraries.python.Constants import Constants
23 from resources.libraries.python.jumpavg.AvgStdevStats import AvgStdevStats


def _raw_to_info_path(raw_path):
    """Compute path for info output corresponding to given raw output.

    :param raw_path: Local filesystem path to read raw JSON data from.
    :type raw_path: str
    :returns: Local filesystem path to write info JSON content to.
    :rtype: str
    :raises RuntimeError: If the input path does not meet all expectations.
    """
    raw_extension = u".raw.json"
    tmp_parts = raw_path.split(raw_extension)
    if len(tmp_parts) != 2 or tmp_parts[1] != u"":
        raise RuntimeError(f"Unexpected extension {raw_extension}: {raw_path}")
    info_path = tmp_parts[0] + u".info.json"
    return info_path
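

# For example (illustrative paths, not from the source):
# _raw_to_info_path(u"out/s1/test.raw.json") returns u"out/s1/test.info.json",
# while u"out/s1/test.json" raises RuntimeError.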


def _process_test_name(data):
    """Replace raw test name with short and long test name and set test_type.

    Perform in-place edits on the data dictionary.
    Remove raw suite_name and test_name, they are not part of info schema.
    Return early if the data is not for a test case.
    Insert test ID and long and short test name into the data.
    Besides suite_name and test_name, also test tags are read.

    Short test name is basically a suite tag, but with NIC driver prefix,
    if the NIC driver used is not the default one (drv_vfio_pci for VPP tests).

    Long test name has the following form:
    {nic_short_name}-{frame_size}-{threads_and_cores}-{suite_part}
    Lookup in test tags is needed to get the threads value.
    The threads_and_cores part may be empty, e.g. for TRex tests.

    Test ID has form {suite_name}.{test_name} where the two names come from
    Robot variables, converted to lower case and spaces replaced by underscores.

    Test type is set in an internal function.

    :param data: Raw data, perhaps some fields converted into info data already.
    :type data: dict
    :raises RuntimeError: If the raw data does not contain expected values.
    """
    suite_part = data.pop(u"suite_name").lower().replace(u" ", u"_")
    if u"test_name" not in data:
        # There will be no test_id, provide suite_id instead.
        data[u"suite_id"] = suite_part
        return
    test_part = data.pop(u"test_name").lower().replace(u" ", u"_")
    data[u"test_id"] = f"{suite_part}.{test_part}"
    tags = data[u"tags"]
    # Test name does not contain thread count.
    subparts = test_part.split(u"c-", 1)
    if len(subparts) < 2 or subparts[0][-2:-1] != u"-":
        # Physical core count not detected, assume it is a TRex test.
        if u"--" not in test_part:
            raise RuntimeError(f"Cores not found for {subparts}")
        short_name = test_part.split(u"--", 1)[1]
    else:
        short_name = subparts[1]
        # Add threads to test_part.
        core_part = subparts[0][-1] + u"c"
        for tag in tags:
            tag = tag.lower()
            if len(tag) == 4 and core_part == tag[2:] and tag[1] == u"t":
                test_part = test_part.replace(f"-{core_part}-", f"-{tag}-")
                break
        else:
            raise RuntimeError(f"Threads not found for {test_part} tags {tags}")
    # For long name we need NIC model, which is only in suite name.
    last_suite_part = suite_part.split(u".")[-1]
    # Short name happens to be the suffix we want to ignore.
    prefix_part = last_suite_part.split(short_name)[0]
    # Also remove the trailing dash.
    prefix_part = prefix_part[:-1]
    # Throw away possible link prefix such as "1n1l-".
    nic_code = prefix_part.split(u"-", 1)[-1]
    nic_short = Constants.NIC_CODE_TO_SHORT_NAME[nic_code]
    long_name = f"{nic_short}-{test_part}"

    test_type = _detect_test_type(data)
    data[u"test_type"] = test_type
    # Remove trailing test type from names (if present).
    short_name = short_name.split(f"-{test_type}")[0]
    long_name = long_name.split(f"-{test_type}")[0]

    data[u"test_name_short"] = short_name
    data[u"test_name_long"] = long_name
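

# A worked example (all values hypothetical, not from the source): with
# suite_name u"Tests.Vpp.Perf.Ip4.2N1L-25Ge2P1Xxv710-Ethip4-Ip4Base-Ndrpdr",
# test_name u"64B-1c-ethip4-ip4base-ndrpdr" and a 2T1C tag present,
# _process_test_name() sets test_name_short to u"ethip4-ip4base" and,
# assuming NIC_CODE_TO_SHORT_NAME maps u"25ge2p1xxv710" to, say, u"xxv710",
# test_name_long to u"xxv710-64b-2t1c-ethip4-ip4base".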


def _detect_test_type(data):
    """Return test_type, as inferred from robot test tags.

    :param data: Raw data, perhaps some fields converted into info data already.
    :type data: dict
    :returns: The inferred test type value.
    :rtype: str
    :raises RuntimeError: If the test tags do not contain expected values.
    """
    tags = data[u"tags"]
    # First 5 options are specific for VPP tests.
    if u"DEVICETEST" in tags:
        test_type = u"device"
    elif u"LDP_NGINX" in tags:
        test_type = u"vsap"
    elif u"HOSTSTACK" in tags:
        test_type = u"hoststack"
    elif u"GSO_TRUE" in tags or u"GSO_FALSE" in tags:
        test_type = u"gso"
    elif u"RECONF" in tags:
        test_type = u"reconf"
    # The remaining 3 options could also apply to DPDK and TRex tests.
    elif u"SOAK" in tags:
        test_type = u"soak"
    elif u"NDRPDR" in tags:
        test_type = u"ndrpdr"
    elif u"MRR" in tags:
        test_type = u"mrr"
    else:
        raise RuntimeError(f"Unable to infer test type from tags: {tags}")
    return test_type
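

# A minimal illustration (hypothetical tag list, not from the source):
# _detect_test_type({u"tags": [u"NDRPDR", u"64B", u"2T1C"]}) returns
# u"ndrpdr"; a tag list with none of the known markers raises RuntimeError.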


def _convert_to_info_in_memory(data):
    """Perform all changes needed for processing of data, return new data.

    Data is assumed to be valid for raw schema, so no exceptions are expected.
    The original argument object is not edited,
    a new copy is created for edits and returned,
    because there is no easy way to sort keys in-place.

    :param data: The whole composite object to filter and enhance.
    :type data: dict
    :returns: New object with the edited content.
    :rtype: dict
    """
    data = copy.deepcopy(data)

    # Drop any SSH log items.
    data[u"log"] = list()

    # Duration is computed for every file.
    start_float = dateutil.parser.parse(data[u"start_time"]).timestamp()
    end_float = dateutil.parser.parse(data[u"end_time"]).timestamp()
    data[u"duration"] = end_float - start_float

    # Reorder important fields to the top.
    sorted_data = dict(version=data.pop(u"version"))
    sorted_data[u"duration"] = data.pop(u"duration")
    sorted_data[u"start_time"] = data.pop(u"start_time")
    sorted_data[u"end_time"] = data.pop(u"end_time")
    sorted_data.update(data)
    # Continue editing the reordered copy, so edits below end up returned.
    data = sorted_data
    # TODO: Do we care about the order of subsequently added fields?

    # Convert status into a boolean.
    status = data.pop(u"status", None)
    if status is not None:
        data[u"passed"] = (status == u"PASS")
        if data[u"passed"]:
            # Also truncate success test messages.
            data[u"message"] = u""

    # Replace raw names with processed ones, set test_id and test_type.
    _process_test_name(data)

    # The rest is only relevant for test case outputs.
    if u"result" not in data:
        return sorted_data
    result_node = data[u"result"]
    result_type = result_node[u"type"]
    if result_type == u"unknown":
        # Device or something else not supported.
        return sorted_data

    # More processing depending on result type. TODO: Separate functions?

    # Compute avg and stdev for mrr.
    if result_type == u"mrr":
        rate_node = result_node[u"receive_rate"][u"rate"]
        stats = AvgStdevStats.for_runs(rate_node[u"values"])
        rate_node[u"avg"] = stats.avg
        rate_node[u"stdev"] = stats.stdev
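
    # Illustrative note (hypothetical values): for rate values [10.0, 12.0],
    # AvgStdevStats.for_runs yields avg 11.0 and (population) stdev 1.0.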

    # Multiple processing steps for ndrpdr.
    if result_type != u"ndrpdr":
        return sorted_data
    # Filter out invalid latencies.
    for which_key in (u"latency_forward", u"latency_reverse"):
        if which_key not in result_node:
            # Probably just a unidirectional test.
            continue
        for load in (u"pdr_0", u"pdr_10", u"pdr_50", u"pdr_90"):
            if result_node[which_key][load][u"max"] <= 0:
                # One invalid number is enough to remove all loads.
                break
        else:
            # No break means all numbers are ok, nothing to do here.
            continue
        # Break happened, something is invalid, remove all loads.
        result_node.pop(which_key)
    return sorted_data
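
# Note on the latency filter above (behavior restated for clarity): a single
# non-positive "max" in any of the four loads triggers the break, and the
# whole direction node is then popped; the for/else "continue" keeps nodes
# where every load has a positive "max".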


def _merge_into_suite_info_file(teardown_info_path):
    """Move setup and teardown data into a single file, remove old files.

    The caller has to confirm the argument is correct, e.g. ending in
    "/teardown.info.json".

    :param teardown_info_path: Local filesystem path to teardown info file.
    :type teardown_info_path: str
    :returns: Local filesystem path to newly created suite info file.
    :rtype: str
    """
    # Manual right replace: https://stackoverflow.com/a/9943875
    setup_info_path = u"setup".join(teardown_info_path.rsplit(u"teardown", 1))
    with open(teardown_info_path, u"rt", encoding="utf-8") as file_in:
        teardown_data = json.load(file_in)
    # Transforming setup data into suite data.
    with open(setup_info_path, u"rt", encoding="utf-8") as file_in:
        suite_data = json.load(file_in)

    end_time = teardown_data[u"end_time"]
    suite_data[u"end_time"] = end_time
    start_float = dateutil.parser.parse(suite_data[u"start_time"]).timestamp()
    end_float = dateutil.parser.parse(suite_data[u"end_time"]).timestamp()
    suite_data[u"duration"] = end_float - start_float
    setup_log = suite_data.pop(u"log")
    suite_data[u"setup_log"] = setup_log
    suite_data[u"teardown_log"] = teardown_data[u"log"]

    suite_info_path = u"suite".join(teardown_info_path.rsplit(u"teardown", 1))
    with open(suite_info_path, u"wt", encoding="utf-8") as file_out:
        json.dump(suite_data, file_out, indent=1)
    # We moved everything useful from temporary setup/teardown info files.
    os.remove(setup_info_path)
    os.remove(teardown_info_path)

    return suite_info_path
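

# For instance (hypothetical paths): given u"out/s1/teardown.info.json",
# the function reads u"out/s1/setup.info.json", writes the merged
# u"out/s1/suite.info.json", and removes both temporary files.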


def convert_content_to_info(from_raw_path):
    """Read raw output, perform filtering, add derivatives, write info output.

    Directory path is created if missing.

    When processing teardown, create also suite output using setup info.

    :param from_raw_path: Local filesystem path to read raw JSON data from.
    :type from_raw_path: str
    :returns: Local filesystem path to written info JSON file.
    :rtype: str
    :raises RuntimeError: If path or content do not match expectations.
    """
    to_info_path = _raw_to_info_path(from_raw_path)
    with open(from_raw_path, u"rt", encoding="utf-8") as file_in:
        data = json.load(file_in)

    data = _convert_to_info_in_memory(data)

    with open(to_info_path, u"wt", encoding="utf-8") as file_out:
        json.dump(data, file_out, indent=1)
    if to_info_path.endswith(u"/teardown.info.json"):
        to_info_path = _merge_into_suite_info_file(to_info_path)
        # TODO: Return both paths for validation?
    return to_info_path
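

# A minimal usage sketch (hypothetical path):
#
#     info_path = convert_content_to_info(u"out/s1/test.raw.json")
#     # info_path == u"out/s1/test.info.json"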