1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Prepare data for Plotly Dash."""
from logging import info
from time import time

import awswrangler as wr
from awswrangler.exceptions import EmptyDataFrame, NoFilesFound
from boto3 import session
# S3 bucket (fronted by CloudFront) holding the CSIT parquet data sets.
S3_DOCS_BUCKET = "fdio-docs-s3-cloudfront-index"
def create_dataframe_from_parquet(
        path, partition_filter=None, columns=None,
        validate_schema=False, last_modified_begin=None,
        last_modified_end=None):
    """Read parquet stored in S3 compatible storage and return a Pandas
    DataFrame.

    :param path: S3 prefix (accepts Unix shell-style wildcards) (e.g.
        s3://bucket/prefix) or list of S3 objects paths (e.g.
        [s3://bucket/key0, s3://bucket/key1]).
    :param partition_filter: Callback Function filters to apply on PARTITION
        columns (PUSH-DOWN filter). This function MUST receive a single
        argument (Dict[str, str]) where keys are partitions names and values
        are partitions values. Partitions values will be always strings
        extracted from S3. This function MUST return a bool, True to read the
        partition or False to ignore it. Ignored if dataset=False.
    :param columns: Names of columns to read from the file(s).
    :param validate_schema: Check that individual file schemas are all the
        same / compatible. Schemas within a folder prefix should all be the
        same. Disable if you have schemas that are different and want to
        disable this check.
    :param last_modified_begin: Filter the s3 files by the Last modified date
        of the object. The filter is applied only after list all s3 files.
    :param last_modified_end: Filter the s3 files by the Last modified date of
        the object. The filter is applied only after list all s3 files.
    :type path: Union[str, List[str]]
    :type partition_filter: Callable[[Dict[str, str]], bool], optional
    :type columns: List[str], optional
    :type validate_schema: bool, optional
    :type last_modified_begin: datetime, optional
    :type last_modified_end: datetime, optional
    :returns: Pandas DataFrame or None if DataFrame cannot be fetched.
    :rtype: DataFrame
    """
    df = None
    start = time()
    try:
        df = wr.s3.read_parquet(
            path=path,
            path_suffix="parquet",
            # dataset=True is required for partition_filter to take effect
            # (partition filters are "Ignored if dataset=False").
            dataset=True,
            columns=columns,
            validate_schema=validate_schema,
            partition_filter=partition_filter,
            last_modified_begin=last_modified_begin,
            last_modified_end=last_modified_end
        )
        info(f"Create dataframe {path} took: {time() - start}")
        info(df.info(memory_usage="deep"))
    except NoFilesFound as err:
        # Nothing matched the prefix / filters; fall through and return None.
        info(f"No parquet files found for {path}: {err}")
    except EmptyDataFrame as err:
        # Files matched but contained no rows; fall through and return None.
        info(f"Empty dataframe for {path}: {err}")
    return df
# NOTE(review): the original "def" line for this function was missing from
# the source; the name "read_stats" is reconstructed — confirm against callers.
def read_stats():
    """Read Suite Result Analysis data partition from parquet.

    :returns: Pandas DataFrame with SRA stats data, or None if it cannot be
        fetched.
    :rtype: DataFrame
    """
    # Inline predicate instead of an assigned lambda (PEP 8 E731);
    # "True if x else False" is just "bool(x)" and the comparison is a bool.
    return create_dataframe_from_parquet(
        path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/stats",
        partition_filter=lambda part: part["stats_type"] == "sra"
    )
def read_trending_mrr():
    """Read MRR data partition from trending parquet.

    :returns: Pandas DataFrame with trending MRR data, or None if it cannot
        be fetched.
    :rtype: DataFrame
    """
    # Inline predicate instead of an assigned lambda (PEP 8 E731).
    return create_dataframe_from_parquet(
        path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/trending",
        partition_filter=lambda part: part["test_type"] == "mrr",
        columns=[
            "job", "build", "dut_type", "dut_version", "hosts",
            "start_time", "passed", "test_id", "test_name_long",
            "test_name_short", "version",
            "result_receive_rate_rate_avg",
            "result_receive_rate_rate_stdev",
            "result_receive_rate_rate_unit",
            "result_receive_rate_rate_values"
        ]
    )
def read_iterative_mrr():
    """Read MRR data partition from iterative parquet.

    :returns: Pandas DataFrame with iterative MRR data (release rls2202),
        or None if it cannot be fetched.
    :rtype: DataFrame
    """
    # Inline predicate instead of an assigned lambda (PEP 8 E731).
    return create_dataframe_from_parquet(
        path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2202",
        partition_filter=lambda part: part["test_type"] == "mrr",
        columns=[
            "job", "build", "dut_type", "dut_version", "hosts",
            "start_time", "passed", "test_id", "test_name_long",
            "test_name_short", "version",
            "result_receive_rate_rate_avg",
            "result_receive_rate_rate_stdev",
            "result_receive_rate_rate_unit",
            "result_receive_rate_rate_values"
        ]
    )
def read_trending_ndrpdr():
    """Read NDRPDR data partition from trending parquet.

    (Docstring fixed: it previously said "iterative" although the path below
    reads the trending data set.)

    :returns: Pandas DataFrame with trending NDRPDR data, or None if it
        cannot be fetched.
    :rtype: DataFrame
    """
    # Inline predicate instead of an assigned lambda (PEP 8 E731).
    return create_dataframe_from_parquet(
        path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/trending",
        partition_filter=lambda part: part["test_type"] == "ndrpdr",
        columns=[
            "job", "build", "dut_type", "dut_version", "hosts",
            "start_time", "passed", "test_id", "test_name_long",
            "test_name_short", "version",
            "result_pdr_upper_rate_unit",
            "result_pdr_upper_rate_value",
            "result_pdr_upper_bandwidth_unit",
            "result_pdr_upper_bandwidth_value",
            "result_pdr_lower_rate_unit",
            "result_pdr_lower_rate_value",
            "result_pdr_lower_bandwidth_unit",
            "result_pdr_lower_bandwidth_value",
            "result_ndr_upper_rate_unit",
            "result_ndr_upper_rate_value",
            "result_ndr_upper_bandwidth_unit",
            "result_ndr_upper_bandwidth_value",
            "result_ndr_lower_rate_unit",
            "result_ndr_lower_rate_value",
            "result_ndr_lower_bandwidth_unit",
            "result_ndr_lower_bandwidth_value",
            "result_latency_reverse_pdr_90_avg",
            "result_latency_reverse_pdr_90_hdrh",
            "result_latency_reverse_pdr_90_max",
            "result_latency_reverse_pdr_90_min",
            "result_latency_reverse_pdr_90_unit",
            "result_latency_reverse_pdr_50_avg",
            "result_latency_reverse_pdr_50_hdrh",
            "result_latency_reverse_pdr_50_max",
            "result_latency_reverse_pdr_50_min",
            "result_latency_reverse_pdr_50_unit",
            "result_latency_reverse_pdr_10_avg",
            "result_latency_reverse_pdr_10_hdrh",
            "result_latency_reverse_pdr_10_max",
            "result_latency_reverse_pdr_10_min",
            "result_latency_reverse_pdr_10_unit",
            "result_latency_reverse_pdr_0_avg",
            "result_latency_reverse_pdr_0_hdrh",
            "result_latency_reverse_pdr_0_max",
            "result_latency_reverse_pdr_0_min",
            "result_latency_reverse_pdr_0_unit",
            "result_latency_forward_pdr_90_avg",
            "result_latency_forward_pdr_90_hdrh",
            "result_latency_forward_pdr_90_max",
            "result_latency_forward_pdr_90_min",
            "result_latency_forward_pdr_90_unit",
            "result_latency_forward_pdr_50_avg",
            "result_latency_forward_pdr_50_hdrh",
            "result_latency_forward_pdr_50_max",
            "result_latency_forward_pdr_50_min",
            "result_latency_forward_pdr_50_unit",
            "result_latency_forward_pdr_10_avg",
            "result_latency_forward_pdr_10_hdrh",
            "result_latency_forward_pdr_10_max",
            "result_latency_forward_pdr_10_min",
            "result_latency_forward_pdr_10_unit",
            "result_latency_forward_pdr_0_avg",
            "result_latency_forward_pdr_0_hdrh",
            "result_latency_forward_pdr_0_max",
            "result_latency_forward_pdr_0_min",
            "result_latency_forward_pdr_0_unit"
        ]
    )
def read_iterative_ndrpdr():
    """Read NDRPDR data partition from iterative parquet.

    :returns: Pandas DataFrame with iterative NDRPDR data (release rls2202),
        or None if it cannot be fetched.
    :rtype: DataFrame
    """
    # Inline predicate instead of an assigned lambda (PEP 8 E731).
    return create_dataframe_from_parquet(
        path=f"s3://{S3_DOCS_BUCKET}/csit/parquet/iterative_rls2202",
        partition_filter=lambda part: part["test_type"] == "ndrpdr",
        columns=[
            "job", "build", "dut_type", "dut_version", "hosts",
            "start_time", "passed", "test_id", "test_name_long",
            "test_name_short", "version",
            "result_pdr_upper_rate_unit",
            "result_pdr_upper_rate_value",
            "result_pdr_upper_bandwidth_unit",
            "result_pdr_upper_bandwidth_value",
            "result_pdr_lower_rate_unit",
            "result_pdr_lower_rate_value",
            "result_pdr_lower_bandwidth_unit",
            "result_pdr_lower_bandwidth_value",
            "result_ndr_upper_rate_unit",
            "result_ndr_upper_rate_value",
            "result_ndr_upper_bandwidth_unit",
            "result_ndr_upper_bandwidth_value",
            "result_ndr_lower_rate_unit",
            "result_ndr_lower_rate_value",
            "result_ndr_lower_bandwidth_unit",
            "result_ndr_lower_bandwidth_value",
            "result_latency_reverse_pdr_90_avg",
            "result_latency_reverse_pdr_90_hdrh",
            "result_latency_reverse_pdr_90_max",
            "result_latency_reverse_pdr_90_min",
            "result_latency_reverse_pdr_90_unit",
            "result_latency_reverse_pdr_50_avg",
            "result_latency_reverse_pdr_50_hdrh",
            "result_latency_reverse_pdr_50_max",
            "result_latency_reverse_pdr_50_min",
            "result_latency_reverse_pdr_50_unit",
            "result_latency_reverse_pdr_10_avg",
            "result_latency_reverse_pdr_10_hdrh",
            "result_latency_reverse_pdr_10_max",
            "result_latency_reverse_pdr_10_min",
            "result_latency_reverse_pdr_10_unit",
            "result_latency_reverse_pdr_0_avg",
            "result_latency_reverse_pdr_0_hdrh",
            "result_latency_reverse_pdr_0_max",
            "result_latency_reverse_pdr_0_min",
            "result_latency_reverse_pdr_0_unit",
            "result_latency_forward_pdr_90_avg",
            "result_latency_forward_pdr_90_hdrh",
            "result_latency_forward_pdr_90_max",
            "result_latency_forward_pdr_90_min",
            "result_latency_forward_pdr_90_unit",
            "result_latency_forward_pdr_50_avg",
            "result_latency_forward_pdr_50_hdrh",
            "result_latency_forward_pdr_50_max",
            "result_latency_forward_pdr_50_min",
            "result_latency_forward_pdr_50_unit",
            "result_latency_forward_pdr_10_avg",
            "result_latency_forward_pdr_10_hdrh",
            "result_latency_forward_pdr_10_max",
            "result_latency_forward_pdr_10_min",
            "result_latency_forward_pdr_10_unit",
            "result_latency_forward_pdr_0_avg",
            "result_latency_forward_pdr_0_hdrh",
            "result_latency_forward_pdr_0_max",
            "result_latency_forward_pdr_0_min",
            "result_latency_forward_pdr_0_unit"
        ]
    )