# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """Prepare data for Plotly Dash applications.
18 import awswrangler as wr
20 from yaml import load, FullLoader, YAMLError
21 from datetime import datetime, timedelta
24 from pandas import DataFrame
25 from awswrangler.exceptions import EmptyDataFrame, NoFilesFound
29 """Gets the data from parquets and stores it for further use by dash

    def __init__(self, data_spec_file: str, debug: bool=False) -> None:
        """Initialize the Data object.

        :param data_spec_file: Path to the file specifying the data to be read
            from parquets.
        :param debug: If True, the debug information is printed to stdout.
        :type data_spec_file: str
        :type debug: bool
        :raises RuntimeError: If it is not possible to open data_spec_file or
            it is not a valid yaml file.
        """

        # Inputs:
        self._data_spec_file = data_spec_file
        self._debug = debug

        # Specification of data to be read from parquets:
        self._data_spec = None

        # Data frame to keep the data:
        self._data = None

        # Read the specification of data to be read from parquets:
        try:
            with open(self._data_spec_file, "r") as file_read:
                self._data_spec = load(file_read, Loader=FullLoader)
        except IOError as err:
            raise RuntimeError(
                f"Not possible to open the file {self._data_spec_file}\n{err}"
            )
        except YAMLError as err:
            raise RuntimeError(
                f"An error occurred while parsing the specification file "
                f"{self._data_spec_file}\n{err}"
            )

    def _get_columns(self, parquet: str) -> list:
        """Get the list of columns to be read from the given parquet, as
        defined in the data specification file.

        :param parquet: The parquet's name.
        :type parquet: str
        :raises RuntimeError: If the parquet is not defined in the data
            specification file or it does not have any columns specified.
        :returns: List of columns.
        :rtype: list
        """
        try:
            return self._data_spec[parquet]["columns"]
        except KeyError as err:
            raise RuntimeError(
                f"The parquet {parquet} is not defined in the specification "
                f"file {self._data_spec_file} or it does not have any columns "
                f"specified.\n{err}"
            )

    def _get_path(self, parquet: str) -> str:
        """Get the path to the given parquet, as defined in the data
        specification file.

        :param parquet: The parquet's name.
        :type parquet: str
        :raises RuntimeError: If the parquet is not defined in the data
            specification file or it does not have the path specified.
        :returns: Path to the parquet.
        :rtype: str
        """
        try:
            return self._data_spec[parquet]["path"]
        except KeyError as err:
            raise RuntimeError(
                f"The parquet {parquet} is not defined in the specification "
                f"file {self._data_spec_file} or it does not have the path "
                f"specified.\n{err}"
            )

    def _get_list_of_files(self,
        path,
        last_modified_begin=None,
        last_modified_end=None,
        days=None) -> list:
        """Get the list of files of interest stored in S3 compatible storage
        and return it.

        :param path: S3 prefix (accepts Unix shell-style wildcards)
            (e.g. s3://bucket/prefix) or list of S3 objects paths
            (e.g. [s3://bucket/key0, s3://bucket/key1]).
        :param last_modified_begin: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after listing all
            s3 files.
        :param last_modified_end: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after listing all
            s3 files.
        :param days: Number of days to filter.
        :type path: Union[str, List[str]]
        :type last_modified_begin: datetime, optional
        :type last_modified_end: datetime, optional
        :type days: int, optional
        :returns: List of file names.
        :rtype: list
        """
        file_list = list()
        if days:
            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
        try:
            file_list = wr.s3.list_objects(
                path=path,
                suffix="parquet",
                last_modified_begin=last_modified_begin,
                last_modified_end=last_modified_end
            )
            if self._debug:
                logging.info("\n".join(file_list))
        except NoFilesFound as err:
            logging.error(f"No parquets found.\n{err}")
        except EmptyDataFrame as err:
            logging.error(f"No data.\n{err}")

        return file_list

    def _create_dataframe_from_parquet(self,
        path, partition_filter=None,
        columns=None,
        validate_schema=False,
        last_modified_begin=None,
        last_modified_end=None,
        days=None) -> DataFrame:
        """Read parquet stored in S3 compatible storage and return a Pandas
        DataFrame.

        :param path: S3 prefix (accepts Unix shell-style wildcards)
            (e.g. s3://bucket/prefix) or list of S3 objects paths
            (e.g. [s3://bucket/key0, s3://bucket/key1]).
        :param partition_filter: Callback function filter to apply on PARTITION
            columns (PUSH-DOWN filter). This function MUST receive a single
            argument (Dict[str, str]) where keys are partition names and values
            are partition values. Partition values will always be strings
            extracted from S3. This function MUST return a bool, True to read
            the partition or False to ignore it. Ignored if dataset=False.
        :param columns: Names of columns to read from the file(s).
        :param validate_schema: Check that individual file schemas are all the
            same / compatible. Schemas within a folder prefix should all be the
            same. Disable if you have schemas that are different and want to
            disable this check.
        :param last_modified_begin: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after listing all
            s3 files.
        :param last_modified_end: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after listing all
            s3 files.
        :param days: Number of days to filter.
        :type path: Union[str, List[str]]
        :type partition_filter: Callable[[Dict[str, str]], bool], optional
        :type columns: List[str], optional
        :type validate_schema: bool, optional
        :type last_modified_begin: datetime, optional
        :type last_modified_end: datetime, optional
        :type days: int, optional
        :returns: Pandas DataFrame or None if the DataFrame cannot be fetched.
        :rtype: DataFrame
        """
        df = None
        start = time()
        if days:
            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
        try:
            df = wr.s3.read_parquet(
                path=path,
                path_suffix="parquet",
                ignore_empty=True,
                validate_schema=validate_schema,
                use_threads=True,
                dataset=True,
                columns=columns,
                partition_filter=partition_filter,
                last_modified_begin=last_modified_begin,
                last_modified_end=last_modified_end
            )
            if self._debug:
                df.info(verbose=True, memory_usage='deep')
                logging.info(
                    f"\nCreation of dataframe {path} took: {time() - start}\n"
                )
        except NoFilesFound as err:
            logging.error(f"No parquets found.\n{err}")
        except EmptyDataFrame as err:
            logging.error(f"No data.\n{err}")

        self._data = df
        return df

    def check_datasets(self, days: int=None):
        """Check the structure of the trending and statistics parquets by
        listing their files.

        :param days: Number of days back to the past for which the data will be
            read.
        :type days: int
        """
        self._get_list_of_files(path=self._get_path("trending"), days=days)
        self._get_list_of_files(path=self._get_path("statistics"), days=days)

    def read_stats(self, days: int=None) -> tuple:
        """Read statistics from parquet.

        It reads from:
        - Suite Result Analysis (SRA) partition,
        - NDRPDR trending partition,
        - MRR trending partition.

        :param days: Number of days back to the past for which the data will be
            read.
        :type days: int
        :returns: Tuple of pandas DataFrame-s with data read from specified
            parquets.
        :rtype: tuple of pandas DataFrame-s
        """
        l_stats = lambda part: True if part["stats_type"] == "sra" else False
        l_mrr = lambda part: True if part["test_type"] == "mrr" else False
        l_ndrpdr = lambda part: True if part["test_type"] == "ndrpdr" else False

        return (
            self._create_dataframe_from_parquet(
                path=self._get_path("statistics"),
                partition_filter=l_stats,
                columns=self._get_columns("statistics"),
                days=days
            ),
            self._create_dataframe_from_parquet(
                path=self._get_path("statistics-trending-mrr"),
                partition_filter=l_mrr,
                columns=self._get_columns("statistics-trending-mrr"),
                days=days
            ),
            self._create_dataframe_from_parquet(
                path=self._get_path("statistics-trending-ndrpdr"),
                partition_filter=l_ndrpdr,
                columns=self._get_columns("statistics-trending-ndrpdr"),
                days=days
            )
        )

    def read_trending_mrr(self, days: int=None) -> DataFrame:
        """Read MRR data partition from trending parquet.

        :param days: Number of days back to the past for which the data will be
            read.
        :type days: int
        :returns: Pandas DataFrame with read data.
        :rtype: DataFrame
        """
        lambda_f = lambda part: True if part["test_type"] == "mrr" else False

        return self._create_dataframe_from_parquet(
            path=self._get_path("trending-mrr"),
            partition_filter=lambda_f,
            columns=self._get_columns("trending-mrr"),
            days=days
        )

    def read_trending_ndrpdr(self, days: int=None) -> DataFrame:
        """Read NDRPDR data partition from trending parquet.

        :param days: Number of days back to the past for which the data will be
            read.
        :type days: int
        :returns: Pandas DataFrame with read data.
        :rtype: DataFrame
        """
        lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False

        return self._create_dataframe_from_parquet(
            path=self._get_path("trending-ndrpdr"),
            partition_filter=lambda_f,
            columns=self._get_columns("trending-ndrpdr"),
            days=days
        )

    def read_iterative_mrr(self, release: str) -> DataFrame:
        """Read MRR data partition from iterative parquet.

        :param release: The CSIT release from which the data will be read.
        :type release: str
        :returns: Pandas DataFrame with read data.
        :rtype: DataFrame
        """
        lambda_f = lambda part: True if part["test_type"] == "mrr" else False

        return self._create_dataframe_from_parquet(
            path=self._get_path("iterative-mrr").format(release=release),
            partition_filter=lambda_f,
            columns=self._get_columns("iterative-mrr")
        )

    def read_iterative_ndrpdr(self, release: str) -> DataFrame:
        """Read NDRPDR data partition from iterative parquet.

        :param release: The CSIT release from which the data will be read.
        :type release: str
        :returns: Pandas DataFrame with read data.
        :rtype: DataFrame
        """
        lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False

        return self._create_dataframe_from_parquet(
            path=self._get_path("iterative-ndrpdr").format(release=release),
            partition_filter=lambda_f,
            columns=self._get_columns("iterative-ndrpdr")
        )
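

if __name__ == "__main__":
    # A minimal usage sketch, not used by the Dash applications themselves.
    # Assumptions: a specification file named "data_spec.yaml" exists in the
    # working directory and awswrangler can reach the configured S3 compatible
    # storage with valid credentials; the file name and the 30-day window are
    # illustrative only.
    logging.basicConfig(level=logging.INFO)
    data = Data(data_spec_file="data_spec.yaml", debug=True)
    mrr_df = data.read_trending_mrr(days=30)
    if mrr_df is not None:
        logging.info(f"Read {len(mrr_df)} rows of MRR trending data.")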