# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """Prepare data for Plotly Dash applications.

import logging

from time import time
from datetime import datetime, timedelta, timezone

from yaml import load, FullLoader, YAMLError
from pandas import DataFrame

import awswrangler as wr

from awswrangler.exceptions import EmptyDataFrame, NoFilesFound
31 """Gets the data from parquets and stores it for further use by dash

    def __init__(self, data_spec_file: str, debug: bool = False) -> None:
        """Initialize the Data object.

        :param data_spec_file: Path to file specifying the data to be read
            from parquets.
        :param debug: If True, the debug information is printed to stdout.
        :type data_spec_file: str
        :type debug: bool
        :raises RuntimeError: if it is not possible to open data_spec_file or
            it is not a valid yaml file.
        """

        # Inputs:
        self._data_spec_file = data_spec_file
        self._debug = debug

        # Specification of data to be read from parquets:
        self._data_spec = None

        # Data frame to keep the data:
        self._data = None

        # Read the specification of the data to be read from parquets:
        try:
            with open(self._data_spec_file, "r") as file_read:
                self._data_spec = load(file_read, Loader=FullLoader)
        except IOError as err:
            raise RuntimeError(
                f"Not possible to open the file {self._data_spec_file}.\n"
                f"{err}"
            )
        except YAMLError as err:
            raise RuntimeError(
                f"An error occurred while parsing the specification file "
                f"{self._data_spec_file}.\n"
                f"{err}"
            )

    def _get_columns(self, parquet: str) -> list:
        """Get the list of columns from the data specification file to be read
        from parquets.

        :param parquet: The parquet's name.
        :type parquet: str
        :raises RuntimeError: if the parquet is not defined in the data
            specification file or it does not have any columns specified.
        :returns: List of columns.
        :rtype: list
        """

        try:
            return self._data_spec[parquet]["columns"]
        except KeyError as err:
            raise RuntimeError(
                f"The parquet {parquet} is not defined in the specification "
                f"file {self._data_spec_file} or it does not have any columns "
                f"specified.\n{err}"
            )

    def _get_path(self, parquet: str) -> str:
        """Get the path from the data specification file to be read from
        parquets.

        :param parquet: The parquet's name.
        :type parquet: str
        :raises RuntimeError: if the parquet is not defined in the data
            specification file or it does not have the path specified.
        :returns: Path to the parquet.
        :rtype: str
        """

        try:
            return self._data_spec[parquet]["path"]
        except KeyError as err:
            raise RuntimeError(
                f"The parquet {parquet} is not defined in the specification "
                f"file {self._data_spec_file} or it does not have the path "
                f"specified.\n{err}"
            )

    def _create_dataframe_from_parquet(
            self, path, partition_filter=None, columns=None,
            validate_schema=False, last_modified_begin=None,
            last_modified_end=None, days=None) -> DataFrame:
        """Read parquet stored in S3 compatible storage and return Pandas
        DataFrame.

        :param path: S3 prefix (accepts Unix shell-style wildcards)
            (e.g. s3://bucket/prefix) or list of S3 objects paths
            (e.g. [s3://bucket/key0, s3://bucket/key1]).
        :param partition_filter: Callback Function filters to apply on
            PARTITION columns (PUSH-DOWN filter). This function MUST receive
            a single argument (Dict[str, str]) where keys are partitions
            names and values are partitions values. Partitions values will
            be always strings extracted from S3. This function MUST return
            a bool, True to read the partition or False to ignore it.
            Ignored if dataset=False.
        :param columns: Names of columns to read from the file(s).
        :param validate_schema: Check that individual file schemas are all the
            same / compatible. Schemas within a folder prefix should all be
            the same. Disable if you have schemas that are different and want
            to disable this check.
        :param last_modified_begin: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after listing all
            s3 files.
        :param last_modified_end: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after listing all
            s3 files.
        :param days: Number of days back to the past for which the data will
            be read.
        :type path: Union[str, List[str]]
        :type partition_filter: Callable[[Dict[str, str]], bool], optional
        :type columns: List[str], optional
        :type validate_schema: bool, optional
        :type last_modified_begin: datetime, optional
        :type last_modified_end: datetime, optional
        :type days: int, optional
        :returns: Pandas DataFrame or None if DataFrame cannot be fetched.
        :rtype: DataFrame
        """

        df = None
        start = time()
        if days:
            last_modified_begin = \
                datetime.now(tz=timezone.utc) - timedelta(days=days)
        try:
            df = wr.s3.read_parquet(
                path=path,
                path_suffix="parquet",
                dataset=True,
                validate_schema=validate_schema,
                columns=columns,
                partition_filter=partition_filter,
                last_modified_begin=last_modified_begin,
                last_modified_end=last_modified_end
            )
            if self._debug:
                df.info(verbose=True, memory_usage="deep")
                logging.info(
                    f"Creation of dataframe {path} took: {time() - start}"
                )
        except NoFilesFound as err:
            logging.error(f"No parquets found.\n{err}")
        except EmptyDataFrame as err:
            logging.error(f"No data.\n{err}")

        return df

    def read_stats(self, days: int = None) -> tuple:
        """Read statistics from parquet.

        It reads from:
        - Suite Result Analysis (SRA) partition,
        - NDRPDR trending partition,
        - MRR trending partition.

        :param days: Number of days back to the past for which the data will
            be read.
        :type days: int
        :returns: Tuple of pandas DataFrame-s with data read from specified
            parquets.
        :rtype: tuple of pandas DataFrame-s
        """

        l_stats = lambda part: part["stats_type"] == "sra"
        l_mrr = lambda part: part["test_type"] == "mrr"
        l_ndrpdr = lambda part: part["test_type"] == "ndrpdr"

        return (
            self._create_dataframe_from_parquet(
                path=self._get_path("statistics"),
                partition_filter=l_stats,
                columns=self._get_columns("statistics"),
                days=days
            ),
            self._create_dataframe_from_parquet(
                path=self._get_path("statistics-trending-mrr"),
                partition_filter=l_mrr,
                columns=self._get_columns("statistics-trending-mrr"),
                days=days
            ),
            self._create_dataframe_from_parquet(
                path=self._get_path("statistics-trending-ndrpdr"),
                partition_filter=l_ndrpdr,
                columns=self._get_columns("statistics-trending-ndrpdr"),
                days=days
            )
        )

    def read_trending_mrr(self, days: int = None) -> DataFrame:
        """Read MRR data partition from trending parquet.

        :param days: Number of days back to the past for which the data will
            be read.
        :type days: int
        :returns: Pandas DataFrame with read data.
        :rtype: DataFrame
        """

        lambda_f = lambda part: part["test_type"] == "mrr"

        return self._create_dataframe_from_parquet(
            path=self._get_path("trending-mrr"),
            partition_filter=lambda_f,
            columns=self._get_columns("trending-mrr"),
            days=days
        )

    def read_trending_ndrpdr(self, days: int = None) -> DataFrame:
        """Read NDRPDR data partition from trending parquet.

        :param days: Number of days back to the past for which the data will
            be read.
        :type days: int
        :returns: Pandas DataFrame with read data.
        :rtype: DataFrame
        """

        lambda_f = lambda part: part["test_type"] == "ndrpdr"

        return self._create_dataframe_from_parquet(
            path=self._get_path("trending-ndrpdr"),
            partition_filter=lambda_f,
            columns=self._get_columns("trending-ndrpdr"),
            days=days
        )

    def read_iterative_mrr(self, release: str) -> DataFrame:
        """Read MRR data partition from iterative parquet.

        :param release: The CSIT release from which the data will be read.
        :type release: str
        :returns: Pandas DataFrame with read data.
        :rtype: DataFrame
        """

        lambda_f = lambda part: part["test_type"] == "mrr"

        return self._create_dataframe_from_parquet(
            path=self._get_path("iterative-mrr").format(release=release),
            partition_filter=lambda_f,
            columns=self._get_columns("iterative-mrr")
        )

    def read_iterative_ndrpdr(self, release: str) -> DataFrame:
        """Read NDRPDR data partition from iterative parquet.

        :param release: The CSIT release from which the data will be read.
        :type release: str
        :returns: Pandas DataFrame with read data.
        :rtype: DataFrame
        """

        lambda_f = lambda part: part["test_type"] == "ndrpdr"

        return self._create_dataframe_from_parquet(
            path=self._get_path("iterative-ndrpdr").format(release=release),
            partition_filter=lambda_f,
            columns=self._get_columns("iterative-ndrpdr")
        )
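

# A minimal usage sketch, assuming a spec file at the hypothetical path
# "data/data.yaml" and S3 credentials already configured in the environment:
#
#   data = Data(data_spec_file="data/data.yaml", debug=True)
#   stats, trending_mrr, trending_ndrpdr = data.read_stats(days=180)
#   mrr = data.read_trending_mrr(days=30)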