# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Prepare data for Plotly Dash applications.
"""

import logging
import resource
import awswrangler as wr
import pandas as pd

from yaml import load, FullLoader, YAMLError
from datetime import datetime, timedelta
from time import time
from pytz import UTC
from awswrangler.exceptions import EmptyDataFrame, NoFilesFound


class Data:
    """Gets the data from parquets and stores it for further use by dash
    applications.
    """

    def __init__(self, data_spec_file: str) -> None:
        """Initialize the Data object.

        :param data_spec_file: Path to file specifying the data to be read
            from parquets.
        :type data_spec_file: str
        :raises RuntimeError: if it is not possible to open data_spec_file
            or it is not a valid yaml file.
        """

        # Inputs:
        self._data_spec_file = data_spec_file

        # Specification of data to be read from parquets:
        self._data_spec = list()

        # Data frames to keep the data:
        self._data = {
            "statistics": pd.DataFrame(),
            "trending": pd.DataFrame(),
            "iterative": pd.DataFrame(),
            "coverage": pd.DataFrame()
        }

        # Read the data specification from the file:
        try:
            with open(self._data_spec_file, "r") as file_read:
                self._data_spec = load(file_read, Loader=FullLoader)
        except IOError as err:
            raise RuntimeError(
                f"Not possible to open the file {self._data_spec_file}.\n"
                f"{err}"
            )
        except YAMLError as err:
            raise RuntimeError(
                f"An error occurred while parsing the specification file "
                f"{self._data_spec_file}.\n"
                f"{err}"
            )
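
    # A minimal sketch of one entry in the data specification file, inferred
    # from the keys accessed in read_all_data() below. The exact schema is
    # defined by the deployment, so the values here are assumptions:
    #
    #   - data_type: trending          # statistics|trending|iterative|coverage
    #     partition: test_type         # parquet partition column to match
    #     partition_name: mrr          # required value of that column
    #     path: s3://<bucket>/trending # S3 prefix holding the parquet files
    #     columns:                     # optional: columns to read
    #       - job
    #       - build
    #     categories:                  # optional: columns read as Categorical
    #       - job
    #       - build
    #     release: rls2306             # required for iterative and coverage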

    @staticmethod
    def _get_list_of_files(
            path,
            last_modified_begin=None,
            last_modified_end=None,
            days=None
        ) -> list:
        """Get a list of interesting files stored in S3 compatible storage
        and return it.

        :param path: S3 prefix (accepts Unix shell-style wildcards)
            (e.g. s3://bucket/prefix) or list of S3 objects paths
            (e.g. [s3://bucket/key0, s3://bucket/key1]).
        :param last_modified_begin: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after listing all
            s3 files.
        :param last_modified_end: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after listing all
            s3 files.
        :param days: Number of days to filter.
        :type path: Union[str, List[str]]
        :type last_modified_begin: datetime, optional
        :type last_modified_end: datetime, optional
        :type days: int, optional
        :returns: List of file names.
        :rtype: list
        """
        file_list = list()
        if days:
            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
        try:
            file_list = wr.s3.list_objects(
                path=path,
                suffix="parquet",
                last_modified_begin=last_modified_begin,
                last_modified_end=last_modified_end
            )
            logging.debug("\n".join(file_list))
        except NoFilesFound as err:
            logging.error(f"No parquets found.\n{err}")
        except EmptyDataFrame as err:
            logging.error(f"No data.\n{err}")

        return file_list
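
    # A usage sketch (hypothetical bucket and prefix): list parquet files
    # modified within the last week:
    #
    #   files = Data._get_list_of_files(
    #       path="s3://bucket/prefix",
    #       days=7
    #   )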

    @staticmethod
    def _create_dataframe_from_parquet(
            path,
            partition_filter=None,
            columns=None,
            categories=None,
            validate_schema=False,
            last_modified_begin=None,
            last_modified_end=None,
            days=None
        ) -> pd.DataFrame:
        """Read parquet stored in S3 compatible storage and return a Pandas
        DataFrame.

        :param path: S3 prefix (accepts Unix shell-style wildcards)
            (e.g. s3://bucket/prefix) or list of S3 objects paths
            (e.g. [s3://bucket/key0, s3://bucket/key1]).
        :param partition_filter: Callback Function filters to apply on
            PARTITION columns (PUSH-DOWN filter). This function MUST receive
            a single argument (Dict[str, str]) where keys are partitions
            names and values are partitions values. Partitions values will
            be always strings extracted from S3. This function MUST return
            a bool, True to read the partition or False to ignore it.
            Ignored if dataset=False.
        :param columns: Names of columns to read from the file(s).
        :param categories: List of columns names that should be returned as
            pandas.Categorical.
        :param validate_schema: Check that individual file schemas are all
            the same / compatible. Schemas within a folder prefix should all
            be the same. Disable if you have schemas that are different and
            want to disable this check.
        :param last_modified_begin: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after listing all
            s3 files.
        :param last_modified_end: Filter the s3 files by the Last modified
            date of the object. The filter is applied only after listing all
            s3 files.
        :param days: Number of days to filter.
        :type path: Union[str, List[str]]
        :type partition_filter: Callable[[Dict[str, str]], bool], optional
        :type columns: List[str], optional
        :type categories: List[str], optional
        :type validate_schema: bool, optional
        :type last_modified_begin: datetime, optional
        :type last_modified_end: datetime, optional
        :type days: int, optional
        :returns: Pandas DataFrame or None if DataFrame cannot be fetched.
        :rtype: pandas.DataFrame
        """
        df = None
        start = time()
        if days:
            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
        try:
            df = wr.s3.read_parquet(
                path=path,
                path_suffix="parquet",
                ignore_empty=True,
                validate_schema=validate_schema,
                use_threads=True,
                dataset=True,
                columns=columns,
                categories=categories,
                partition_filter=partition_filter,
                last_modified_begin=last_modified_begin,
                last_modified_end=last_modified_end
            )
            df.info(verbose=True, memory_usage="deep")
            logging.debug(
                f"\nCreation of dataframe {path} took: {time() - start}\n"
            )
        except NoFilesFound as err:
            logging.error(
                f"No parquets found in the specified time period.\n"
                f"Nr of days: {days}\n"
                f"last_modified_begin: {last_modified_begin}\n"
                f"{err}"
            )
        except EmptyDataFrame as err:
            logging.error(
                f"No data in parquets in the specified time period.\n"
                f"Nr of days: {days}\n"
                f"last_modified_begin: {last_modified_begin}\n"
                f"{err}"
            )

        return df
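
    # A usage sketch (hypothetical bucket and partition layout): read only
    # the partitions whose "test_type" partition column equals "mrr",
    # limited to files modified within the last 30 days:
    #
    #   df = Data._create_dataframe_from_parquet(
    #       path="s3://bucket/prefix",
    #       partition_filter=lambda part: part["test_type"] == "mrr",
    #       days=30
    #   )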

    def read_all_data(self, days: int = None) -> dict:
        """Read all data necessary for all applications.

        :param days: Number of days to filter. If None, all data will be
            downloaded.
        :type days: int, optional
        :returns: A dictionary where keys are names of parquets and values
            are the pandas dataframes with fetched data.
        :rtype: dict(str: pandas.DataFrame)
        """

        lst_trending = list()
        lst_iterative = list()
        lst_coverage = list()

        for data_set in self._data_spec:
            logging.info(
                f"Reading data for {data_set['data_type']} "
                f"{data_set['partition_name']} {data_set.get('release', '')}"
            )
            partition_filter = lambda part: \
                part[data_set["partition"]] == data_set["partition_name"]
            if data_set["data_type"] in ("trending", "statistics"):
                time_period = days
            else:
                time_period = None
            data = Data._create_dataframe_from_parquet(
                path=data_set["path"],
                partition_filter=partition_filter,
                columns=data_set.get("columns", None),
                categories=data_set.get("categories", None),
                days=time_period
            )

            if data_set["data_type"] == "statistics":
                self._data["statistics"] = data
            elif data_set["data_type"] == "trending":
                lst_trending.append(data)
            elif data_set["data_type"] == "iterative":
                data["release"] = data_set["release"]
                data["release"] = data["release"].astype("category")
                lst_iterative.append(data)
            elif data_set["data_type"] == "coverage":
                data["release"] = data_set["release"]
                data["release"] = data["release"].astype("category")
                lst_coverage.append(data)
            else:
                raise NotImplementedError(
                    f"The data type {data_set['data_type']} is not "
                    f"implemented."
                )

        self._data["iterative"] = pd.concat(
            lst_iterative,
            ignore_index=True,
            copy=False
        )
        self._data["trending"] = pd.concat(
            lst_trending,
            ignore_index=True,
            copy=False
        )
        self._data["coverage"] = pd.concat(
            lst_coverage,
            ignore_index=True,
            copy=False
        )

        for key in self._data.keys():
            logging.info(
                f"\nData frame {key}:"
                f"\n{self._data[key].memory_usage(deep=True)}\n"
            )
            self._data[key].info(verbose=True, memory_usage="deep")

        mem_alloc = \
            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
        logging.info(f"Memory allocation: {mem_alloc:.0f}MB")

        return self._data
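

if __name__ == "__main__":
    # A minimal usage sketch; the specification file name and the number of
    # days are assumptions, not part of this module:
    logging.basicConfig(level=logging.INFO)
    all_data = Data(data_spec_file="data_spec.yaml").read_all_data(days=180)
    for name, frame in all_data.items():
        logging.info(f"{name}: {len(frame)} rows")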