+    def _validate_columns(self, data_type: str) -> str:
+        """Check whether all defined columns are present in the dataframe.
+
+        :param data_type: The data type defined in data.yaml.
+        :type data_type: str
+        :returns: Error message if validation fails, otherwise an empty
+            string.
+        :rtype: str
+        """
+        defined_columns = set()
+        for data_set in self._data_spec:
+            if data_set.get("data_type", str()) == data_type:
+                defined_columns.update(data_set.get("columns", set()))
+
+        if not defined_columns:
+            return "No columns defined in the data set(s)."
+
+        if self.data[data_type].empty:
+            return "No data."
+
+        missing = [
+            col for col in defined_columns
+            if col not in self.data[data_type].columns
+        ]
+        if missing:
+            return f"Missing columns: {', '.join(missing)}"
+        return str()
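+
+    # Minimal usage sketch (assuming "trending" is a data type defined in
+    # data.yaml and that logging is imported elsewhere in this module):
+    #     err_msg = self._validate_columns("trending")
+    #     if err_msg:
+    #         logging.error(err_msg)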
+
+    @staticmethod
+    def _write_parquet_schema(
+        path,
+        partition_filter=None,
+        columns=None,
+        validate_schema=False,
+        last_modified_begin=None,
+        last_modified_end=None,
+        days=None
+    ) -> None:
+        """Auxiliary function to write parquet schemas. Use it instead of
+        "_create_dataframe_from_parquet" in "read_all_data".
+
+        :param path: S3 prefix (accepts Unix shell-style wildcards)
+            (e.g. s3://bucket/prefix) or list of S3 object paths
+            (e.g. [s3://bucket/key0, s3://bucket/key1]).
+        :param partition_filter: Callback function filter to apply on
+            PARTITION columns (PUSH-DOWN filter). This function must receive
+            a single argument (Dict[str, str]) where keys are partition names
+            and values are partition values. Partition values are always
+            strings extracted from S3. This function must return a bool, True
+            to read the partition or False to ignore it. Ignored if
+            dataset=False.
+        :param columns: Names of columns to read from the file(s).
+        :param validate_schema: Check that the schemas of the individual
+            files are all the same / compatible. Schemas within a folder
+            prefix should all be the same. Disable this check if the schemas
+            differ.
+        :param last_modified_begin: Filter the S3 files by the last modified
+            date of the object. The filter is applied only after listing all
+            S3 files.
+        :param last_modified_end: Filter the S3 files by the last modified
+            date of the object. The filter is applied only after listing all
+            S3 files.
+        :param days: Number of days (back from now) to filter the S3 files
+            by their last modified date.
+        :type path: Union[str, List[str]]
+        :type partition_filter: Callable[[Dict[str, str]], bool], optional
+        :type columns: List[str], optional
+        :type validate_schema: bool, optional
+        :type last_modified_begin: datetime, optional
+        :type last_modified_end: datetime, optional
+        :type days: int, optional
+        """
+        if days:
+            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
+
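+        # "chunked=1" makes wr.s3.read_parquet return an iterator of
+        # one-row DataFrames, so only a minimal amount of data has to be
+        # read to inspect the schema of each chunk.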
+        df = wr.s3.read_parquet(
+            path=path,
+            path_suffix="parquet",
+            ignore_empty=True,
+            validate_schema=validate_schema,
+            use_threads=True,
+            dataset=True,
+            columns=columns,
+            partition_filter=partition_filter,
+            last_modified_begin=last_modified_begin,
+            last_modified_end=last_modified_end,
+            chunked=1
+        )
+
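+        # Iterate over the chunks; when a chunk with the expected column
+        # dtype is found, write its schema to a temporary file and stop.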
+        for itm in df:
+            try:
+                # Specify the condition below, or remove it entirely:
+                if pd.api.types.is_string_dtype(itm["result_rate_unit"]):
+                    schema = pa.Schema.from_pandas(itm)
+                    print(schema)
+                    pa.parquet.write_metadata(
+                        schema,
+                        f"{C.PATH_TO_SCHEMAS}_tmp_schema"
+                    )
+                    print(itm)
+                    break
+            except KeyError:
+                pass
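+
+    # Usage sketch: in "read_all_data", the call to
+    # "_create_dataframe_from_parquet" can be replaced with, e.g.
+    # (path and days are illustrative values only):
+    #     self._write_parquet_schema(path="s3://bucket/prefix", days=5)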
+
+    @staticmethod
+    def _create_dataframe_from_parquet(
+        path,
+        partition_filter=None,
+        columns=None,
+        validate_schema=False,
+        last_modified_begin=None,
+        last_modified_end=None,
+        days=None,
+        schema=None
+    ) -> pd.DataFrame: