X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=resources%2Ftools%2Fdash%2Fapp%2Fpal%2Fdata%2Fdata.py;h=77fd113a9c705112e53cb2b73b15ef50b4577418;hb=22a45eda880bee367ff8937d8e764cd41905a0cd;hp=9ce09e7f8b6f21d165c3cabe1e910828647802d3;hpb=47962ee624efeaec469473a5569b59bfd230babf;p=csit.git

diff --git a/resources/tools/dash/app/pal/data/data.py b/resources/tools/dash/app/pal/data/data.py
index 9ce09e7f8b..77fd113a9c 100644
--- a/resources/tools/dash/app/pal/data/data.py
+++ b/resources/tools/dash/app/pal/data/data.py
@@ -11,23 +11,35 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""Prepare data for Plotly Dash."""
+"""Prepare data for Plotly Dash applications.
+"""
 
 import logging
-from time import time
-
 import awswrangler as wr
 
 from yaml import load, FullLoader, YAMLError
+from datetime import datetime, timedelta
+from time import time
+from pytz import UTC
+from pandas import DataFrame
 from awswrangler.exceptions import EmptyDataFrame, NoFilesFound
 
 
 class Data:
-    """
+    """Gets the data from parquets and stores it for further use by Dash
+    applications.
     """
 
-    def __init__(self, data_spec_file, debug=False):
-        """
+    def __init__(self, data_spec_file: str, debug: bool=False) -> None:
+        """Initialize the Data object.
+
+        :param data_spec_file: Path to the file specifying the data to be read
+            from parquets.
+        :param debug: If True, debug information is printed to stdout.
+        :type data_spec_file: str
+        :type debug: bool
+        :raises RuntimeError: if it is not possible to open data_spec_file or
+            it is not a valid yaml file.
         """
 
         # Inputs:
@@ -59,7 +71,18 @@ class Data:
     def data(self):
         return self._data
 
-    def _get_columns(self, parquet):
+    def _get_columns(self, parquet: str) -> list:
+        """Get the list of columns to be read from the given parquet, as
+        defined in the data specification file.
+
+        :param parquet: The parquet's name.
+        :type parquet: str
+        :raises RuntimeError: if the parquet is not defined in the data
+            specification file or it does not have any columns specified.
+        :returns: List of columns.
+        :rtype: list
+        """
+
         try:
             return self._data_spec[parquet]["columns"]
         except KeyError as err:
@@ -69,7 +92,18 @@ class Data:
                 f"specified.\n{err}"
             )
 
-    def _get_path(self, parquet):
+    def _get_path(self, parquet: str) -> str:
+        """Get the path to the given parquet, as defined in the data
+        specification file.
+
+        :param parquet: The parquet's name.
+        :type parquet: str
+        :raises RuntimeError: if the parquet is not defined in the data
+            specification file or it does not have the path specified.
+        :returns: Path.
+        :rtype: str
+        """
+
         try:
             return self._data_spec[parquet]["path"]
         except KeyError as err:
@@ -79,10 +113,56 @@ class Data:
                 f"specified.\n{err}"
             )
 
+    def _get_list_of_files(self,
+        path,
+        last_modified_begin=None,
+        last_modified_end=None,
+        days=None) -> list:
+        """Get the list of files of interest stored in the S3 compatible
+        storage and return it.
+
+        :param path: S3 prefix (accepts Unix shell-style wildcards)
+            (e.g. s3://bucket/prefix) or list of S3 objects paths
+            (e.g. [s3://bucket/key0, s3://bucket/key1]).
+        :param last_modified_begin: Filter the s3 files by the Last modified
+            date of the object. The filter is applied only after listing all
+            s3 files.
+        :param last_modified_end: Filter the s3 files by the Last modified date
+            of the object. The filter is applied only after listing all s3 files.
+        :param days: Number of days back for which the files are listed.
+        :type path: Union[str, List[str]]
+        :type last_modified_begin: datetime, optional
+        :type last_modified_end: datetime, optional
+        :type days: int, optional
+        :returns: List of file names.
+        :rtype: list
+        """
+        file_list = list()
+        if days:
+            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
+        try:
+            file_list = wr.s3.list_objects(
+                path=path,
+                suffix="parquet",
+                last_modified_begin=last_modified_begin,
+                last_modified_end=last_modified_end
+            )
+            if self._debug:
+                logging.info("\n".join(file_list))
+        except NoFilesFound as err:
+            logging.error(f"No parquets found.\n{err}")
+        except EmptyDataFrame as err:
+            logging.error(f"No data.\n{err}")
+
+        return file_list
+
     def _create_dataframe_from_parquet(self,
-            path, partition_filter=None, columns=None,
-            validate_schema=False, last_modified_begin=None,
-            last_modified_end=None):
+            path, partition_filter=None,
+            columns=None,
+            validate_schema=False,
+            last_modified_begin=None,
+            last_modified_end=None,
+            days=None) -> DataFrame:
         """Read parquet stored in S3 compatible storage and returns Pandas
         Dataframe.
 
@@ -105,17 +184,21 @@ class Data:
         files.
         :param last_modified_end: Filter the s3 files by the Last modified date
         of the object. The filter is applied only after list all s3 files.
+        :param days: Number of days back for which the data is read.
         :type path: Union[str, List[str]]
         :type partition_filter: Callable[[Dict[str, str]], bool], optional
         :type columns: List[str], optional
         :type validate_schema: bool, optional
         :type last_modified_begin: datetime, optional
         :type last_modified_end: datetime, optional
+        :type days: int, optional
         :returns: Pandas DataFrame or None if DataFrame cannot be fetched.
         :rtype: DataFrame
         """
         df = None
         start = time()
+        if days:
+            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
         try:
             df = wr.s3.read_parquet(
                 path=path,
@@ -135,8 +218,6 @@ class Data:
                 u"\n"
                 f"Creation of dataframe {path} took: {time() - start}"
                 u"\n"
-                f"{df}"
-                u"\n"
             )
         except NoFilesFound as err:
             logging.error(f"No parquets found.\n{err}")
@@ -146,57 +227,125 @@ class Data:
         self._data = df
         return df
 
-    def read_stats(self):
-        """Read Suite Result Analysis data partition from parquet.
+    def check_datasets(self, days: int=None):
+        """List the parquet files in the trending and statistics datasets.
+
+        :param days: Number of days back to the past for which the data will be
+            read.
+        :type days: int
         """
-        lambda_f = lambda part: True if part["stats_type"] == "sra" else False
+        self._get_list_of_files(path=self._get_path("trending"), days=days)
+        self._get_list_of_files(path=self._get_path("statistics"), days=days)
 
-        return self._create_dataframe_from_parquet(
-            path=self._get_path("statistics"),
-            partition_filter=lambda_f,
-            columns=None  # Get all columns.
+    def read_stats(self, days: int=None) -> tuple:
+        """Read statistics from parquet.
+
+        It reads from:
+        - Suite Result Analysis (SRA) partition,
+        - NDRPDR trending partition,
+        - MRR trending partition.
+
+        :param days: Number of days back to the past for which the data will be
+            read.
+        :type days: int
+        :returns: tuple of pandas DataFrame-s with data read from specified
+            parquets.
+ :rtype: tuple of pandas DataFrame-s + """ + + l_stats = lambda part: True if part["stats_type"] == "sra" else False + l_mrr = lambda part: True if part["test_type"] == "mrr" else False + l_ndrpdr = lambda part: True if part["test_type"] == "ndrpdr" else False + + return ( + self._create_dataframe_from_parquet( + path=self._get_path("statistics"), + partition_filter=l_stats, + columns=self._get_columns("statistics"), + days=days + ), + self._create_dataframe_from_parquet( + path=self._get_path("statistics-trending-mrr"), + partition_filter=l_mrr, + columns=self._get_columns("statistics-trending-mrr"), + days=days + ), + self._create_dataframe_from_parquet( + path=self._get_path("statistics-trending-ndrpdr"), + partition_filter=l_ndrpdr, + columns=self._get_columns("statistics-trending-ndrpdr"), + days=days + ) ) - def read_trending_mrr(self): + def read_trending_mrr(self, days: int=None) -> DataFrame: """Read MRR data partition from parquet. + + :param days: Number of days back to the past for which the data will be + read. + :type days: int + :returns: Pandas DataFrame with read data. + :rtype: DataFrame """ + lambda_f = lambda part: True if part["test_type"] == "mrr" else False return self._create_dataframe_from_parquet( path=self._get_path("trending-mrr"), partition_filter=lambda_f, - columns=self._get_columns("trending-mrr") + columns=self._get_columns("trending-mrr"), + days=days ) - def read_trending_ndrpdr(self): + def read_trending_ndrpdr(self, days: int=None) -> DataFrame: """Read NDRPDR data partition from iterative parquet. + + :param days: Number of days back to the past for which the data will be + read. + :type days: int + :returns: Pandas DataFrame with read data. + :rtype: DataFrame """ + lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False return self._create_dataframe_from_parquet( path=self._get_path("trending-ndrpdr"), partition_filter=lambda_f, - columns=self._get_columns("trending-ndrpdr") + columns=self._get_columns("trending-ndrpdr"), + days=days ) - def read_iterative_mrr(self): + def read_iterative_mrr(self, release: str) -> DataFrame: """Read MRR data partition from iterative parquet. + + :param release: The CSIT release from which the data will be read. + :type release: str + :returns: Pandas DataFrame with read data. + :rtype: DataFrame """ + lambda_f = lambda part: True if part["test_type"] == "mrr" else False return self._create_dataframe_from_parquet( - path=self._get_path("iterative-mrr"), + path=self._get_path("iterative-mrr").format(release=release), partition_filter=lambda_f, columns=self._get_columns("iterative-mrr") ) - def read_iterative_ndrpdr(self): + def read_iterative_ndrpdr(self, release: str) -> DataFrame: """Read NDRPDR data partition from parquet. + + :param release: The CSIT release from which the data will be read. + :type release: str + :returns: Pandas DataFrame with read data. + :rtype: DataFrame """ + lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False return self._create_dataframe_from_parquet( - path=self._get_path("iterative-ndrpdr"), + path=self._get_path("iterative-ndrpdr").format(release=release), partition_filter=lambda_f, columns=self._get_columns("iterative-ndrpdr") )
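
The change above turns the Data class into a small read-side API for the Dash application: trending and statistics parquets can be limited to the last N days, statistics come back as a tuple of three DataFrames, and iterative parquets are selected per release. Below is a minimal usage sketch (illustration only, not part of the commit). The import path, the data specification file location and the release string are assumed placeholders; the method names, parameters and return values are taken from the diff above.

# Minimal usage sketch (illustration only, not part of the change).
# Assumptions: the module import path, the spec file location and the
# release string are placeholders; S3 credentials for awswrangler must
# already be configured in the environment.
from pal.data.data import Data

data = Data(data_spec_file="pal/data/data.yaml", debug=True)

# List the parquet files available for the last 5 days (logged when
# debug is enabled; nothing is returned).
data.check_datasets(days=5)

# Trending data, limited to the last 30 days via the new "days" argument.
mrr_df = data.read_trending_mrr(days=30)
ndrpdr_df = data.read_trending_ndrpdr(days=30)

# Statistics are returned as a tuple of three DataFrames:
# (SRA statistics, MRR trending statistics, NDRPDR trending statistics).
stats, stats_mrr, stats_ndrpdr = data.read_stats(days=30)

# Iterative data is read per CSIT release; "rls2202" is a placeholder.
iter_mrr_df = data.read_iterative_mrr(release="rls2202")
iter_ndrpdr_df = data.read_iterative_ndrpdr(release="rls2202")

Each read_* call returns None (and logs an error) when the parquet cannot be fetched, so callers are expected to check the result before handing it to the Dash layouts.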