diff --git a/resources/tools/dash/app/pal/data/data.py b/resources/tools/dash/app/pal/data/data.py
index 9ce09e7f8b..efe2a2d1b6 100644
--- a/resources/tools/dash/app/pal/data/data.py
+++ b/resources/tools/dash/app/pal/data/data.py
@@ -14,11 +14,15 @@
 """Prepare data for Plotly Dash."""
 
 import logging
+
+from yaml import load, FullLoader, YAMLError
+from datetime import datetime, timedelta
 from time import time
+from pytz import UTC
+from pandas import DataFrame
 
 import awswrangler as wr
 
-from yaml import load, FullLoader, YAMLError
 from awswrangler.exceptions import EmptyDataFrame, NoFilesFound
 
 
@@ -26,7 +30,7 @@ class Data:
     """
     """
 
-    def __init__(self, data_spec_file, debug=False):
+    def __init__(self, data_spec_file: str, debug: bool=False) -> None:
         """
         """
 
@@ -59,7 +63,7 @@ class Data:
     def data(self):
         return self._data
 
-    def _get_columns(self, parquet):
+    def _get_columns(self, parquet: str) -> list:
         try:
             return self._data_spec[parquet]["columns"]
         except KeyError as err:
@@ -69,7 +73,7 @@
                 f"specified.\n{err}"
             )
 
-    def _get_path(self, parquet):
+    def _get_path(self, parquet: str) -> str:
         try:
             return self._data_spec[parquet]["path"]
         except KeyError as err:
@@ -82,7 +86,7 @@
     def _create_dataframe_from_parquet(self, path, partition_filter=None,
                                        columns=None, validate_schema=False,
                                        last_modified_begin=None,
-                                       last_modified_end=None):
+                                       last_modified_end=None, days=None) -> DataFrame:
         """Read parquet stored in S3 compatible storage and returns Pandas
         Dataframe.
 
@@ -116,6 +120,8 @@
         """
         df = None
         start = time()
+        if days:
+            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
         try:
             df = wr.s3.read_parquet(
                 path=path,
@@ -135,8 +141,6 @@
                 u"\n"
                 f"Creation of dataframe {path} took: {time() - start}"
                 u"\n"
-                f"{df}"
-                u"\n"
             )
         except NoFilesFound as err:
             logging.error(f"No parquets found.\n{err}")
@@ -146,18 +150,35 @@
         self._data = df
         return df
 
-    def read_stats(self):
+    def read_stats(self, days: int=None) -> tuple:
         """Read Suite Result Analysis data partition from parquet.
         """
-        lambda_f = lambda part: True if part["stats_type"] == "sra" else False
-
-        return self._create_dataframe_from_parquet(
-            path=self._get_path("statistics"),
-            partition_filter=lambda_f,
-            columns=None  # Get all columns.
+        l_stats = lambda part: True if part["stats_type"] == "sra" else False
+        l_mrr = lambda part: True if part["test_type"] == "mrr" else False
+        l_ndrpdr = lambda part: True if part["test_type"] == "ndrpdr" else False
+
+        return (
+            self._create_dataframe_from_parquet(
+                path=self._get_path("statistics"),
+                partition_filter=l_stats,
+                columns=self._get_columns("statistics"),
+                days=days
+            ),
+            self._create_dataframe_from_parquet(
+                path=self._get_path("statistics-trending"),
+                partition_filter=l_mrr,
+                columns=self._get_columns("statistics-trending"),
+                days=days
+            ),
+            self._create_dataframe_from_parquet(
+                path=self._get_path("statistics-trending"),
+                partition_filter=l_ndrpdr,
+                columns=self._get_columns("statistics-trending"),
+                days=days
+            )
         )
 
-    def read_trending_mrr(self):
+    def read_trending_mrr(self, days: int=None) -> DataFrame:
         """Read MRR data partition from parquet.
         """
         lambda_f = lambda part: True if part["test_type"] == "mrr" else False
@@ -165,10 +186,11 @@
         return self._create_dataframe_from_parquet(
             path=self._get_path("trending-mrr"),
             partition_filter=lambda_f,
-            columns=self._get_columns("trending-mrr")
+            columns=self._get_columns("trending-mrr"),
+            days=days
         )
 
-    def read_trending_ndrpdr(self):
+    def read_trending_ndrpdr(self, days: int=None) -> DataFrame:
         """Read NDRPDR data partition from iterative parquet.
         """
         lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False
@@ -176,27 +198,28 @@
         return self._create_dataframe_from_parquet(
             path=self._get_path("trending-ndrpdr"),
             partition_filter=lambda_f,
-            columns=self._get_columns("trending-ndrpdr")
+            columns=self._get_columns("trending-ndrpdr"),
+            days=days
         )
 
-    def read_iterative_mrr(self):
+    def read_iterative_mrr(self, release: str) -> DataFrame:
         """Read MRR data partition from iterative parquet.
         """
         lambda_f = lambda part: True if part["test_type"] == "mrr" else False
 
         return self._create_dataframe_from_parquet(
-            path=self._get_path("iterative-mrr"),
+            path=self._get_path("iterative-mrr").format(release=release),
             partition_filter=lambda_f,
             columns=self._get_columns("iterative-mrr")
         )
 
-    def read_iterative_ndrpdr(self):
+    def read_iterative_ndrpdr(self, release: str) -> DataFrame:
         """Read NDRPDR data partition from parquet.
         """
         lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False
 
         return self._create_dataframe_from_parquet(
-            path=self._get_path("iterative-ndrpdr"),
+            path=self._get_path("iterative-ndrpdr").format(release=release),
            partition_filter=lambda_f,
             columns=self._get_columns("iterative-ndrpdr")
         )