feat(uti): Add iterative data
diff --git a/resources/tools/dash/app/pal/data/data.py b/resources/tools/dash/app/pal/data/data.py
index 859c7d3..efe2a2d 100644
--- a/resources/tools/dash/app/pal/data/data.py
+++ b/resources/tools/dash/app/pal/data/data.py
 """Prepare data for Plotly Dash."""
 
 import logging
+
+from datetime import datetime, timedelta
 from time import time
+
+from yaml import load, FullLoader, YAMLError
+from pytz import UTC
+from pandas import DataFrame
 
 import awswrangler as wr
 
-from yaml import load, FullLoader, YAMLError
 from awswrangler.exceptions import EmptyDataFrame, NoFilesFound
 
 
@@ -26,7 +30,7 @@ class Data:
     """
     """
 
-    def __init__(self, data_spec_file, debug=False):
+    def __init__(self, data_spec_file: str, debug: bool = False) -> None:
         """
         """
 
@@ -59,7 +63,7 @@ class Data:
     def data(self):
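+        """Return the DataFrame stored in the object."""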
         return self._data
 
-    def _get_columns(self, parquet):
+    def _get_columns(self, parquet: str) -> list:
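+        """Return the list of columns specified for the given parquet in
+        the data specification.
+        """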
         try:
             return self._data_spec[parquet]["columns"]
         except KeyError as err:
@@ -69,7 +73,7 @@ class Data:
                 f"specified.\n{err}"
             )
 
-    def _get_path(self, parquet):
+    def _get_path(self, parquet: str) -> str:
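+        """Return the path specified for the given parquet in the data
+        specification.
+        """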
         try:
             return self._data_spec[parquet]["path"]
         except KeyError as err:
@@ -82,7 +86,7 @@ class Data:
     def _create_dataframe_from_parquet(self,
         path, partition_filter=None, columns=None,
         validate_schema=False, last_modified_begin=None,
-        last_modified_end=None):
+        last_modified_end=None, days: int = None) -> DataFrame:
         """Read parquet stored in S3 compatible storage and returns Pandas
         Dataframe.
 
@@ -116,6 +120,8 @@ class Data:
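+        :param days: If given, only parquet files modified during the last
+            `days` days are read. If None, all available data is read.
+        :type days: int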
         """
         df = None
         start = time()
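+        # Transform the `days` window into a last-modified cut-off, so only
+        # parquet files written within the last `days` days are read.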
+        if days:
+            last_modified_begin = datetime.now(tz=UTC) - timedelta(days=days)
         try:
             df = wr.s3.read_parquet(
                 path=path,
@@ -144,18 +150,35 @@ class Data:
         self._data = df
         return df
 
-    def read_stats(self):
+    def read_stats(self, days: int = None) -> tuple:
         """Read Suite Result Analysis data partition from parquet.
         """
-        lambda_f = lambda part: True if part["stats_type"] == "sra" else False
-
-        return self._create_dataframe_from_parquet(
-            path=self._get_path("statistics"),
-            partition_filter=lambda_f,
-            columns=None  # Get all columns.
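+        # Partition filters are push-down filters applied to the partition
+        # columns (stats_type, test_type) while reading from S3.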
+        l_stats = lambda part: part["stats_type"] == "sra"
+        l_mrr = lambda part: part["test_type"] == "mrr"
+        l_ndrpdr = lambda part: part["test_type"] == "ndrpdr"
+
+        return (
+            self._create_dataframe_from_parquet(
+                path=self._get_path("statistics"),
+                partition_filter=l_stats,
+                columns=self._get_columns("statistics"),
+                days=days
+            ),
+            self._create_dataframe_from_parquet(
+                path=self._get_path("statistics-trending"),
+                partition_filter=l_mrr,
+                columns=self._get_columns("statistics-trending"),
+                days=days
+            ),
+            self._create_dataframe_from_parquet(
+                path=self._get_path("statistics-trending"),
+                partition_filter=l_ndrpdr,
+                columns=self._get_columns("statistics-trending"),
+                days=days
+            )
         )
 
-    def read_trending_mrr(self):
+    def read_trending_mrr(self, days: int = None) -> DataFrame:
         """Read MRR data partition from parquet.
         """
         lambda_f = lambda part: True if part["test_type"] == "mrr" else False
@@ -163,10 +186,11 @@ class Data:
         return self._create_dataframe_from_parquet(
             path=self._get_path("trending-mrr"),
             partition_filter=lambda_f,
-            columns=self._get_columns("trending-mrr")
+            columns=self._get_columns("trending-mrr"),
+            days=days
         )
 
-    def read_trending_ndrpdr(self):
+    def read_trending_ndrpdr(self, days: int = None) -> DataFrame:
         """Read NDRPDR data partition from iterative parquet.
         """
         lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False
@@ -174,27 +198,28 @@ class Data:
         return self._create_dataframe_from_parquet(
             path=self._get_path("trending-ndrpdr"),
             partition_filter=lambda_f,
-            columns=self._get_columns("trending-ndrpdr")
+            columns=self._get_columns("trending-ndrpdr"),
+            days=days
         )
 
-    def read_iterative_mrr(self):
+    def read_iterative_mrr(self, release: str) -> DataFrame:
         """Read MRR data partition from iterative parquet.
         """
         lambda_f = lambda part: True if part["test_type"] == "mrr" else False
 
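+        # The "iterative-mrr" path in the data specification is a template
+        # with a "{release}" placeholder, substituted by the given release.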
         return self._create_dataframe_from_parquet(
-            path=self._get_path("iterative-mrr"),
+            path=self._get_path("iterative-mrr").format(release=release),
             partition_filter=lambda_f,
             columns=self._get_columns("iterative-mrr")
         )
 
-    def read_iterative_ndrpdr(self):
+    def read_iterative_ndrpdr(self, release: str) -> DataFrame:
         """Read NDRPDR data partition from parquet.
         """
         lambda_f = lambda part: True if part["test_type"] == "ndrpdr" else False
 
         return self._create_dataframe_from_parquet(
-            path=self._get_path("iterative-ndrpdr"),
+            path=self._get_path("iterative-ndrpdr").format(release=release),
             partition_filter=lambda_f,
             columns=self._get_columns("iterative-ndrpdr")
         )