- if not sel:
- return no_update, no_update
-
- def _generate_trace(ttype: str, name: str, df: pd.DataFrame,
- start: datetime, end: datetime):
-
- value = {
- "mrr": "result_receive_rate_rate_avg",
- "ndr": "result_ndr_lower_rate_value",
- "pdr": "result_pdr_lower_rate_value"
- }
- unit = {
- "mrr": "result_receive_rate_rate_unit",
- "ndr": "result_ndr_lower_rate_unit",
- "pdr": "result_pdr_lower_rate_unit"
- }
-
- x_axis = [
- d for d in df["start_time"] if d >= start and d <= end
- ]
- hover_txt = list()
- for _, row in df.iterrows():
- hover_itm = (
- f"date: "
- f"{row['start_time'].strftime('%d-%m-%Y %H:%M:%S')}<br>"
- f"average [{row[unit[ttype]]}]: "
- f"{row[value[ttype]]}<br>"
- f"{row['dut_type']}-ref: {row['dut_version']}<br>"
- f"csit-ref: {row['job']}/{row['build']}"
+ latencies = {"name": name}
+ for key in _LAT_HDRH:
+ try:
+ latencies[key] = row[key]
+ except KeyError:
+ return None
+
+ return latencies
+
+
def _classify_anomalies(data):
    """Process the data and return anomalies and trending values.

    Gather data into groups with average as trend value.
    Decorate values within groups to be normal,
    the first value of changed average as a regression, or a progression.

    :param data: Full data set with unavailable samples replaced by nan.
    :type data: OrderedDict
    :returns: Classification and trend values
    :rtype: 3-tuple, list of strings, list of floats and list of floats
    """
    # NaN means something went wrong; score it as 0.0 so the grouping
    # reports it as a severe regression.
    scores = [0.0 if isnan(value) else value for value in data.values()]
    # TODO: Make BitCountingGroupList a subclass of list again?
    groups = classify(scores).group_list
    # Reversed so .pop() consumes the groups in original (FIFO) order.
    groups.reverse()
    tags = list()
    trend_avgs = list()
    trend_stdevs = list()
    group = None
    remaining = 0
    cur_avg = 0.0
    cur_stdev = 0.0
    for value in data.values():
        # NaN samples are tagged as outliers and carry the NaN through.
        if isnan(value):
            tags.append(u"outlier")
            trend_avgs.append(value)
            trend_stdevs.append(value)
            continue
        if remaining < 1 or group is None:
            # Current group exhausted, advance to the next non-empty one
            # (empty groups should not happen, but are tolerated).
            remaining = 0
            while remaining < 1:
                group = groups.pop()
                remaining = len(group.run_list)
            cur_avg = group.stats.avg
            cur_stdev = group.stats.stdev
            # First sample of a new group carries the group's comment
            # (regression / progression / normal).
            tags.append(group.comment)
        else:
            tags.append(u"normal")
        trend_avgs.append(cur_avg)
        trend_stdevs.append(cur_stdev)
        remaining -= 1
    return tags, trend_avgs, trend_stdevs
+
+
def select_trending_data(data: pd.DataFrame, itm: dict) -> pd.DataFrame:
    """Select the rows of trending data matching one selected test item.

    :param data: Full trending data set to filter.
    :param itm: Selection dict with "phy", "dut", "core", "testtype",
        "framesize" and "test" keys describing one test.
    :type data: pandas.DataFrame
    :type itm: dict
    :returns: Filtered data sorted by start time, or None if the "phy"
        field does not have the expected topo-arch-nic-driver form.
    :rtype: pandas.DataFrame
    """
    parts = itm["phy"].split("-")
    if len(parts) != 4:
        return None
    topo, arch, nic, drv = parts
    # DPDK driver is the implicit default and is absent from test IDs.
    if drv == "dpdk":
        drv = ""
    else:
        drv += "-"
        drv = drv.replace("_", "-")

    # TRex tests have no core count and no DUT in the data.
    core = str() if itm["dut"] == "trex" else f"{itm['core']}"
    ttype = "ndrpdr" if itm["testtype"] in ("ndr", "pdr") else itm["testtype"]
    dut = "none" if itm["dut"] == "trex" else itm["dut"].upper()

    mask = (
        (data["dut_type"] == dut) &
        (data["test_type"] == ttype) &
        (data["passed"] == True)
    )
    selected = data.loc[mask]
    selected = selected[selected.job.str.endswith(f"{topo}-{arch}")]
    pattern = (
        f"^.*[.|-]{nic}.*{itm['framesize']}-{core}-{drv}{itm['test']}-{ttype}$"
    )
    selected = selected[selected.test_id.str.contains(pattern, regex=True)]
    return selected.sort_values(by="start_time", ignore_index=True)
+
+
+def _generate_trending_traces(ttype: str, name: str, df: pd.DataFrame,
+ start: datetime, end: datetime, color: str) -> list:
+ """
+ """
+
+ df = df.dropna(subset=[_VALUE[ttype], ])
+ if df.empty:
+ return list()
+ df = df.loc[((df["start_time"] >= start) & (df["start_time"] <= end))]
+ if df.empty:
+ return list()
+
+ x_axis = df["start_time"].tolist()
+
+ anomalies, trend_avg, trend_stdev = _classify_anomalies(
+ {k: v for k, v in zip(x_axis, df[_VALUE[ttype]])}
+ )
+
+ hover = list()
+ customdata = list()
+ for _, row in df.iterrows():
+ d_type = "trex" if row["dut_type"] == "none" else row["dut_type"]
+ hover_itm = (
+ f"date: {row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
+ f"<prop> [{row[_UNIT[ttype]]}]: {row[_VALUE[ttype]]:,.0f}<br>"
+ f"<stdev>"
+ f"{d_type}-ref: {row['dut_version']}<br>"
+ f"csit-ref: {row['job']}/{row['build']}<br>"
+ f"hosts: {', '.join(row['hosts'])}"
+ )
+ if ttype == "mrr":
+ stdev = (
+ f"stdev [{row['result_receive_rate_rate_unit']}]: "
+ f"{row['result_receive_rate_rate_stdev']:,.0f}<br>"