-# Copyright (c) 2023 Cisco and/or its affiliates.
+# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
"""Implementation of graphs for trending data.
"""
+import logging
import plotly.graph_objects as go
import pandas as pd
+from numpy import nan
+from datetime import datetime
+from pytz import UTC
+
from ..utils.constants import Constants as C
from ..utils.utils import get_color, get_hdrh_latencies
from ..utils.anomalies import classify_anomalies
:rtype: pandas.DataFrame
"""
- phy = itm["phy"].split("-")
- if len(phy) == 4:
- topo, arch, nic, drv = phy
+ phy = itm["phy"].rsplit("-", maxsplit=2)
+ if len(phy) == 3:
+ topo_arch, nic, drv = phy
if drv == "dpdk":
drv = ""
else:
test_type = "ndrpdr"
elif itm["testtype"] == "mrr":
test_type = "mrr"
+ elif itm["testtype"] == "soak":
+ test_type = "soak"
elif itm["area"] == "hoststack":
test_type = "hoststack"
df = data.loc[(
(data["test_type"] == test_type) &
(data["passed"] == True)
)]
- df = df[df.job.str.endswith(f"{topo}-{arch}")]
- core = str() if itm["dut"] == "trex" else f"{itm['core']}"
+ df = df[df.job.str.endswith(topo_arch)]
+ core = str() if itm["dut"] == "trex" else itm["core"]
ttype = "ndrpdr" if itm["testtype"] in ("ndr", "pdr") else itm["testtype"]
df = df[df.test_id.str.contains(
f"^.*[.|-]{nic}.*{itm['framesize']}-{core}-{drv}{itm['test']}-{ttype}$",
data: pd.DataFrame,
sel: dict,
layout: dict,
- normalize: bool
+ normalize: bool=False,
+ trials: bool=False
) -> tuple:
"""Generate the trending graph(s) - MRR, NDR, PDR and for PDR also Latences
(result_latency_forward_pdr_50_avg).
:param layout: Layout of plot.ly graph.
:param normalize: If True, the data is normalized to CPU frquency
Constants.NORM_FREQUENCY.
+ :param trials: If True, MRR trials are displayed in the trending graph.
:type data: pandas.DataFrame
:type sel: dict
:type layout: dict
:type normalize: bool
+ :type trials: bool
:returns: Trending graph(s)
:rtype: tuple(plotly.graph_objects.Figure, plotly.graph_objects.Figure)
"""
name: str,
df: pd.DataFrame,
color: str,
- norm_factor: float
+ nf: float
) -> list:
"""Generate the trending traces for the trending graph.
:param name: The test name to be displayed as the graph title.
:param df: Data frame with test data.
:param color: The color of the trace (samples and trend line).
- :param norm_factor: The factor used for normalization of the results to
+ :param nf: The factor used for normalization of the results to
CPU frequency set to Constants.NORM_FREQUENCY.
:type ttype: str
:type name: str
:type df: pandas.DataFrame
:type color: str
- :type norm_factor: float
+ :type nf: float
:returns: Traces (samples, trending line, anomalies)
:rtype: list
"""
if df.empty:
return list(), list()
- x_axis = df["start_time"].tolist()
- if ttype == "latency":
- y_data = [(v / norm_factor) for v in df[C.VALUE[ttype]].tolist()]
- else:
- y_data = [(v * norm_factor) for v in df[C.VALUE[ttype]].tolist()]
- units = df[C.UNIT[ttype]].unique().tolist()
-
- anomalies, trend_avg, trend_stdev = classify_anomalies(
- {k: v for k, v in zip(x_axis, y_data)}
- )
-
hover = list()
customdata = list()
customdata_samples = list()
- for idx, (_, row) in enumerate(df.iterrows()):
- d_type = "trex" if row["dut_type"] == "none" else row["dut_type"]
- hover_itm = (
- f"date: {row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
- f"<prop> [{row[C.UNIT[ttype]]}]: {y_data[idx]:,.0f}<br>"
- f"<stdev>"
- f"<additional-info>"
- f"{d_type}-ref: {row['dut_version']}<br>"
- f"csit-ref: {row['job']}/{row['build']}<br>"
- f"hosts: {', '.join(row['hosts'])}"
- )
- if ttype == "mrr":
- stdev = (
- f"stdev [{row['result_receive_rate_rate_unit']}]: "
- f"{row['result_receive_rate_rate_stdev']:,.0f}<br>"
+ name_lst = name.split("-")
+ for _, row in df.iterrows():
+ h_tput, h_band, h_lat, h_tput_trials, h_band_trials = \
+ str(), str(), str(), str(), str()
+ if ttype in ("mrr", "mrr-bandwidth"):
+ h_tput = (
+ f"tput avg [{row['result_receive_rate_rate_unit']}]: "
+ f"{row['result_receive_rate_rate_avg'] * nf:,.0f}<br>"
+ f"tput stdev [{row['result_receive_rate_rate_unit']}]: "
+ f"{row['result_receive_rate_rate_stdev'] * nf:,.0f}<br>"
)
- else:
- stdev = str()
- if ttype in ("hoststack-cps", "hoststack-rps"):
- add_info = (
- f"bandwidth [{row[C.UNIT['hoststack-bps']]}]: "
- f"{row[C.VALUE['hoststack-bps']]:,.0f}<br>"
- f"latency [{row[C.UNIT['hoststack-lat']]}]: "
- f"{row[C.VALUE['hoststack-lat']]:,.0f}<br>"
+ if pd.notna(row["result_receive_rate_bandwidth_avg"]):
+ h_band = (
+ f"bandwidth avg "
+ f"[{row['result_receive_rate_bandwidth_unit']}]: "
+ f"{row['result_receive_rate_bandwidth_avg'] * nf:,.0f}"
+ "<br>"
+ f"bandwidth stdev "
+ f"[{row['result_receive_rate_bandwidth_unit']}]: "
+ f"{row['result_receive_rate_bandwidth_stdev']* nf:,.0f}"
+ "<br>"
+ )
+ if trials:
+ h_tput_trials = (
+ f"tput trials "
+ f"[{row['result_receive_rate_rate_unit']}]: "
+ )
+ for itm in row["result_receive_rate_rate_values"]:
+ h_tput_trials += f"{itm * nf:,.0f}; "
+ h_tput_trials = h_tput_trials[:-2] + "<br>"
+ if pd.notna(row["result_receive_rate_bandwidth_avg"]):
+ h_band_trials = (
+ f"bandwidth trials "
+ f"[{row['result_receive_rate_bandwidth_unit']}]: "
+ )
+ for itm in row["result_receive_rate_bandwidth_values"]:
+ h_band_trials += f"{itm * nf:,.0f}; "
+ h_band_trials = h_band_trials[:-2] + "<br>"
+
+ elif ttype in ("ndr", "ndr-bandwidth"):
+ h_tput = (
+ f"tput [{row['result_ndr_lower_rate_unit']}]: "
+ f"{row['result_ndr_lower_rate_value'] * nf:,.0f}<br>"
+ )
+ if pd.notna(row["result_ndr_lower_bandwidth_value"]):
+ h_band = (
+ f"bandwidth [{row['result_ndr_lower_bandwidth_unit']}]:"
+ f" {row['result_ndr_lower_bandwidth_value'] * nf:,.0f}"
+ "<br>"
+ )
+ elif ttype in ("pdr", "pdr-bandwidth", "latency"):
+ h_tput = (
+ f"tput [{row['result_pdr_lower_rate_unit']}]: "
+ f"{row['result_pdr_lower_rate_value'] * nf:,.0f}<br>"
+ )
+ if pd.notna(row["result_pdr_lower_bandwidth_value"]):
+ h_band = (
+ f"bandwidth [{row['result_pdr_lower_bandwidth_unit']}]:"
+ f" {row['result_pdr_lower_bandwidth_value'] * nf:,.0f}"
+ "<br>"
+ )
+ if pd.notna(row["result_latency_forward_pdr_50_avg"]):
+ h_lat = (
+ f"latency "
+ f"[{row['result_latency_forward_pdr_50_unit']}]: "
+ f"{row['result_latency_forward_pdr_50_avg'] / nf:,.0f}"
+ "<br>"
+ )
+ elif ttype in ("hoststack-cps", "hoststack-rps",
+ "hoststack-cps-bandwidth",
+ "hoststack-rps-bandwidth", "hoststack-latency"):
+ h_tput = (
+ f"tput [{row['result_rate_unit']}]: "
+ f"{row['result_rate_value'] * nf:,.0f}<br>"
+ )
+ h_band = (
+ f"bandwidth [{row['result_bandwidth_unit']}]: "
+ f"{row['result_bandwidth_value'] * nf:,.0f}<br>"
+ )
+ h_lat = (
+ f"latency [{row['result_latency_unit']}]: "
+ f"{row['result_latency_value'] / nf:,.0f}<br>"
+ )
+ elif ttype in ("hoststack-bps", ):
+ h_band = (
+ f"bandwidth [{row['result_bandwidth_unit']}]: "
+ f"{row['result_bandwidth_value'] * nf:,.0f}<br>"
)
+ elif ttype in ("soak", "soak-bandwidth"):
+ h_tput = (
+ f"tput [{row['result_critical_rate_lower_rate_unit']}]: "
+ f"{row['result_critical_rate_lower_rate_value'] * nf:,.0f}"
+ "<br>"
+ )
+ if pd.notna(row["result_critical_rate_lower_bandwidth_value"]):
+ bv = row['result_critical_rate_lower_bandwidth_value']
+ h_band = (
+ "bandwidth "
+ f"[{row['result_critical_rate_lower_bandwidth_unit']}]:"
+ f" {bv * nf:,.0f}"
+ "<br>"
+ )
+ try:
+ hosts = f"<br>hosts: {', '.join(row['hosts'])}"
+ except (KeyError, TypeError):
+ hosts = str()
+
+ for drv in C.DRIVERS:
+ if drv in name_lst:
+ split_idx = name_lst.index(drv) + 1
+ break
else:
- add_info = str()
- hover_itm = hover_itm.replace(
- "<prop>", "latency" if ttype == "latency" else "average"
- ).replace("<stdev>", stdev).replace("<additional-info>", add_info)
+ split_idx = 5
+ hover_itm = (
+ f"dut: {name_lst[0]}<br>"
+ f"infra: {'-'.join(name_lst[1:split_idx])}<br>"
+ f"test: {'-'.join(name_lst[split_idx:])}<br>"
+ f"date: {row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
+ f"{h_tput}{h_tput_trials}{h_band}{h_band_trials}{h_lat}"
+ f"{row['dut_type']}-ref: {row['dut_version']}<br>"
+ f"csit-ref: {row['job']}/{row['build']}"
+ f"{hosts}"
+ )
hover.append(hover_itm)
if ttype == "latency":
customdata_samples.append(get_hdrh_latencies(row, name))
)
customdata.append({"name": name})
+ x_axis = df["start_time"].tolist()
+ if "latency" in ttype:
+ y_data = [(v / nf) for v in df[C.VALUE[ttype]].tolist()]
+ else:
+ y_data = [(v * nf) for v in df[C.VALUE[ttype]].tolist()]
+ units = df[C.UNIT[ttype]].unique().tolist()
+
+ try:
+ anomalies, trend_avg, trend_stdev = classify_anomalies(
+ {k: v for k, v in zip(x_axis, y_data)}
+ )
+ except ValueError as err:
+ logging.error(err)
+ return list(), list()
+
hover_trend = list()
for avg, stdev, (_, row) in zip(trend_avg, trend_stdev, df.iterrows()):
- d_type = "trex" if row["dut_type"] == "none" else row["dut_type"]
+ try:
+ hosts = f"<br>hosts: {', '.join(row['hosts'])}"
+ except (KeyError, TypeError):
+ hosts = str()
hover_itm = (
+ f"dut: {name_lst[0]}<br>"
+ f"infra: {'-'.join(name_lst[1:5])}<br>"
+ f"test: {'-'.join(name_lst[5:])}<br>"
f"date: {row['start_time'].strftime('%Y-%m-%d %H:%M:%S')}<br>"
f"trend [{row[C.UNIT[ttype]]}]: {avg:,.0f}<br>"
f"stdev [{row[C.UNIT[ttype]]}]: {stdev:,.0f}<br>"
- f"{d_type}-ref: {row['dut_version']}<br>"
- f"csit-ref: {row['job']}/{row['build']}<br>"
- f"hosts: {', '.join(row['hosts'])}"
+ f"{row['dut_type']}-ref: {row['dut_version']}<br>"
+ f"csit-ref: {row['job']}/{row['build']}"
+ f"{hosts}"
)
if ttype == "latency":
hover_itm = hover_itm.replace("[pps]", "[us]")
marker={
"size": 5,
"color": color,
- "symbol": "circle",
+ "symbol": "circle"
},
text=hover,
- hoverinfo="text+name",
+ hoverinfo="text",
showlegend=True,
legendgroup=name,
customdata=customdata_samples
"color": color,
},
text=hover_trend,
- hoverinfo="text+name",
+ hoverinfo="text",
showlegend=False,
legendgroup=name,
customdata=customdata
anomaly_y.append(trend_avg[idx])
anomaly_color.append(C.ANOMALY_COLOR[anomaly])
hover_itm = (
+ f"dut: {name_lst[0]}<br>"
+ f"infra: {'-'.join(name_lst[1:5])}<br>"
+ f"test: {'-'.join(name_lst[5:])}<br>"
f"date: {x_axis[idx].strftime('%Y-%m-%d %H:%M:%S')}<br>"
f"trend [pps]: {trend_avg[idx]:,.0f}<br>"
f"classification: {anomaly}"
y=anomaly_y,
mode="markers",
text=hover,
- hoverinfo="text+name",
+ hoverinfo="text",
showlegend=False,
legendgroup=name,
name=name,
return traces, units
+ def _add_mrr_trials_traces(
+ ttype: str,
+ name: str,
+ df: pd.DataFrame,
+ color: str,
+ nf: float
+ ) -> list:
+ """Add the traces with mrr trials.
+
+ :param ttype: Test type (mrr, mrr-bandwidth).
+ :param name: The test name to be displayed in hover.
+ :param df: Data frame with test data.
+ :param color: The color of the trace.
+ :param nf: The factor used for normalization of the results to
+ CPU frequency set to Constants.NORM_FREQUENCY.
+ :type ttype: str
+ :type name: str
+ :type df: pandas.DataFrame
+ :type color: str
+ :type nf: float
+ :returns: list of Traces
+ :rtype: list
+ """
+ traces = list()
+ x_axis = df["start_time"].tolist()
+ y_data = df[C.VALUE[ttype].replace("avg", "values")].tolist()
+
+ for idx_trial in range(10):
+ y_axis = list()
+ for idx_run in range(len(x_axis)):
+ try:
+ y_axis.append(y_data[idx_run][idx_trial] * nf)
+ except (IndexError, TypeError, ValueError):
+ y_axis.append(nan)
+ traces.append(go.Scatter(
+ x=x_axis,
+ y=y_axis,
+ name=name,
+ mode="markers",
+ marker={
+ "size": 2,
+ "color": color,
+ "symbol": "circle"
+ },
+ showlegend=True,
+ legendgroup=name,
+ hoverinfo="skip"
+ ))
+ return traces
+
+
fig_tput = None
fig_lat = None
+ fig_band = None
+ start_times = list()
y_units = set()
for idx, itm in enumerate(sel):
df = select_trending_data(data, itm)
if df is None or df.empty:
continue
+ start_times.append(df["start_time"][0])
if normalize:
- phy = itm["phy"].split("-")
- topo_arch = f"{phy[0]}-{phy[1]}" if len(phy) == 4 else str()
- norm_factor = (C.NORM_FREQUENCY / C.FREQUENCY[topo_arch]) \
+ phy = itm["phy"].rsplit("-", maxsplit=2)
+ topo_arch = phy[0] if len(phy) == 3 else str()
+ norm_factor = (C.NORM_FREQUENCY / C.FREQUENCY.get(topo_arch, 1.0)) \
if topo_arch else 1.0
else:
norm_factor = 1.0
if traces:
if not fig_tput:
fig_tput = go.Figure()
+ if trials and "mrr" in ttype:
+ traces.extend(_add_mrr_trials_traces(
+ ttype,
+ itm["id"],
+ df,
+ get_color(idx),
+ norm_factor
+ ))
fig_tput.add_traces(traces)
- if itm["testtype"] == "pdr":
+ if ttype in C.TESTS_WITH_BANDWIDTH:
+ traces, _ = _generate_trending_traces(
+ f"{ttype}-bandwidth",
+ itm["id"],
+ df,
+ get_color(idx),
+ norm_factor
+ )
+ if traces:
+ if not fig_band:
+ fig_band = go.Figure()
+ if trials and "mrr" in ttype:
+ traces.extend(_add_mrr_trials_traces(
+ f"{ttype}-bandwidth",
+ itm["id"],
+ df,
+ get_color(idx),
+ norm_factor
+ ))
+ fig_band.add_traces(traces)
+
+ if ttype in C.TESTS_WITH_LATENCY:
traces, _ = _generate_trending_traces(
- "latency",
+ "latency" if ttype == "pdr" else "hoststack-latency",
itm["id"],
df,
get_color(idx),
y_units.update(units)
+ x_range = [min(start_times), datetime.now(tz=UTC).strftime("%Y-%m-%d")]
if fig_tput:
- fig_layout = layout.get("plot-trending-tput", dict())
- fig_layout["yaxis"]["title"] = \
+ layout_tput = layout.get("plot-trending-tput", dict())
+ layout_tput["yaxis"]["title"] = \
f"Throughput [{'|'.join(sorted(y_units))}]"
- fig_tput.update_layout(fig_layout)
+ layout_tput["xaxis"]["range"] = x_range
+ fig_tput.update_layout(layout_tput)
+ if fig_band:
+ layout_band = layout.get("plot-trending-bandwidth", dict())
+ layout_band["xaxis"]["range"] = x_range
+ fig_band.update_layout(layout_band)
if fig_lat:
- fig_lat.update_layout(layout.get("plot-trending-lat", dict()))
+ layout_lat = layout.get("plot-trending-lat", dict())
+ layout_lat["xaxis"]["range"] = x_range
+ fig_lat.update_layout(layout_lat)
- return fig_tput, fig_lat
+ return fig_tput, fig_band, fig_lat
def graph_tm_trending(
anomaly_color.append(C.ANOMALY_COLOR[anomaly])
hover_itm = (
f"date: {x_axis[idx].strftime('%Y-%m-%d %H:%M:%S')}"
- f"<br>trend: {trend_avg[idx]:,.0f}"
+ f"<br>trend: {trend_avg[idx]:,.2f}"
f"<br>classification: {anomaly}"
)
hover.append(hover_itm)
graph.update_layout(graph_layout)
tm_trending_graphs.append((graph, all_tests, ))
- return tm_trending_graphs, all_metrics
+ return tm_trending_graphs, list(all_metrics)