# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """Generation of Continuous Performance Trending and Analysis.
20 from collections import OrderedDict
21 from datetime import datetime
22 from copy import deepcopy
23 from os import listdir
26 import plotly.offline as ploff
27 import plotly.graph_objs as plgo
28 import plotly.exceptions as plerr
30 from pal_utils import archive_input_data, execute_command, classify_anomalies
# Command to build the html format of the report
HTML_BUILDER = u'sphinx-build -v -c sphinx_conf/trending -a ' \
               u'-b html -E ' \
               u'-t html ' \
               u'-D version="{date}" ' \
               u'{working_dir} ' \
               u'{build_dir}/'
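# After .format() the command looks roughly like this (illustrative values,
# not captured output):
#
#   sphinx-build -v -c sphinx_conf/trending -a -b html -E -t html \
#       -D version="2022-06-01 10:00 UTC" <working_dir> <build_dir>/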
41 # .css file for the html format of the report
42 THEME_OVERRIDES = u"""/* override table width restrictions */
44 max-width: 1200px !important;
46 .rst-content blockquote {
52 display: inline-block;
60 .wy-menu-vertical li.current a {
62 border-right: solid 1px #c9c9c9;
65 .wy-menu-vertical li.toctree-l2.current > a {
69 .wy-menu-vertical li.toctree-l2.current li.toctree-l3 > a {
74 .wy-menu-vertical li.toctree-l3.current li.toctree-l4 > a {
79 .wy-menu-vertical li.on a, .wy-menu-vertical li.current > a {
86 border-top-width: medium;
87 border-bottom-width: medium;
88 border-top-style: none;
89 border-bottom-style: none;
90 border-top-color: currentcolor;
91 border-bottom-color: currentcolor;
92 padding-left: 2em -4px;

def generate_cpta(spec, data):
    """Generate all formats and versions of the Continuous Performance
    Trending and Analysis.

    :param spec: Specification read from the specification file.
    :param data: Full data set.
    :type spec: Specification
    :type data: InputData
    """

    logging.info(u"Generating the Continuous Performance Trending and "
                 u"Analysis ...")
    ret_code = _generate_all_charts(spec, data)

    cmd = HTML_BUILDER.format(
        date=datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),
        working_dir=spec.environment[u'paths'][u'DIR[WORKING,SRC]'],
        build_dir=spec.environment[u'paths'][u'DIR[BUILD,HTML]'])
    execute_command(cmd)

    with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE]'], u'w') as \
            css_file:
        css_file.write(THEME_OVERRIDES)

    with open(spec.environment[u'paths'][u'DIR[CSS_PATCH_FILE2]'], u'w') as \
            css_file:
        css_file.write(THEME_OVERRIDES)

    if spec.environment.get(u"archive-inputs", False):
        archive_input_data(spec)

    logging.info(u"Done.")

    return ret_code
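
# A minimal sketch of how generate_cpta is typically driven from the PAL
# entry point. The constructor and method names below are assumptions based
# on the imported types, not verified API:
#
#     spec = Specification(u"specification.yaml")   # hypothetical file name
#     spec.read_specification()
#     data = InputData(spec)
#     data.read_data()
#     ret_code = generate_cpta(spec, data)          # u"PASS" or u"FAIL"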

def _generate_trending_traces(in_data, job_name, build_info,
                              name=u"", color=u"", incl_tests=u"mrr"):
    """Generate the trending traces:
     - samples,
     - outliers, regress, progress,
     - average of normal samples (trending line).

    :param in_data: Full data set.
    :param job_name: The name of job which generated the data.
    :param build_info: Information about the builds.
    :param name: Name of the plot.
    :param color: Name of the color for the plot.
    :param incl_tests: Included tests, accepted values: mrr, ndr, pdr,
        pdr-lat.
    :type in_data: OrderedDict
    :type job_name: str
    :type build_info: dict
    :type name: str
    :type color: str
    :type incl_tests: str
    :returns: Generated traces (list) and the evaluated result.
    :rtype: tuple(traces, result)
    """
    if incl_tests not in (u"mrr", u"ndr", u"pdr", u"pdr-lat"):
        return list(), None

    data_x = list(in_data.keys())
    data_y_pps = list()
    data_y_mpps = list()
    data_y_stdev = list()
    if incl_tests == u"pdr-lat":
        for item in in_data.values():
            data_y_pps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
            data_y_stdev.append(float(u"nan"))
            data_y_mpps.append(float(item.get(u"lat_1", u"nan")) / 1e6)
        multi = 1.0
    else:
        for item in in_data.values():
            data_y_pps.append(float(item[u"receive-rate"]))
            data_y_stdev.append(float(item[u"receive-stdev"]) / 1e6)
            data_y_mpps.append(float(item[u"receive-rate"]) / 1e6)
        multi = 1e6

    hover_text = list()
    xaxis = list()
    for index, key in enumerate(data_x):
        str_key = str(key)
        date = build_info[job_name][str_key][0]
        hover_str = (u"date: {date}<br>"
                     u"{property} [Mpps]: <val><br>"
                     u"<stdev>"
                     u"{sut}-ref: {build}<br>"
                     u"csit-ref: {test}-{period}-build-{build_nr}<br>"
                     u"testbed: {testbed}")
        if incl_tests == u"mrr":
            hover_str = hover_str.replace(
                u"<stdev>", f"stdev [Mpps]: {data_y_stdev[index]:.3f}<br>"
            )
        else:
            hover_str = hover_str.replace(u"<stdev>", u"")
        if incl_tests == u"pdr-lat":
            hover_str = hover_str.replace(u"<val>", u"{value:.1e}")
        else:
            hover_str = hover_str.replace(u"<val>", u"{value:.3f}")
        if u"-cps" in name:
            hover_str = hover_str.replace(u"[Mpps]", u"[Mcps]").\
                replace(u"throughput", u"connection rate")
        if u"vpp" in job_name:
            hover_str = hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"vpp",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2])
        elif u"dpdk" in job_name:
            hover_str = hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"dpdk",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2])
        elif u"trex" in job_name:
            hover_str = hover_str.format(
                date=date,
                property=u"average" if incl_tests == u"mrr" else u"throughput",
                value=data_y_mpps[index],
                sut=u"trex",
                build=build_info[job_name][str_key][1].rsplit(u'~', 1)[0],
                test=incl_tests,
                period=u"daily" if incl_tests == u"mrr" else u"weekly",
                build_nr=str_key,
                testbed=build_info[job_name][str_key][2])
        if incl_tests == u"pdr-lat":
            hover_str = hover_str.replace(
                u"throughput [Mpps]", u"latency [s]"
            )
        hover_text.append(hover_str)
        xaxis.append(datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                              int(date[9:11]), int(date[12:])))
    data_pd = OrderedDict()
    for key, value in zip(xaxis, data_y_pps):
        data_pd[key] = value

    try:
        anomaly_classification, avgs_pps, stdevs_pps = \
            classify_anomalies(data_pd)
    except ValueError as err:
        logging.info(f"{err} Skipping")
        return list(), None

    avgs_mpps = [avg_pps / multi for avg_pps in avgs_pps]
    stdevs_mpps = [stdev_pps / multi for stdev_pps in stdevs_pps]
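    # Note on the classify_anomalies contract (inferred from this call site,
    # not from its definition in pal_utils): given an OrderedDict mapping
    # datetime -> sample value, it is expected to return three sequences
    # aligned with the input samples:
    #   - a classification per sample (u"normal", u"regression",
    #     u"progression", or outlier),
    #   - the trend average at each sample,
    #   - the trend stdev at each sample.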
    anomalies = OrderedDict()
    anomalies_colors = list()
    anomalies_avgs = list()
    anomaly_color = {
        u"regression": 0.0,
        u"normal": 0.5,
        u"progression": 1.0
    }
    if anomaly_classification:
        for index, (key, value) in enumerate(data_pd.items()):
            if anomaly_classification[index] in (u"regression", u"progression"):
                anomalies[key] = value / multi
                anomalies_colors.append(
                    anomaly_color[anomaly_classification[index]])
                anomalies_avgs.append(avgs_mpps[index])
        anomalies_colors.extend([0.0, 0.5, 1.0])

    # Create the traces:
    trace_samples = plgo.Scatter(
        x=xaxis,
        y=data_y_mpps,
        mode=u"markers",
        line={
            u"width": 1
        },
        showlegend=True,
        legendgroup=name,
        name=f"{name}",
        marker={
            u"size": 5,
            u"color": color,
            u"symbol": u"circle",
        },
        text=hover_text,
        hoverinfo=u"text+name"
    )
    traces = [trace_samples, ]
    trend_hover_text = list()
    for idx in range(len(data_x)):
        if incl_tests == u"pdr-lat":
            trend_hover_str = (
                f"trend [s]: {avgs_mpps[idx]:.1e}<br>"
            )
        else:
            trend_hover_str = (
                f"trend [Mpps]: {avgs_mpps[idx]:.3f}<br>"
                f"stdev [Mpps]: {stdevs_mpps[idx]:.3f}"
            )
        trend_hover_text.append(trend_hover_str)

    trace_trend = plgo.Scatter(
        x=xaxis,
        y=avgs_mpps,
        mode=u"lines",
        line={
            u"shape": u"linear",
            u"width": 1,
            u"color": color,
        },
        showlegend=False,
        legendgroup=name,
        name=f"{name}",
        text=trend_hover_text,
        hoverinfo=u"text+name"
    )
    traces.append(trace_trend)
    if incl_tests == u"pdr-lat":
        colorscale = [
            [0.00, u"green"], [0.33, u"green"],
            [0.33, u"white"], [0.66, u"white"],
            [0.66, u"red"], [1.00, u"red"]
        ]
        ticktext = [u"Progression", u"Normal", u"Regression"]
    else:
        colorscale = [
            [0.00, u"red"], [0.33, u"red"],
            [0.33, u"white"], [0.66, u"white"],
            [0.66, u"green"], [1.00, u"green"]
        ]
        ticktext = [u"Regression", u"Normal", u"Progression"]
    trace_anomalies = plgo.Scatter(
        x=list(anomalies.keys()),
        y=anomalies_avgs,
        mode=u"markers",
        hoverinfo=u"none",
        showlegend=False,
        legendgroup=name,
        name=f"{name}-anomalies",
        marker={
            u"size": 15,
            u"symbol": u"circle-open",
            u"color": anomalies_colors,
            u"colorscale": colorscale,
            u"showscale": True,
            u"colorbar": {
                u"y": 0.5,
                u"len": 0.8,
                u"title": u"Circles Marking Data Classification",
                u"titleside": u"right",
                u"tickmode": u"array",
                u"tickvals": [0.167, 0.500, 0.833],
                u"ticktext": ticktext
            }
        }
    )
    traces.append(trace_anomalies)
    if anomaly_classification:
        return traces, anomaly_classification[-1]

    return traces, None
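
# An illustrative example of the in_data shape this helper expects for
# throughput tests; build numbers map to per-build measurements. The build
# numbers and rates below are made-up values, not real data:
#
#     in_data = OrderedDict([
#         (9240, {u"receive-rate": 12.34e6, u"receive-stdev": 0.56e6}),
#         (9241, {u"receive-rate": 12.01e6, u"receive-stdev": 0.48e6}),
#     ])
#     traces, last = _generate_trending_traces(
#         in_data, job_name=u"...", build_info=build_info,
#         name=u"64b-2t1c-ethip4-ip4base", color=u"Blue", incl_tests=u"mrr"
#     )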

def _generate_all_charts(spec, input_data):
    """Generate all charts specified in the specification file.

    :param spec: Specification.
    :param input_data: Full data set.
    :type spec: Specification
    :type input_data: InputData
    """

    def _generate_chart(graph):
        """Generates the chart.

        :param graph: The graph to be generated.
        :type graph: dict
        :returns: List of dictionaries, each with the job name, csv tables
            with results, and the tests classification results.
        :rtype: list
        """
        logging.info(f"  Generating the chart {graph.get(u'title', u'')} ...")

        job_name = list(graph[u"data"].keys())[0]

        logging.info(
            f"    Creating the data set for the {graph.get(u'type', u'')} "
            f"{graph.get(u'title', u'')}."
        )

        data = input_data.filter_tests_by_name(
            graph,
            params=[u"type", u"result", u"throughput", u"latency", u"tags"],
            continue_on_error=True
        )

        if data is None or data.empty:
            logging.error(u"No data.")
            return list()

        return_lst = list()
        for ttype in graph.get(u"test-type", (u"mrr", )):
            for core in graph.get(u"core", tuple()):
                csv_tbl = list()
                csv_tbl_lat_1 = list()
                csv_tbl_lat_2 = list()
                res = dict()
                chart_data = dict()
                chart_tags = dict()
                for item in graph.get(u"include", tuple()):
                    reg_ex = re.compile(str(item.format(core=core)).lower())
                    for job, job_data in data.items():
                        if job != job_name:
                            continue
                        for index, bld in job_data.items():
                            for test_id, test in bld.items():
                                if not re.match(reg_ex, str(test_id).lower()):
                                    continue
                                if chart_data.get(test_id, None) is None:
                                    chart_data[test_id] = OrderedDict()
                                try:
                                    if ttype == u"mrr":
                                        rate = test[u"result"][u"receive-rate"]
                                        stdev = \
                                            test[u"result"][u"receive-stdev"]
                                    elif ttype == u"ndr":
                                        rate = \
                                            test["throughput"][u"NDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                    elif ttype == u"pdr":
                                        rate = \
                                            test["throughput"][u"PDR"][u"LOWER"]
                                        stdev = float(u"nan")
                                        lat_1 = test[u"latency"][u"PDR50"]\
                                            [u"direction1"][u"avg"]
                                        lat_2 = test[u"latency"][u"PDR50"]\
                                            [u"direction2"][u"avg"]
                                    else:
                                        continue
                                    chart_data[test_id][int(index)] = {
                                        u"receive-rate": rate,
                                        u"receive-stdev": stdev
                                    }
                                    if ttype == u"pdr":
                                        chart_data[test_id][int(index)].update(
                                            {u"lat_1": lat_1,
                                             u"lat_2": lat_2}
                                        )
                                    chart_tags[test_id] = \
                                        test.get(u"tags", None)
                                except (KeyError, TypeError):
                                    pass
                # Add items to the csv table:
                for tst_name, tst_data in chart_data.items():
                    tst_lst = list()
                    tst_lst_lat_1 = list()
                    tst_lst_lat_2 = list()
                    for bld in builds_dict[job_name]:
                        itm = tst_data.get(int(bld), dict())
                        # CSIT-1180: Itm will be list, compute stats.
                        try:
                            tst_lst.append(str(itm.get(u"receive-rate", u"")))
                            if ttype == u"pdr":
                                tst_lst_lat_1.append(
                                    str(itm.get(u"lat_1", u""))
                                )
                                tst_lst_lat_2.append(
                                    str(itm.get(u"lat_2", u""))
                                )
                        except AttributeError:
                            tst_lst.append(u"")
                            if ttype == u"pdr":
                                tst_lst_lat_1.append(u"")
                                tst_lst_lat_2.append(u"")
                    csv_tbl.append(f"{tst_name}," + u",".join(tst_lst) + u'\n')
                    csv_tbl_lat_1.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_1) + u"\n"
                    )
                    csv_tbl_lat_2.append(
                        f"{tst_name}," + u",".join(tst_lst_lat_2) + u"\n"
                    )
                # Generate traces:
                traces = list()
                traces_lat = list()
                index = 0
                groups = graph.get(u"groups", None)
                visibility = list()

                if groups:
                    for group in groups:
                        visible = list()
                        for tag in group:
                            for tst_name, test_data in chart_data.items():
                                if not test_data:
                                    logging.warning(
                                        f"No data for the test {tst_name}"
                                    )
                                    continue
                                if tag not in chart_tags[tst_name]:
                                    continue
                                try:
                                    trace, rslt = _generate_trending_traces(
                                        test_data,
                                        job_name=job_name,
                                        build_info=build_info,
                                        name=u'-'.join(tst_name.split(u'.')[-1].
                                                       split(u'-')[2:-1]),
                                        color=COLORS[index],
                                        incl_tests=ttype
                                    )
                                except IndexError:
                                    logging.error(f"Out of colors: index: "
                                                  f"{index}, test: {tst_name}")
                                    index += 1
                                    continue
                                traces.extend(trace)
                                visible.extend(
                                    [True for _ in range(len(trace))]
                                )
                                res[tst_name] = rslt
                                index += 1
                                break
                        visibility.append(visible)
                else:
                    for tst_name, test_data in chart_data.items():
                        if not test_data:
                            logging.warning(f"No data for the test {tst_name}")
                            continue
                        try:
                            trace, rslt = _generate_trending_traces(
                                test_data,
                                job_name=job_name,
                                build_info=build_info,
                                name=u'-'.join(
                                    tst_name.split(u'.')[-1].split(u'-')[2:-1]),
                                color=COLORS[index],
                                incl_tests=ttype
                            )
                            if ttype == u"pdr":
                                trace_lat, _ = _generate_trending_traces(
                                    test_data,
                                    job_name=job_name,
                                    build_info=build_info,
                                    name=u'-'.join(
                                        tst_name.split(u'.')[-1].split(
                                            u'-')[2:-1]),
                                    color=COLORS[index],
                                    incl_tests=u"pdr-lat"
                                )
                                traces_lat.extend(trace_lat)
                        except IndexError:
                            logging.error(
                                f"Out of colors: index: "
                                f"{index}, test: {tst_name}"
                            )
                            index += 1
                            continue
                        traces.extend(trace)
                        res[tst_name] = rslt
                        index += 1
                # Generate the chart:
                try:
                    layout = deepcopy(graph[u"layout"])
                except KeyError as err:
                    logging.error(u"Finished with error: No layout defined")
                    logging.error(repr(err))
                    return list()

                if groups:
                    show = list()
                    for i in range(len(visibility)):
                        visible = list()
                        for vis_idx, _ in enumerate(visibility):
                            for _ in range(len(visibility[vis_idx])):
                                visible.append(i == vis_idx)
                        show.append(visible)

                    buttons = list()
                    buttons.append(dict(
                        label=u"All",
                        method=u"update",
                        args=[{u"visible":
                                   [True for _ in range(len(show[0]))]}, ]
                    ))
                    for i in range(len(groups)):
                        try:
                            label = graph[u"group-names"][i]
                        except (IndexError, KeyError):
                            label = f"Group {i + 1}"
                        buttons.append(dict(
                            label=label,
                            method=u"update",
                            args=[{u"visible": show[i]}, ]
                        ))

                    layout[u"updatemenus"] = list([
                        dict(
                            active=0,
                            type=u"dropdown",
                            direction=u"down",
                            xanchor=u"left",
                            yanchor=u"bottom",
                            x=-0.12,
                            buttons=buttons
                        )
                    ])

                name_file = (
                    f"{spec.cpta[u'output-file']}/"
                    f"{graph[u'output-file-name']}.html"
                )
                name_file = name_file.format(core=core, test_type=ttype)

                logging.info(f"    Writing the file {name_file}")
                plpl = plgo.Figure(data=traces, layout=layout)
                try:
                    ploff.plot(
                        plpl,
                        show_link=False,
                        auto_open=False,
                        filename=name_file
                    )
                except plerr.PlotlyEmptyDataError:
                    logging.warning(u"No data for the plot. Skipped.")
                # Generate the latency chart:
                if ttype == u"pdr":
                    try:
                        layout = deepcopy(graph[u"layout"])
                        layout[u"yaxis"][u"title"] = u"Latency [s]"
                        layout[u"yaxis"][u"tickformat"] = u".3s"
                    except KeyError as err:
                        logging.error(u"Finished with error: No layout defined")
                        logging.error(repr(err))
                        return list()

                    name_file = (
                        f"{spec.cpta[u'output-file']}/"
                        f"{graph[u'output-file-name']}-lat.html"
                    )
                    name_file = name_file.format(core=core, test_type=ttype)

                    logging.info(f"    Writing the file {name_file}")
                    plpl = plgo.Figure(data=traces_lat, layout=layout)
                    try:
                        ploff.plot(
                            plpl,
                            show_link=False,
                            auto_open=False,
                            filename=name_file
                        )
                    except plerr.PlotlyEmptyDataError:
                        logging.warning(u"No data for the plot. Skipped.")
714 u"job_name": job_name,
715 u"csv_table": csv_tbl,
716 u"csv_lat_1": csv_tbl_lat_1,
717 u"csv_lat_2": csv_tbl_lat_2,
    builds_dict = dict()
    for job, builds in spec.input.items():
        if builds_dict.get(job, None) is None:
            builds_dict[job] = list()
        for build in builds:
            if build[u"status"] not in (u"failed", u"not found", u"removed",
                                        None):
                builds_dict[job].append(str(build[u"build"]))

    # Create "build ID": "date" dict:
    build_info = dict()
    tb_tbl = spec.environment.get(u"testbeds", None)
    for job_name, job_data in builds_dict.items():
        if build_info.get(job_name, None) is None:
            build_info[job_name] = OrderedDict()
        for build in job_data:
            testbed = u""
            tb_ip = input_data.metadata(job_name, build).get(u"testbed", u"")
            if tb_ip and tb_tbl:
                testbed = tb_tbl.get(tb_ip, u"")
            build_info[job_name][build] = (
                input_data.metadata(job_name, build).get(u"generated", u""),
                input_data.metadata(job_name, build).get(u"version", u""),
                testbed
            )
    anomaly_classifications = dict()

    # Create the table header:
    csv_tables = dict()
    csv_tables_l1 = dict()
    csv_tables_l2 = dict()
    for job_name in builds_dict:
        if csv_tables.get(job_name, None) is None:
            csv_tables[job_name] = list()
        if csv_tables_l1.get(job_name, None) is None:
            csv_tables_l1[job_name] = list()
        if csv_tables_l2.get(job_name, None) is None:
            csv_tables_l2[job_name] = list()
        header = f"Build Number:,{u','.join(builds_dict[job_name])}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        build_dates = [x[0] for x in build_info[job_name].values()]
        header = f"Build Date:,{u','.join(build_dates)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        versions = [x[1] for x in build_info[job_name].values()]
        header = f"Version:,{u','.join(versions)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
        testbed = [x[2] for x in build_info[job_name].values()]
        header = f"Test bed:,{u','.join(testbed)}\n"
        csv_tables[job_name].append(header)
        csv_tables_l1[job_name].append(header)
        csv_tables_l2[job_name].append(header)
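    # The resulting trending CSV thus starts with four header rows, followed
    # by one row per test. Values and names below are illustrative, not real
    # data; the date format matches the u"generated" metadata parsed above:
    #
    #     Build Number:,9240,9241,9242
    #     Build Date:,20220601 10:00,20220602 10:00,20220603 10:00
    #     Version:,22.06-rc0~123,22.06-rc0~130,22.06-rc0~138
    #     Test bed:,testbed-1,testbed-1,testbed-1
    #     <test-id>,12340000,12010000,12550000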
    for chart in spec.cpta[u"plots"]:
        results = _generate_chart(chart)
        if not results:
            continue

        for result in results:
            csv_tables[result[u"job_name"]].extend(result[u"csv_table"])
            csv_tables_l1[result[u"job_name"]].extend(result[u"csv_lat_1"])
            csv_tables_l2[result[u"job_name"]].extend(result[u"csv_lat_2"])

            if anomaly_classifications.get(result[u"job_name"], None) is None:
                anomaly_classifications[result[u"job_name"]] = dict()
            anomaly_classifications[result[u"job_name"]].\
                update(result[u"results"])
    for job_name, csv_table in csv_tables.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-trending"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)

        txt_table = None
        with open(f"{file_name}.csv", u"rt") as csv_file:
            csv_content = csv.reader(csv_file, delimiter=u',', quotechar=u'"')
            for row in csv_content:
                if txt_table is None:
                    txt_table = prettytable.PrettyTable(row)
                else:
                    for idx, item in enumerate(row):
                        try:
                            row[idx] = str(round(float(item) / 1000000, 2))
                        except ValueError:
                            pass
                    try:
                        txt_table.add_row(row)
                    # PrettyTable raises Exception
                    except Exception as err:
                        logging.warning(
                            f"Error occurred while generating TXT table:"
                            f"\n{err}"
                        )
        if txt_table:
            txt_table.align[u"Build Number:"] = u"l"
            with open(f"{file_name}.txt", u"wt") as txt_file:
                txt_file.write(str(txt_table))
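    # The TXT table shows rates in Mpps: every numeric cell is divided by
    # 1e6 and rounded to two decimals, e.g. a csv cell of 12345678 (pps)
    # becomes u"12.35" (Mpps); non-numeric cells (headers, empty values)
    # are left untouched thanks to the ValueError guard above.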
    for job_name, csv_table in csv_tables_l1.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d1"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)
    for job_name, csv_table in csv_tables_l2.items():
        file_name = f"{spec.cpta[u'output-file']}/{job_name}-lat-P50-50-d2"
        with open(f"{file_name}.csv", u"wt") as file_handler:
            file_handler.writelines(csv_table)
    # Evaluate the overall result:
    if anomaly_classifications:
        result = u"PASS"

        class MaxLens():
            """Class to store the max lengths of strings displayed in
            regressions and progressions.
            """

            def __init__(self, tst, nic, frmsize, trend, run, ltc):
                """Initialisation.

                :param tst: Name of the test.
                :param nic: NIC used in the test.
                :param frmsize: Frame size used in the test.
                :param trend: Trend change.
                :param run: Number of runs for the last trend.
                :param ltc: Regression or progression.
                """
                self.tst = tst
                self.nic = nic
                self.frmsize = frmsize
                self.trend = trend
                self.run = run
                self.ltc = ltc
        for job_name, job_data in anomaly_classifications.items():
            data = []
            # Lists collecting regressions and progressions:
            test_reg_lst = []
            nic_reg_lst = []
            frmsize_reg_lst = []
            trend_reg_lst = []
            number_reg_lst = []
            ltc_reg_lst = []
            test_prog_lst = []
            nic_prog_lst = []
            frmsize_prog_lst = []
            trend_prog_lst = []
            number_prog_lst = []
            ltc_prog_lst = []
            max_len = MaxLens(0, 0, 0, 0, 0, 0)

            # tb - testbed (2n-skx, 3n-dnv, etc.)
            tb = u"-".join(job_name.split(u"-")[-2:])
            # data - read all txt dashboard files for tb
            for file in listdir(f"{spec.cpta[u'output-file']}"):
                if tb in file and u"performance-trending-dashboard" in \
                        file and u"txt" in file:
                    file_to_read = f"{spec.cpta[u'output-file']}/{file}"
                    with open(f"{file_to_read}", u"rt") as f_in:
                        data = data + f_in.readlines()
            for test_name, classification in job_data.items():
                if classification != u"normal":
                    if u"2n" in test_name:
                        test_name = test_name.split("-", 2)
                        tst = test_name[2].split(".")[-1]
                        nic = test_name[1]
                    else:
                        test_name = test_name.split("-", 1)
                        tst = test_name[1].split(".")[-1]
                        nic = test_name[0].split(".")[-1]
                    frmsize = tst.split("-")[0]
                    tst = u"-".join(tst.split("-")[1:])
                    tst_name = f"{nic}-{frmsize}-{tst}"
                    if len(tst) > max_len.tst:
                        max_len.tst = len(tst)
                    if len(nic) > max_len.nic:
                        max_len.nic = len(nic)
                    if len(frmsize) > max_len.frmsize:
                        max_len.frmsize = len(frmsize)
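                    # Each dashboard .txt row is a pipe-separated table line;
                    # the exact column layout is produced elsewhere, but from
                    # the indices used below it is assumed to look like:
                    #
                    #   | <test name> | <trend> | <runs> | <change> |
                    #
                    # so, after spaces are stripped, fields 2, 3 and 4 hold
                    # the trend value, the number of runs and the long-term
                    # change, respectively.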
                    for line in data:
                        if tst_name in line:
                            line = line.replace(" ", "")
                            trend = line.split("|")[2]
                            if len(str(trend)) > max_len.trend:
                                max_len.trend = len(str(trend))
                            number = line.split("|")[3]
                            if len(str(number)) > max_len.run:
                                max_len.run = len(str(number))
                            ltc = line.split("|")[4]
                            if len(str(ltc)) > max_len.ltc:
                                max_len.ltc = len(str(ltc))
                    if classification == u'regression':
                        test_reg_lst.append(tst)
                        nic_reg_lst.append(nic)
                        frmsize_reg_lst.append(frmsize)
                        trend_reg_lst.append(trend)
                        number_reg_lst.append(number)
                        ltc_reg_lst.append(ltc)
                    elif classification == u'progression':
                        test_prog_lst.append(tst)
                        nic_prog_lst.append(nic)
                        frmsize_prog_lst.append(frmsize)
                        trend_prog_lst.append(trend)
                        number_prog_lst.append(number)
                        ltc_prog_lst.append(ltc)
            text = u""
            for idx in range(len(test_reg_lst)):
                text += (
                    f"{test_reg_lst[idx]}"
                    f"{u' ' * (max_len.tst - len(test_reg_lst[idx]))} "
                    f"{nic_reg_lst[idx]}"
                    f"{u' ' * (max_len.nic - len(nic_reg_lst[idx]))} "
                    f"{frmsize_reg_lst[idx].upper()}"
                    f"{u' ' * (max_len.frmsize - len(frmsize_reg_lst[idx]))} "
                    f"{trend_reg_lst[idx]}"
                    f"{u' ' * (max_len.trend - len(str(trend_reg_lst[idx])))} "
                    f"{number_reg_lst[idx]}"
                    f"{u' ' * (max_len.run - len(str(number_reg_lst[idx])))} "
                    f"{ltc_reg_lst[idx]}"
                    f"{u' ' * (max_len.ltc - len(str(ltc_reg_lst[idx])))} "
                    u"\n"
                )

            file_name = \
                f"{spec.cpta[u'output-file']}/regressions-{job_name}.txt"
            try:
                with open(f"{file_name}", u'w') as txt_file:
                    txt_file.write(text)
            except IOError:
                logging.error(
                    f"Not possible to write the file {file_name}.")
            text = u""
            for idx in range(len(test_prog_lst)):
                text += (
                    f"{test_prog_lst[idx]}"
                    f"{u' ' * (max_len.tst - len(test_prog_lst[idx]))} "
                    f"{nic_prog_lst[idx]}"
                    f"{u' ' * (max_len.nic - len(nic_prog_lst[idx]))} "
                    f"{frmsize_prog_lst[idx].upper()}"
                    f"{u' ' * (max_len.frmsize - len(frmsize_prog_lst[idx]))} "
                    f"{trend_prog_lst[idx]}"
                    f"{u' ' * (max_len.trend - len(str(trend_prog_lst[idx])))} "
                    f"{number_prog_lst[idx]}"
                    f"{u' ' * (max_len.run - len(str(number_prog_lst[idx])))} "
                    f"{ltc_prog_lst[idx]}"
                    f"{u' ' * (max_len.ltc - len(str(ltc_prog_lst[idx])))} "
                    u"\n"
                )

            file_name = \
                f"{spec.cpta[u'output-file']}/progressions-{job_name}.txt"
            try:
                with open(f"{file_name}", u'w') as txt_file:
                    txt_file.write(text)
            except IOError:
                logging.error(f"Not possible to write the file {file_name}.")
    else:
        result = u"FAIL"

    logging.info(f"Partial results: {anomaly_classifications}")
    logging.info(f"Result: {result}")

    return result