Code Review
/
csit.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
Report: Add rc2 data
[csit.git]
/
resources
/
tools
/
presentation
/
pal_utils.py
diff --git a/resources/tools/presentation/pal_utils.py b/resources/tools/presentation/pal_utils.py
index 3efa00a..76db539 100644 (file)
--- a/resources/tools/presentation/pal_utils.py
+++ b/resources/tools/presentation/pal_utils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2019 Cisco and/or its affiliates.
+# Copyright (c) 2023 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -227,7 +227,7 @@ def archive_input_data(spec):
logging.info(u" Archiving the input data files ...")
logging.info(u" Archiving the input data files ...")
-    extension = spec.input[u"arch-file-format"]
+    extension = spec.output[u"arch-file-format"]
data_files = list()
for ext in extension:
data_files.extend(get_files(
data_files = list()
for ext in extension:
data_files.extend(get_files(
@@ -262,7 +262,7 @@ def classify_anomalies(data):
:param data: Full data set with unavailable samples replaced by nan.
:type data: OrderedDict
:returns: Classification and trend values
:param data: Full data set with unavailable samples replaced by nan.
:type data: OrderedDict
:returns: Classification and trend values
-    :rtype: 2-tuple, list of strings and list of floats
+    :rtype: 3-tuple, list of strings, list of floats and list of floats
"""
# Nan means something went wrong.
# Use 0.0 to cause that being reported as a severe regression.
"""
# Nan means something went wrong.
# Use 0.0 to cause that being reported as a severe regression.
@@ -273,13 +273,16 @@ def classify_anomalies(data):
group_list.reverse() # Just to use .pop() for FIFO.
classification = []
avgs = []
group_list.reverse() # Just to use .pop() for FIFO.
classification = []
avgs = []
+ stdevs = []
active_group = None
values_left = 0
avg = 0.0
active_group = None
values_left = 0
avg = 0.0
+ stdv = 0.0
for sample in data.values():
if np.isnan(sample):
classification.append(u"outlier")
avgs.append(sample)
for sample in data.values():
if np.isnan(sample):
classification.append(u"outlier")
avgs.append(sample)
+ stdevs.append(sample)
continue
if values_left < 1 or active_group is None:
values_left = 0
continue
if values_left < 1 or active_group is None:
values_left = 0
@@ -287,14 +290,17 @@ def classify_anomalies(data):
active_group = group_list.pop()
values_left = len(active_group.run_list)
avg = active_group.stats.avg
active_group = group_list.pop()
values_left = len(active_group.run_list)
avg = active_group.stats.avg
+ stdv = active_group.stats.stdev
classification.append(active_group.comment)
avgs.append(avg)
classification.append(active_group.comment)
avgs.append(avg)
+ stdevs.append(stdv)
values_left -= 1
continue
classification.append(u"normal")
avgs.append(avg)
values_left -= 1
continue
classification.append(u"normal")
avgs.append(avg)
+ stdevs.append(stdv)
values_left -= 1
values_left -= 1
-    return classification, avgs
+    return classification, avgs, stdevs
def convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u","):
def convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u","):
@@ -315,17 +321,16 @@ def convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u","):
if txt_table is None:
txt_table = prettytable.PrettyTable(row)
else:
if txt_table is None:
txt_table = prettytable.PrettyTable(row)
else:
- txt_table.add_row(row)
- txt_table.align = u"r"
- txt_table.align[u"Test Case"] = u"l"
- txt_table.align[u"RCA"] = u"l"
- txt_table.align[u"RCA1"] = u"l"
- txt_table.align[u"RCA2"] = u"l"
- txt_table.align[u"RCA3"] = u"l"
-
+ txt_table.add_row(
+ [str(itm.replace(u"\u00B1", u"+-")) for itm in row]
+ )
if not txt_table:
return
if not txt_table:
return
+ txt_table.align = u"r"
+ for itm in (u"Test Case", u"Build", u"Version", u"VPP Version"):
+ txt_table.align[itm] = u"l"
+
if txt_file_name.endswith(u".txt"):
with open(txt_file_name, u"wt", encoding='utf-8') as txt_file:
txt_file.write(str(txt_table))
if txt_file_name.endswith(u".txt"):
with open(txt_file_name, u"wt", encoding='utf-8') as txt_file:
txt_file.write(str(txt_table))