import datetime
import logging
+import csv
+import prettytable
import plotly.offline as ploff
import plotly.graph_objs as plgo
+import plotly.exceptions as plerr
import numpy as np
import pandas as pd
# Command template for the sphinx HTML build of the trending pages.
# Placeholders filled at call time: {date}, {working_dir}, {build_dir}.
HTML_BUILDER = ('sphinx-build -v -c conf_cpta -a '
                '-b html -E '
                '-t html '
                '-D version="Generated on {date}" '
                '{working_dir} '
                '{build_dir}/')
traces.append(trace_anomalies)
if show_moving_median:
+ min_periods = moving_win_size / 2 + 1
data_mean_y = pd.Series(data_y).rolling(
- window=moving_win_size).median()
+ window=moving_win_size, min_periods=min_periods).median()
trace_median = plgo.Scatter(
x=data_x,
y=data_mean_y,
"width": 1,
"color": color,
},
- name='{name}-trend'.format(name=name, size=moving_win_size)
+ name='{name}-trend'.format(name=name)
)
traces.append(trace_median)
# Create plot
logging.info(" Writing the file '{0}' ...".format(file_name))
plpl = plgo.Figure(data=traces, layout=layout)
- ploff.plot(plpl, show_link=False, auto_open=False, filename=file_name)
+ try:
+ ploff.plot(plpl, show_link=False, auto_open=False, filename=file_name)
+ except plerr.PlotlyEmptyDataError:
+ logging.warning(" No data for the plot. Skipped.")
def _generate_all_charts(spec, input_data):
:type input_data: InputData
"""
+ csv_table = list()
+ # Create the header:
+ builds = spec.cpta["data"].values()[0]
+ builds_lst = [str(build) for build in range(builds[0], builds[-1] + 1)]
+ header = "Build Number:," + ",".join(builds_lst) + '\n'
+ csv_table.append(header)
+
results = list()
for chart in spec.cpta["plots"]:
logging.info(" Generating the chart '{0}' ...".
chart_data = dict()
for job in data:
for idx, build in job.items():
- for test in build:
- if chart_data.get(test["name"], None) is None:
- chart_data[test["name"]] = OrderedDict()
+ for test_name, test in build.items():
+ if chart_data.get(test_name, None) is None:
+ chart_data[test_name] = OrderedDict()
try:
- chart_data[test["name"]][int(idx)] = \
+ chart_data[test_name][int(idx)] = \
test["result"]["throughput"]
except (KeyError, TypeError):
- chart_data[test["name"]][int(idx)] = None
+ pass
+
+ # Add items to the csv table:
+ for tst_name, tst_data in chart_data.items():
+ tst_lst = list()
+ for build in builds_lst:
+ item = tst_data.get(int(build), '')
+ tst_lst.append(str(item) if item else '')
+ csv_table.append("{0},".format(tst_name) + ",".join(tst_lst) + '\n')
for period in chart["periods"]:
# Generate traces:
logging.warning("No data for the test '{0}'".
format(test_name))
continue
+ test_name = test_name.split('.')[-1]
trace, result = _generate_trending_traces(
test_data,
period=period,
logging.info(" Done.")
+ # Write the tables:
+ file_name = spec.cpta["output-file"] + "-trending"
+ with open("{0}.csv".format(file_name), 'w') as file_handler:
+ file_handler.writelines(csv_table)
+
+ txt_table = None
+ with open("{0}.csv".format(file_name), 'rb') as csv_file:
+ csv_content = csv.reader(csv_file, delimiter=',', quotechar='"')
+ for row in csv_content:
+ if txt_table is None:
+ txt_table = prettytable.PrettyTable(row)
+ else:
+ txt_table.add_row(row)
+ txt_table.align["Build Number:"] = "l"
+ with open("{0}.txt".format(file_name), "w") as txt_file:
+ txt_file.write(str(txt_table))
+
+ # Evaluate result:
result = "PASS"
for item in results:
if item is None:
result = "PASS"
elif item == 0.33 or item == 0.0:
result = "FAIL"
- print(results)
- print(result)
- if result == "FAIL":
- return 1
- else:
- return 0
+
+ logging.info("Partial results: {0}".format(results))
+ logging.info("Result: {0}".format(result))
+
+ return result