ELKConnect - new module to fetch data from ELK server instead of GA, TRexDataAnalysis...
authoritraviv <[email protected]>
Sun, 5 Mar 2017 12:22:07 +0000 (14:22 +0200)
committer itraviv <[email protected]>
Sun, 5 Mar 2017 12:22:07 +0000 (14:22 +0200)
Signed-off-by: itraviv <[email protected]>
doc/AnalyticsWebReport.py
doc/ELKConnect.py [new file with mode: 0755]
doc/TRexDataAnalysisV2.py

index 04058a0..a4c4e0a 100755 (executable)
@@ -1,33 +1,35 @@
-import os\r
-import sys\r
-import AnalyticsConnect as ac\r
-import TRexDataAnalysisV2 as tr\r
-import time\r
-import datetime\r
-\r
-\r
-def main(verbose=False, source='ga', detailed_test_stats='yes'):\r
-    if source == 'ga':\r
-        if verbose:\r
-            print('Retrieving data from Google Analytics')\r
-        analytics = ac.initialize_analyticsreporting()\r
-        current_date = time.strftime("%Y-%m-%d")\r
-        k_days_ago = datetime.datetime.now() - datetime.timedelta(days=15)\r
-        start_date = str(k_days_ago.date())\r
-        response = ac.get_report(analytics, start_date, current_date)\r
-        all_data_dict, setups = ac.export_to_tuples(response)\r
-    if source == 'elk':\r
-        all_data_dict = 0  # INSERT JSON FROM ELK HERE\r
-    dest_path = os.path.join(os.getcwd(), 'build', 'images')\r
-    if verbose:\r
-        print('Saving data to %s' % dest_path)\r
-        if detailed_test_stats:\r
-            print('generating detailed table for test results')\r
-    tr.create_all_data(all_data_dict, start_date, current_date, save_path=dest_path,\r
-                       detailed_test_stats=detailed_test_stats)\r
-    if verbose:\r
-        print('Done without errors.')\r
-\r
-\r
-if __name__ == "__main__":\r
-    main()\r
+import os
+import sys
+import ELKConnect as ec
+import AnalyticsConnect as ac
+import TRexDataAnalysisV2 as tr
+import time
+import datetime
+
+
def main(verbose=False, source='elk', detailed_test_stats='yes'):
    """Fetch TRex performance data and generate graphs/tables under build/images.

    :param verbose: print progress messages when True.
    :param source: 'elk' (default) to query the ELK server, 'ga' for Google Analytics.
    :param detailed_test_stats: truthy value -> also dump a detailed per-result CSV table.
    :raises ValueError: if source is neither 'ga' nor 'elk'.
    """
    current_date = time.strftime("%Y-%m-%d")
    # GA reports are bounded to the last 15 days; the ELK manager applies its own window.
    k_days_ago = datetime.datetime.now() - datetime.timedelta(days=15)
    start_date = str(k_days_ago.date())
    if source == 'ga':
        if verbose:
            print('Retrieving data from Google Analytics')
        analytics = ac.initialize_analyticsreporting()
        response = ac.get_report(analytics, start_date, current_date)
        all_data_dict, setups = ac.export_to_tuples(response)
    elif source == 'elk':
        elk_manager = ec.ELKManager(hostname='sceasr-b20', index='trex_perf-000004', port=9200)
        all_data_dict = elk_manager.fetch_and_parse()
    else:
        # fail fast instead of hitting a NameError on all_data_dict below
        raise ValueError("unknown data source %r (expected 'ga' or 'elk')" % source)
    dest_path = os.path.join(os.getcwd(), 'build', 'images')
    if verbose:
        print('Saving data to %s' % dest_path)
        if detailed_test_stats:
            print('generating detailed table for test results')
    tr.create_all_data(all_data_dict, current_date, save_path=dest_path,
                       detailed_test_stats=detailed_test_stats)
    if verbose:
        print('Done without errors.')


if __name__ == "__main__":
    main()
diff --git a/doc/ELKConnect.py b/doc/ELKConnect.py
new file mode 100755 (executable)
index 0000000..fb7e2c2
--- /dev/null
@@ -0,0 +1,88 @@
+import sys\r
+import os\r
+import json\r
+import datetime\r
+import time\r
+\r
+ext_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'scripts', 'external_libs'))\r
+elk_path = os.path.join(ext_path, 'elasticsearch')\r
+urllib_path = os.path.join(ext_path, 'urllib3')\r
+\r
+if elk_path not in sys.path:\r
+    sys.path.append(elk_path)\r
+if urllib_path not in sys.path:\r
+    sys.path.append(urllib_path)\r
+\r
+import elasticsearch\r
+import elasticsearch.helpers\r
+\r
+\r
class ELKManager:
    """Fetches stateless TRex performance results from an Elasticsearch server and
    arranges them as {setup_name: {test_name: [(name, timestamp, mpps, build_id), ...]}}.
    """

    def __init__(self, hostname, index='trex_perf-000004', port=9200):
        self.hostname = hostname
        self.index = index
        self.port = port
        # setups whose results are collected
        self.setup_names = ['trex07', 'trex08', 'trex09', 'trex11', 'kiwi02']
        self.es = elasticsearch.Elasticsearch([{"host": hostname, "port": port}])
        self.all_data_raw = {}     # raw ES hits, keyed by setup name
        self.all_data_parsed = {}  # parsed + timestamp-sorted result tuples

    @staticmethod
    def time_res_calculation():
        """Return (now, two_weeks_ago) as epoch milliseconds for the query time range."""
        milli_since_epoch = int(time.time() * 1000)
        time_2w_ago = datetime.date.timetuple(datetime.datetime.utcnow() - datetime.timedelta(weeks=2))
        two_w_ago_epoch_milli = int(time.mktime(time_2w_ago) * 1000)
        return milli_since_epoch, two_w_ago_epoch_milli

    def fetch_all_data(self):
        """Query ES for the last two weeks of stateless results of every known setup."""
        res = {}
        milli_since_epoch, two_weeks_ago_epoch_milli = self.time_res_calculation()
        for setup_name in self.setup_names:
            query = {
                "_source": ["info.setup.name", "test.name", "test.mpps_pc", "timestamp", "build_id"],
                "size": 10000,
                "query": {
                    "bool": {
                        "filter": [
                            {"range": {
                                "timestamp": {"gte": two_weeks_ago_epoch_milli, "lte": milli_since_epoch,
                                              "format": "epoch_millis"}}},
                            {"term": {"info.setup.name": setup_name}},
                            {"term": {"test.type": "stateless"}}
                        ]
                    }
                }
            }
            # helpers.scan pages through all matching documents via the scroll API
            res[setup_name] = list(elasticsearch.helpers.scan(self.es, index=self.index, query=query, size=10000))
        self.all_data_raw = res

    def parse_raw_data(self):
        """Flatten the raw ES hits into per-setup/per-test tuple lists, then sort by timestamp."""
        for fetched_setup_name in self.all_data_raw:
            # do not shadow the outer loop variable: the per-document setup name gets its own name
            for hit in self.all_data_raw[fetched_setup_name]:
                source = hit['_source']
                setup_name = source['info']['setup']['name']
                test_name = source['test']['name']
                test_result = source['test']['mpps_pc']
                timestamp = source['timestamp']
                build_id = source['build_id']
                if setup_name not in self.all_data_parsed:
                    self.all_data_parsed[setup_name] = {}
                if test_name not in self.all_data_parsed[setup_name]:
                    self.all_data_parsed[setup_name][test_name] = []
                self.all_data_parsed[setup_name][test_name].append((test_name, timestamp, test_result, build_id))
        # sort once, after every setup was parsed (previously ran inside the loop,
        # re-sorting the whole structure for each setup)
        self.all_data_parsed = self.sorted(self.all_data_parsed)

    @staticmethod
    def sorted(parsed_data):
        """Return a copy of parsed_data with every test's result list sorted by timestamp."""
        sorted_tests_data = {}
        for setup_name in parsed_data:
            setup_tests_data = parsed_data[setup_name]
            sorted_tests_data[setup_name] = {}
            for test_name in setup_tests_data:
                # index 1 of each tuple is the timestamp; the previous tuple-unpacking
                # lambda form is a SyntaxError on Python 3
                sorted_tests_data[setup_name][test_name] = sorted(setup_tests_data[test_name],
                                                                  key=lambda record: record[1])
        return sorted_tests_data

    def fetch_and_parse(self):
        """Convenience wrapper: fetch raw data, parse it, and return the parsed dict."""
        self.fetch_all_data()
        self.parse_raw_data()
        return self.all_data_parsed
index 1914319..0696f86 100755 (executable)
-#!/scratch/Anaconda2.4.0/bin/python\r
-import pandas as pd\r
-import numpy as np\r
-import matplotlib\r
-\r
-matplotlib.use('Agg')\r
-from matplotlib import pyplot as plt\r
-from matplotlib import dates as matdates\r
-from matplotlib import lines as matlines\r
-import os\r
-import time\r
-from datetime import datetime\r
-\r
-"""\r
-This Module is structured to work with a raw data at the following JSON format:\r
-\r
- {'setup_name': {'test1_name':[QUERY1,QUERY2,QUERY3],\r
-                'test2_name':[QUERY1,QUERY2,QUERY3]\r
-                }\r
-  'setup_name2': {'test1_name':[QUERY1,QUERY2,QUERY3],\r
-                'test2_name':[QUERY1,QUERY2,QUERY3]\r
-                }\r
- }\r
-\r
- The Query structure is set (currently) to this:\r
-\r
- (test_name,state, date,hour,minute,mpps_result,mpps_min,mpps_max,build_id) example:\r
-\r
- ["syn attack - 64 bytes, single CPU", "stl", "20161226", "01", "39", "9.631898", "9.5", "11.5", "54289"]\r
-\r
- it can be changed to support other formats of queries, simply change the query class to support your desired structure\r
- the query class specify the indexes of the data within the query tuple\r
-\r
-"""\r
-\r
-\r
-class TestQuery(object):\r
-    query_dateformat = "%Y%m%d"  # date format in the query\r
-    QUERY_DATE = 2\r
-    QUERY_HOUR = 3\r
-    QUERY_MINUTE = 4\r
-    QUERY_MPPS_RESULT = 5\r
-    QUERY_TEST_MIN = 6\r
-    QUERY_TEST_MAX = 7\r
-    QUERY_BUILD_ID = 8\r
-\r
-\r
-class Test:\r
-    def __init__(self, name, setup_name, end_date):\r
-        self.name = name\r
-        self.setup_name = setup_name\r
-        self.end_date = end_date\r
-        self.stats = []  # tuple\r
-        self.results_df = []  # dataFrame\r
-        self.latest_result = []  # float\r
-        self.latest_result_date = ''  # string\r
-\r
-    def analyze_all_test_data(self, raw_test_data):\r
-        test_results = []\r
-        test_dates = []\r
-        test_build_ids = []\r
-        test_mins = set()\r
-        test_maxs = set()\r
-        for query in raw_test_data:\r
-            date_formatted = time.strftime("%d-%m-%Y",\r
-                                           time.strptime(query[int(TestQuery.QUERY_DATE)], TestQuery.query_dateformat))\r
-            time_of_res = date_formatted + '-' + query[int(TestQuery.QUERY_HOUR)] + ':' + query[\r
-                int(TestQuery.QUERY_MINUTE)]\r
-            test_dates.append(time_of_res)\r
-            test_results.append(float(query[int(TestQuery.QUERY_MPPS_RESULT)]))\r
-            test_build_ids.append(query[int(TestQuery.QUERY_BUILD_ID)])\r
-            test_mins.add(float(query[int(TestQuery.QUERY_TEST_MIN)]))\r
-            test_maxs.add(float(query[int(TestQuery.QUERY_TEST_MAX)]))\r
-        test_results_df = pd.DataFrame({self.name: test_results, self.name + ' Date': test_dates,\r
-                                        "Setup": ([self.setup_name] * len(test_results)), "Build Id": test_build_ids},\r
-                                       dtype='str')\r
-        stats = tuple(\r
-            [float(test_results_df[self.name].mean()), min(test_mins), max(test_maxs)])  # stats = (avg_mpps,min,max)\r
-        self.latest_result = float(test_results_df[self.name].iloc[-1])\r
-        self.latest_result_date = str(test_results_df[test_results_df.columns[3]].iloc[-1])\r
-        self.results_df = test_results_df\r
-        self.stats = stats\r
-\r
-\r
-class Setup:\r
-    def __init__(self, name, start_date, end_date, raw_setup_data):\r
-        self.name = name\r
-        self.start_date = start_date  # string of date\r
-        self.end_date = end_date  # string of date\r
-        self.tests = []  # list of test objects\r
-        self.all_tests_data_table = pd.DataFrame()  # dataframe\r
-        self.setup_trend_stats = pd.DataFrame()  # dataframe\r
-        self.latest_test_results = pd.DataFrame()  # dataframe\r
-        self.raw_setup_data = raw_setup_data  # dictionary\r
-        self.test_names = raw_setup_data.keys()  # list of names\r
-\r
-    def analyze_all_tests(self):\r
-        for test_name in self.test_names:\r
-            t = Test(test_name, self.name, self.end_date)\r
-            t.analyze_all_test_data(self.raw_setup_data[test_name])\r
-            self.tests.append(t)\r
-\r
-    def analyze_latest_test_results(self):\r
-        test_names = []\r
-        test_dates = []\r
-        test_latest_results = []\r
-        for test in self.tests:\r
-            test_names.append(test.name)\r
-            test_dates.append(test.latest_result_date)\r
-            test_latest_results.append(test.latest_result)\r
-        self.latest_test_results = pd.DataFrame(\r
-            {'Date': test_dates, 'Test Name': test_names, 'MPPS\Core (Norm)': test_latest_results},\r
-            index=range(1, len(test_latest_results) + 1))\r
-        self.latest_test_results = self.latest_test_results[[2, 1, 0]]  # re-order columns to name|MPPS|date\r
-\r
-    def analyze_all_tests_stats(self):\r
-        test_names = []\r
-        all_test_stats = []\r
-        for test in self.tests:\r
-            test_names.append(test.name)\r
-            all_test_stats.append(test.stats)\r
-        self.setup_trend_stats = pd.DataFrame(all_test_stats, index=test_names,\r
-                                              columns=['Avg MPPS/Core (Norm)', 'Golden Min', 'Golden Max'])\r
-        self.setup_trend_stats.index.name = 'Test Name'\r
-\r
-    def analyze_all_tests_trend(self):\r
-        all_tests_trend_data = []\r
-        for test in self.tests:\r
-            all_tests_trend_data.append(test.results_df)\r
-        self.all_tests_data_table = reduce(lambda x, y: pd.merge(x, y, how='outer'), all_tests_trend_data)\r
-\r
-    def plot_trend_graph_all_tests(self, save_path='', file_name='_trend_graph.png'):\r
-        time_format1 = '%d-%m-%Y-%H:%M'\r
-        time_format2 = '%Y-%m-%d-%H:%M'\r
-        for test in self.tests:\r
-            test_data = test.results_df[test.results_df.columns[2]].tolist()\r
-            test_time_stamps = test.results_df[test.results_df.columns[3]].tolist()\r
-            test_time_stamps.append(self.end_date + '-23:59')\r
-            test_data.append(test_data[-1])\r
-            float_test_time_stamps = []\r
-            for ts in test_time_stamps:\r
-                try:\r
-                    float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format1)))\r
-                except:\r
-                    float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format2)))\r
-            plt.plot_date(x=float_test_time_stamps, y=test_data, label=test.name, fmt='-', xdate=True)\r
-            plt.legend(fontsize='small', loc='best')\r
-        plt.ylabel('MPPS/Core (Norm)')\r
-        plt.title('Setup: ' + self.name)\r
-        plt.tick_params(\r
-            axis='x',\r
-            which='both',\r
-            bottom='off',\r
-            top='off',\r
-            labelbottom='off')\r
-        plt.xlabel('Time Period: ' + self.start_date + ' - ' + self.end_date)\r
-        if save_path:\r
-            plt.savefig(os.path.join(save_path, self.name + file_name))\r
-            if not self.setup_trend_stats.empty:\r
-                (self.setup_trend_stats.round(2)).to_csv(os.path.join(save_path, self.name +\r
-                                                                      '_trend_stats.csv'))\r
-            plt.close('all')\r
-\r
-    def plot_latest_test_results_bar_chart(self, save_path='', img_file_name='_latest_test_runs.png',\r
-                                           stats_file_name='_latest_test_runs_stats.csv'):\r
-        plt.figure()\r
-        colors_for_bars = ['b', 'g', 'r', 'c', 'm', 'y']\r
-        self.latest_test_results[[1]].plot(kind='bar', legend=False,\r
-                                           color=colors_for_bars)  # plot only mpps data, which is in column 1\r
-        plt.xticks(rotation='horizontal')\r
-        plt.xlabel('Index of Tests')\r
-        plt.ylabel('MPPS/Core (Norm)')\r
-        plt.title("Test Runs for Setup: " + self.name)\r
-        if save_path:\r
-            plt.savefig(os.path.join(save_path, self.name + img_file_name))\r
-            (self.latest_test_results.round(2)).to_csv(\r
-                os.path.join(save_path, self.name + stats_file_name))\r
-        plt.close('all')\r
-\r
-    def analyze_all_setup_data(self):\r
-        self.analyze_all_tests()\r
-        self.analyze_latest_test_results()\r
-        self.analyze_all_tests_stats()\r
-        self.analyze_all_tests_trend()\r
-\r
-    def plot_all(self, save_path=''):\r
-        self.plot_latest_test_results_bar_chart(save_path)\r
-        self.plot_trend_graph_all_tests(save_path)\r
-\r
-\r
-def latest_runs_comparison_bar_chart(setup_name1, setup_name2, setup1_latest_result, setup2_latest_result,\r
-                                     save_path=''\r
-                                     ):\r
-    s1_res = setup1_latest_result[[0, 1]]  # column0 is test name, column1 is MPPS\Core\r
-    s2_res = setup2_latest_result[[0, 1, 2]]  # column0 is test name, column1 is MPPS\Core, column2 is Date\r
-    s1_res.columns = ['Test Name', setup_name1]\r
-    s2_res.columns = ['Test Name', setup_name2, 'Date']\r
-    compare_dframe = pd.merge(s1_res, s2_res, on='Test Name')\r
-    compare_dframe.plot(kind='bar')\r
-    plt.legend(fontsize='small', loc='best')\r
-    plt.xticks(rotation='horizontal')\r
-    plt.xlabel('Index of Tests')\r
-    plt.ylabel('MPPS/Core (Norm)')\r
-    plt.title("Comparison between " + setup_name1 + " and " + setup_name2)\r
-    if save_path:\r
-        plt.savefig(os.path.join(save_path, "_comparison.png"))\r
-        compare_dframe = compare_dframe.round(2)\r
-        compare_dframe.to_csv(os.path.join(save_path, '_comparison_stats_table.csv'))\r
-\r
-        # WARNING: if the file _all_stats.csv already exists, this script deletes it, to prevent overflowing of data\r
-\r
-\r
-def create_all_data(ga_data, start_date, end_date, save_path='', detailed_test_stats=''):\r
-    all_setups = {}\r
-    all_setups_data = []\r
-    setup_names = ga_data.keys()\r
-    for setup_name in setup_names:\r
-        s = Setup(setup_name, start_date, end_date, ga_data[setup_name])\r
-        s.analyze_all_setup_data()\r
-        s.plot_all(save_path)\r
-        all_setups_data.append(s.all_tests_data_table)\r
-        all_setups[setup_name] = s\r
-\r
-    if detailed_test_stats:\r
-        if os.path.exists(os.path.join(save_path, '_detailed_table.csv')):\r
-            os.remove(os.path.join(save_path, '_detailed_table.csv'))\r
-        all_setups_data_dframe = pd.DataFrame().append(all_setups_data)\r
-        all_setups_data_dframe.to_csv(os.path.join(save_path, '_detailed_table.csv'))\r
-\r
-    trex07setup = all_setups['trex07']\r
-    trex08setup = all_setups['trex08']\r
-    latest_runs_comparison_bar_chart('Mellanox ConnectX-4',\r
-                                     'Intel XL710', trex07setup.latest_test_results,\r
-                                     trex08setup.latest_test_results,\r
-                                     save_path=save_path)\r
+#!/scratch/Anaconda2.4.0/bin/python
+import pandas as pd
+import numpy as np
+import matplotlib
+
+matplotlib.use('Agg')
+from matplotlib import pyplot as plt
+from matplotlib import dates as matdates
+from matplotlib import lines as matlines
+import os
+import time
+from datetime import datetime
+
+"""
+This Module is structured to work with a raw data at the following JSON format:
+
+ {'setup_name': {'test1_name':[QUERY1,QUERY2,QUERY3],
+                'test2_name':[QUERY1,QUERY2,QUERY3]
+                }
+  'setup_name2': {'test1_name':[QUERY1,QUERY2,QUERY3],
+                'test2_name':[QUERY1,QUERY2,QUERY3]
+                }
+ }
+
+ The Query structure is set (currently) to this:
+
+ (test_name,state, date,hour,minute,mpps_result,mpps_min,mpps_max,build_id) example:
+
+ ["syn attack - 64 bytes, single CPU", "stl", "20161226", "01", "39", "9.631898", "9.5", "11.5", "54289"]
+
+ it can be changed to support other formats of queries, simply change the query class to support your desired structure
+ the query class specify the indexes of the data within the query tuple
+
+"""
+
+
class TestQuery(object):
    """Field layout of a single raw result tuple.

    A query tuple is: (test_name, timestamp, mpps_result, build_id);
    the constants below give the index of each field.
    """
    QUERY_TIMESTAMP = 1
    QUERY_MPPS_RESULT = 2
    QUERY_BUILD_ID = 3
    # format of the timestamp field inside the query
    QUERY_TIMEFORMAT = "%Y-%m-%d %H:%M:%S"
+
+
class Test:
    """Collects and analyzes all results of a single test on a single setup."""

    def __init__(self, name, setup_name, end_date):
        self.name = name
        self.setup_name = setup_name
        self.end_date = end_date
        self.stats = []  # tuple: (avg, min, max, std, error %, number of results)
        self.results_df = []  # pandas DataFrame of all results
        self.latest_result = []  # float: most recent MPPS/core result
        self.latest_result_date = ''  # string: date of the most recent result

    def analyze_all_test_data(self, raw_test_data):
        """Build the per-test results DataFrame and summary stats from raw query tuples.

        :param raw_test_data: list of (test_name, timestamp, mpps_result, build_id)
            tuples, assumed sorted oldest-first (the last one is taken as "latest").
        """
        test_results = []
        test_dates = []
        test_build_ids = []
        for query in raw_test_data:
            # re-format the ELK timestamp into the plotting time format
            time_of_query = time.strptime(query[TestQuery.QUERY_TIMESTAMP], TestQuery.QUERY_TIMEFORMAT)
            time_formatted = time.strftime("%d-%m-%Y-%H:%M", time_of_query)
            test_dates.append(time_formatted)
            test_results.append(float(query[int(TestQuery.QUERY_MPPS_RESULT)]))
            test_build_ids.append(query[int(TestQuery.QUERY_BUILD_ID)])
        test_results_df = pd.DataFrame({self.name: test_results, self.name + ' Date': test_dates,
                                        "Setup": ([self.setup_name] * len(test_results)), "Build Id": test_build_ids},
                                       dtype='str')
        stats_avg = float(test_results_df[self.name].mean())
        stats_min = float(test_results_df[self.name].min())
        stats_max = float(test_results_df[self.name].max())
        stats = tuple(
            [stats_avg, stats_min, stats_max,
             float(test_results_df[self.name].std()),
             float(((stats_max - stats_min) / stats_avg) * 100),
             len(test_results)])  # stats = (avg_mpps,min,max,std,error, no of test_results) error = ((max-min)/avg)*100
        self.latest_result = float(test_results_df[self.name].iloc[-1])
        # select the date column by label: the former columns[3] lookup depended on the
        # alphabetical ordering of the test name against 'Build Id'/'Setup'
        self.latest_result_date = str(test_results_df[self.name + ' Date'].iloc[-1])
        self.results_df = test_results_df
        self.stats = stats
+
+
class Setup:
    """Aggregates all Test objects of one physical setup and renders its graphs and tables."""

    def __init__(self, name, end_date, raw_setup_data):
        self.name = name
        self.end_date = end_date  # string of date
        self.tests = []  # list of test objects
        self.all_tests_data_table = pd.DataFrame()  # dataframe
        self.setup_trend_stats = pd.DataFrame()  # dataframe
        self.latest_test_results = pd.DataFrame()  # dataframe
        self.raw_setup_data = raw_setup_data  # dictionary
        self.test_names = raw_setup_data.keys()  # list of names

    def analyze_all_tests(self):
        # build and analyze a Test object for every test seen on this setup
        for test_name in self.test_names:
            t = Test(test_name, self.name, self.end_date)
            t.analyze_all_test_data(self.raw_setup_data[test_name])
            self.tests.append(t)

    def analyze_latest_test_results(self):
        # one-row-per-test summary of the most recent result of each test
        test_names = []
        test_dates = []
        test_latest_results = []
        for test in self.tests:
            test_names.append(test.name)
            test_dates.append(test.latest_result_date)
            test_latest_results.append(test.latest_result)
        self.latest_test_results = pd.DataFrame(
            {'Date': test_dates, 'Test Name': test_names, 'MPPS\Core (Norm)': test_latest_results},
            index=range(1, len(test_latest_results) + 1))
        # NOTE(review): df[[2, 1, 0]] with non-integer column labels relies on the old
        # pandas fallback to positional selection — confirm against the pandas in use
        self.latest_test_results = self.latest_test_results[[2, 1, 0]]  # re-order columns to name|MPPS|date

    def analyze_all_tests_stats(self):
        # one-row-per-test table of the Test.stats tuples
        test_names = []
        all_test_stats = []
        for test in self.tests:
            test_names.append(test.name)
            all_test_stats.append(test.stats)
        self.setup_trend_stats = pd.DataFrame(all_test_stats, index=test_names,
                                              columns=['Avg MPPS/Core (Norm)', 'Min', 'Max', 'Std','Error (%)', 'Total Results'])
        self.setup_trend_stats.index.name = 'Test Name'

    def analyze_all_tests_trend(self):
        # outer-merge every test's results_df into one wide table
        all_tests_trend_data = []
        for test in self.tests:
            all_tests_trend_data.append(test.results_df)
        # NOTE(review): bare `reduce` is the Python 2 builtin (shebang is Anaconda2);
        # Python 3 needs functools.reduce
        self.all_tests_data_table = reduce(lambda x, y: pd.merge(x, y, how='outer'), all_tests_trend_data)

    def plot_trend_graph_all_tests(self, save_path='', file_name='_trend_graph.png'):
        """Plot every test's MPPS trend on one date axis; optionally save PNG + stats CSV."""
        time_format1 = '%d-%m-%Y-%H:%M'
        time_format2 = '%Y-%m-%d-%H:%M'
        for test in self.tests:
            # columns[2]/columns[3] look positional — presumably the result and date
            # columns of results_df; verify against Test.analyze_all_test_data
            test_data = test.results_df[test.results_df.columns[2]].tolist()
            test_time_stamps = test.results_df[test.results_df.columns[3]].tolist()
            start_date = test_time_stamps[0]
            # extend the line flat to the end of the period so it reaches the right edge
            test_time_stamps.append(self.end_date + '-23:59')
            test_data.append(test_data[-1])
            float_test_time_stamps = []
            for ts in test_time_stamps:
                # timestamps appear in two formats; try both
                try:
                    float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format1)))
                except:
                    float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format2)))
            plt.plot_date(x=float_test_time_stamps, y=test_data, label=test.name, fmt='.-', xdate=True)
            plt.legend(fontsize='small', loc='best')
        plt.ylabel('MPPS/Core (Norm)')
        plt.title('Setup: ' + self.name)
        plt.tick_params(
            axis='x',
            which='both',
            bottom='off',
            top='off',
            labelbottom='off')
        # NOTE(review): start_date leaks from the loop above — it is the LAST test's first
        # timestamp, and a NameError if self.tests is empty; [:-6] strips the '-HH:MM' suffix
        plt.xlabel('Time Period: ' + start_date[:-6] + ' - ' + self.end_date)
        if save_path:
            plt.savefig(os.path.join(save_path, self.name + file_name))
            if not self.setup_trend_stats.empty:
                (self.setup_trend_stats.round(2)).to_csv(os.path.join(save_path, self.name +
                                                                      '_trend_stats.csv'))
            plt.close('all')

    def plot_latest_test_results_bar_chart(self, save_path='', img_file_name='_latest_test_runs.png',
                                           stats_file_name='_latest_test_runs_stats.csv'):
        """Bar chart of each test's most recent result; optionally save PNG + stats CSV."""
        plt.figure()
        colors_for_bars = ['b', 'g', 'r', 'c', 'm', 'y']
        self.latest_test_results[[1]].plot(kind='bar', legend=False,
                                           color=colors_for_bars)  # plot only mpps data, which is in column 1
        plt.xticks(rotation='horizontal')
        plt.xlabel('Index of Tests')
        plt.ylabel('MPPS/Core (Norm)')
        plt.title("Test Runs for Setup: " + self.name)
        if save_path:
            plt.savefig(os.path.join(save_path, self.name + img_file_name))
            (self.latest_test_results.round(2)).to_csv(
                os.path.join(save_path, self.name + stats_file_name))
        plt.close('all')

    def analyze_all_setup_data(self):
        # run the full analysis pipeline; must be called before plot_all()
        self.analyze_all_tests()
        self.analyze_latest_test_results()
        self.analyze_all_tests_stats()
        self.analyze_all_tests_trend()

    def plot_all(self, save_path=''):
        # render both charts (and their CSVs when save_path is given)
        self.plot_latest_test_results_bar_chart(save_path)
        self.plot_trend_graph_all_tests(save_path)
+
+
def latest_runs_comparison_bar_chart(setup_name1, setup_name2, setup1_latest_result, setup2_latest_result,
                                     save_path=''
                                     ):
    """Plot a bar chart comparing the latest runs of two setups and optionally save it.

    setup1_latest_result / setup2_latest_result are Setup.latest_test_results
    DataFrames (columns ordered name|MPPS|date).
    """
    s1_res = setup1_latest_result[[0, 1]]  # column0 is test name, column1 is MPPS\Core
    s2_res = setup2_latest_result[[0, 1, 2]]  # column0 is test name, column1 is MPPS\Core, column2 is Date
    # NOTE(review): assigning .columns on a sliced frame relies on the slice being a copy;
    # newer pandas emits SettingWithCopyWarning here — confirm against the pandas in use
    s1_res.columns = ['Test Name', setup_name1]
    s2_res.columns = ['Test Name', setup_name2, 'Date']
    # inner join on test name so only tests present on both setups are compared
    compare_dframe = pd.merge(s1_res, s2_res, on='Test Name')
    compare_dframe.plot(kind='bar')
    plt.legend(fontsize='small', loc='best')
    plt.xticks(rotation='horizontal')
    plt.xlabel('Index of Tests')
    plt.ylabel('MPPS/Core (Norm)')
    plt.title("Comparison between " + setup_name1 + " and " + setup_name2)
    if save_path:
        plt.savefig(os.path.join(save_path, "_comparison.png"))
        compare_dframe = compare_dframe.round(2)
        compare_dframe.to_csv(os.path.join(save_path, '_comparison_stats_table.csv'))

        # WARNING: if the file _all_stats.csv already exists, this script deletes it, to prevent overflowing of data
+
+
def create_all_data(ga_data, end_date, save_path='', detailed_test_stats=''):
    """Analyze and plot the data of every setup found in ga_data.

    :param ga_data: {setup_name: {test_name: [query tuples]}} raw data dict.
    :param end_date: last date of the analyzed period (string).
    :param save_path: directory where images/CSV files are written.
    :param detailed_test_stats: truthy -> also dump a merged per-result CSV table.
    """
    all_setups = {}
    all_setups_data = []
    setup_names = ga_data.keys()
    for setup_name in setup_names:
        s = Setup(setup_name, end_date, ga_data[setup_name])
        s.analyze_all_setup_data()
        s.plot_all(save_path)
        all_setups_data.append(s.all_tests_data_table)
        all_setups[setup_name] = s

    if detailed_test_stats:
        # delete a pre-existing table so old rows do not accumulate
        if os.path.exists(os.path.join(save_path, '_detailed_table.csv')):
            os.remove(os.path.join(save_path, '_detailed_table.csv'))
        all_setups_data_dframe = pd.DataFrame().append(all_setups_data)
        all_setups_data_dframe.to_csv(os.path.join(save_path, '_detailed_table.csv'))

    # compare the two reference NIC setups only when both were actually fetched;
    # previously a missing setup raised a KeyError and aborted the whole report
    if 'trex07' in all_setups and 'trex08' in all_setups:
        trex07setup = all_setups['trex07']
        trex08setup = all_setups['trex08']
        latest_runs_comparison_bar_chart('Mellanox ConnectX-4',
                                         'Intel XL710', trex07setup.latest_test_results,
                                         trex08setup.latest_test_results,
                                         save_path=save_path)