added option to support input from elk in analyticsWebReport, added TRexDataAnalysisV... 47/5247/1
author itraviv <[email protected]>
Thu, 12 Jan 2017 13:45:35 +0000 (15:45 +0200)
committer itraviv <[email protected]>
Thu, 12 Jan 2017 13:45:58 +0000 (15:45 +0200)
Signed-off-by: itraviv <[email protected]>
doc/AnalyticsWebReport.py
doc/TRexDataAnalysisV2.py
doc/trex_analytics.asciidoc

index e3f6504..04058a0 100755 (executable)
@@ -6,21 +6,24 @@ import time
 import datetime\r
 \r
 \r
-def main(verbose=False, detailed_test_stats=''):\r
-    if verbose:\r
-        print('Retrieving data from Google Analytics')\r
-    analytics = ac.initialize_analyticsreporting()\r
-    current_date = time.strftime("%Y-%m-%d")\r
-    k_days_ago = datetime.datetime.now() - datetime.timedelta(days=15)\r
-    start_date = str(k_days_ago.date())\r
-    response = ac.get_report(analytics, start_date, current_date)\r
-    ga_all_data_dict, setups = ac.export_to_tuples(response)\r
+def main(verbose=False, source='ga', detailed_test_stats='yes'):\r
+    if source == 'ga':\r
+        if verbose:\r
+            print('Retrieving data from Google Analytics')\r
+        analytics = ac.initialize_analyticsreporting()\r
+        current_date = time.strftime("%Y-%m-%d")\r
+        k_days_ago = datetime.datetime.now() - datetime.timedelta(days=15)\r
+        start_date = str(k_days_ago.date())\r
+        response = ac.get_report(analytics, start_date, current_date)\r
+        all_data_dict, setups = ac.export_to_tuples(response)\r
+    if source == 'elk':\r
+        all_data_dict = 0  # INSERT JSON FROM ELK HERE\r
     dest_path = os.path.join(os.getcwd(), 'build', 'images')\r
     if verbose:\r
         print('Saving data to %s' % dest_path)\r
         if detailed_test_stats:\r
             print('generating detailed table for test results')\r
-    tr.create_all_data(ga_all_data_dict, start_date, current_date, save_path=dest_path,\r
+    tr.create_all_data(all_data_dict, start_date, current_date, save_path=dest_path,\r
                        detailed_test_stats=detailed_test_stats)\r
     if verbose:\r
         print('Done without errors.')\r
index e7e82b2..cd49538 100755 (executable)
@@ -2,19 +2,48 @@
 import pandas as pd\r
 import numpy as np\r
 import matplotlib\r
+from enum import IntEnum\r
 \r
 matplotlib.use('Agg')\r
 from matplotlib import pyplot as plt\r
 import os\r
 import time\r
 \r
+"""\r
+This Module is structured to work with a raw data at the following JSON format:\r
+\r
+ {'setup_name': {'test1_name':[QUERY1,QUERY2,QUERY3],\r
+                'test2_name':[QUERY1,QUERY2,QUERY3]\r
+                }\r
+  'setup_name2': {'test1_name':[QUERY1,QUERY2,QUERY3],\r
+                'test2_name':[QUERY1,QUERY2,QUERY3]\r
+                }\r
+ }\r
+\r
+ The Query structure is set (currently) to this:\r
+ (test_name,state, date,hour,minute,mpps_result,mpps_min,mpps_max,build_id)\r
+\r
+ it can be changed to support other formats of queries, simply change the enum class to support your desired structure\r
+ the enums specify the indexes of the data within the query tuple\r
+\r
+"""\r
+\r
+\r
+class TestQuery(IntEnum):\r
+    QUERY_DATE = 2  # date format is yyyymmdd\r
+    QUERY_HOUR = 3\r
+    QUERY_MINUTE = 4\r
+    QUERY_MPPS_RESULT = 5\r
+    QUERY_TEST_MIN = 6\r
+    QUERY_TEST_MAX = 7\r
+    QUERY_BUILD_ID = 8\r
 \r
-### TODO: insert a description of a test query\r
 \r
 class Test:\r
-    def __init__(self, name, setup_name):\r
+    def __init__(self, name, setup_name, end_date):\r
         self.name = name\r
         self.setup_name = setup_name\r
+        self.end_date = end_date\r
         self.stats = []  # tuple\r
         self.results_df = []  # dataFrame\r
         self.latest_result = []  # float\r
@@ -27,15 +56,17 @@ class Test:
         test_mins = set()\r
         test_maxs = set()\r
         for query in raw_test_data:\r
-            test_results.append(float(query[5]))\r
-            date_formatted = time.strftime("%d-%m-%Y", time.strptime(query[2], "%Y%m%d"))\r
-            time_of_res = date_formatted + '-' + query[3] + ':' + query[4]\r
+            date_formatted = time.strftime("%d-%m-%Y", time.strptime(query[int(TestQuery.QUERY_DATE)], "%Y%m%d"))\r
+            time_of_res = date_formatted + '-' + query[int(TestQuery.QUERY_HOUR)] + ':' + query[\r
+                int(TestQuery.QUERY_MINUTE)]\r
             test_dates.append(time_of_res)\r
-            test_build_ids.append(query[8])\r
-            test_mins.add(float(query[6]))\r
-            test_maxs.add(float(query[7]))\r
-        test_results_df = pd.DataFrame({self.name: test_results, (self.name + ' Date'): test_dates,\r
-                                        "Setup": ([self.setup_name] * len(test_results)), "Build Id": test_build_ids})\r
+            test_results.append(float(query[int(TestQuery.QUERY_MPPS_RESULT)]))\r
+            test_build_ids.append(query[int(TestQuery.QUERY_BUILD_ID)])\r
+            test_mins.add(float(query[int(TestQuery.QUERY_TEST_MIN)]))\r
+            test_maxs.add(float(query[int(TestQuery.QUERY_TEST_MAX)]))\r
+        test_results_df = pd.DataFrame({self.name: test_results, self.name + ' Date': test_dates,\r
+                                        "Setup": ([self.setup_name] * len(test_results)), "Build Id": test_build_ids},\r
+                                       dtype='str')\r
         stats = tuple(\r
             [float(test_results_df[self.name].mean()), min(test_mins), max(test_maxs)])  # stats = (avg_mpps,min,max)\r
         self.latest_result = float(test_results_df[self.name].iloc[-1])\r
@@ -58,7 +89,7 @@ class Setup:
 \r
     def analyze_all_tests(self):\r
         for test_name in self.test_names:\r
-            t = Test(test_name, self.name)\r
+            t = Test(test_name, self.name, self.end_date)\r
             t.analyze_all_test_data(self.raw_setup_data[test_name])\r
             self.tests.append(t)\r
 \r
@@ -93,7 +124,7 @@ class Setup:
 \r
     def plot_trend_graph_all_tests(self, save_path='', file_name='_trend_graph.png'):\r
         for test_name in self.test_names:\r
-            self.all_tests_data_table[test_name].plot()\r
+            self.all_tests_data_table[test_name].plot(style=['.-'])\r
             plt.legend(fontsize='small', loc='best')\r
         plt.ylabel('MPPS/Core (Norm)')\r
         plt.title('Setup: ' + self.name)\r
index 1068192..b9dbf4c 100755 (executable)
@@ -22,6 +22,19 @@ endif::backend-xhtml11[]
 
 
 = TRex performance trend 
+Performance is measured in a metric called MPPS/Core (Norm), which stands for Mega-Packets Per Second per core, normalized. +
+ +
+How is the calculation made? +
+ +
+We measure the MPPS of the DUT at its best working point. +
+The measurement is then scaled to fit 100% CPU utilization, and finally divided by the number of cores the device has,
+to get a normalized MPPS/Core which is denoted as MPPS/Core (Norm) +
+ +
+How to run these tests? +
+[source,python]
+----
+       trex-core/scripts> ./run_regression --stl -t <name of test> --cfg setups/<name of setup>
+----
 
 == Setup: XL710 (4x40GbE)  trex-08