changed trex_analytics to support detailed mode, generating a detailed table for... 92/4292/1
author     itraviv <[email protected]>    Sun, 11 Dec 2016 15:35:37 +0000 (17:35 +0200)
committer  itraviv <[email protected]>    Sun, 11 Dec 2016 15:35:52 +0000 (17:35 +0200)
Signed-off-by: itraviv <[email protected]>
doc/AnalyticsConnect.py
doc/AnalyticsWebReport.py
doc/TRexDataAnalysis.py
doc/trex_analytics.asciidoc
doc/ws_main.py

diff --git a/doc/AnalyticsConnect.py b/doc/AnalyticsConnect.py
index 1061953..bb473c5 100755 (executable)
@@ -21,112 +21,117 @@ VIEW_ID = '120207451'
 
 
 def initialize_analyticsreporting():
-  """Initializes an analyticsreporting service object.
-
-  Returns:
-    analytics an authorized analyticsreporting service object.
-  """
-
-  credentials = ServiceAccountCredentials.from_p12_keyfile(
-    SERVICE_ACCOUNT_EMAIL, KEY_FILE_LOCATION, scopes=SCOPES)
-
-  http = credentials.authorize(httplib2.Http())
-
-  # Build the service object.
-  analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
-
-  return analytics
-
-
-def get_report(analytics,start_date='2016-11-27',end_date='2016-11-27'):
-  # Use the Analytics Service Object to query the Analytics Reporting API V4.
-  return analytics.reports().batchGet(
-      body={
-        'reportRequests': [
-        {
-          'viewId': VIEW_ID,
-          'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
-          'metrics': [{'expression': 'ga:metric1','formattingType':'CURRENCY'},
-                                         {'expression': 'ga:metric2','formattingType':'CURRENCY'},
-                                         {'expression': 'ga:metric3','formattingType':'CURRENCY'},
-                                         {'expression': 'ga:totalEvents'}],
-          'dimensions': [{"name":"ga:eventAction"},{"name": "ga:dimension1"},{"name": "ga:dimension2"},{"name": "ga:dimension3"},{"name": "ga:dimension4"}],
-                 'pageSize': 10000
+    """Initializes an analyticsreporting service object.
+
+    Returns:
+      analytics: an authorized analyticsreporting service object.
+    """
+
+    credentials = ServiceAccountCredentials.from_p12_keyfile(
+        SERVICE_ACCOUNT_EMAIL, KEY_FILE_LOCATION, scopes=SCOPES)
+
+    http = credentials.authorize(httplib2.Http())
+
+    # Build the service object.
+    analytics = build('analytics', 'v4', http=http, discoveryServiceUrl=DISCOVERY_URI)
+
+    return analytics
+
+
+def get_report(analytics, start_date='2016-11-27', end_date='2016-11-27'):
+    # Use the Analytics Service Object to query the Analytics Reporting API V4.
+    return analytics.reports().batchGet(
+        body={
+            'reportRequests': [
+                {
+                    'viewId': VIEW_ID,
+                    'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
+                    'metrics': [{'expression': 'ga:metric1', 'formattingType': 'CURRENCY'},
+                                {'expression': 'ga:metric2', 'formattingType': 'CURRENCY'},
+                                {'expression': 'ga:metric3', 'formattingType': 'CURRENCY'},
+                                {'expression': 'ga:totalEvents'}],
+                    'dimensions': [{"name": "ga:eventAction"}, {"name": "ga:dimension1"}, {"name": "ga:dimension2"},
+                                   {"name": "ga:dimension3"},
+                                   {"name": "ga:date"}, {"name": "ga:hour"}, {"name": "ga:minute"}],
+                    'pageSize': 10000
+                }
+            ]
         }
-        ]
-               }
-  ).execute()
+    ).execute()
 
 
 def print_response(response):
-  """Parses and prints the Analytics Reporting API V4 response"""
-
-  for report in response.get('reports', []):
-    columnHeader = report.get('columnHeader', {})
-    dimensionHeaders = columnHeader.get('dimensions', [])
-    metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
-    rows = report.get('data', {}).get('rows', [])
+    """Parses and prints the Analytics Reporting API V4 response"""
 
-    for row in rows:
-      dimensions = row.get('dimensions', [])
-      dateRangeValues = row.get('metrics', [])
+    for report in response.get('reports', []):
+        columnHeader = report.get('columnHeader', {})
+        dimensionHeaders = columnHeader.get('dimensions', [])
+        metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
+        rows = report.get('data', {}).get('rows', [])
 
-      for header, dimension in zip(dimensionHeaders, dimensions):
-        print header + ': ' + dimension
+        for row in rows:
+            dimensions = row.get('dimensions', [])
+            dateRangeValues = row.get('metrics', [])
 
-      for i, values in enumerate(dateRangeValues):
-        print 'Date range (' + str(i) + ')'
-        for metricHeader, value in zip(metricHeaders, values.get('values')):
-          print metricHeader.get('name') + ': ' + value
+            for header, dimension in zip(dimensionHeaders, dimensions):
+                print header + ': ' + dimension
 
+            for i, values in enumerate(dateRangeValues):
+                print 'Date range (' + str(i) + ')'
+                for metricHeader, value in zip(metricHeaders, values.get('values')):
+                    print metricHeader.get('name') + ': ' + value
 
 
 def export_to_tuples(response):
-       # counter = 0
-       setups = set()
-       df = {}
-       for report in response.get('reports', []):
-               rows = report.get('data', {}).get('rows', [])
-               for row in rows:
-                       data = []
-                       dimensions = row.get('dimensions', [])
-                       # print 'this is dimensions'
-                       # print dimensions
-                       data.append(dimensions[1]) #test name
-                       data.append(dimensions[2]) # state
-                       # data.append(dimensions[3]) # setup
-                       data.append(dimensions[4]) # test_type
-                       dateRangeValues = row.get('metrics', [])
-                       value = dateRangeValues[0].get('values',[])[0] #MPPS
-                       golden_min = dateRangeValues[0].get('values',[])[1] #golden min
-                       golden_max = dateRangeValues[0].get('values',[])[2] #golden max
-                       data.append(value)
-                       data.append(golden_min)
-                       data.append(golden_max)
-                       if dimensions[3] in setups:
-                               if dimensions[1] in df[dimensions[3]]:
-                                       df[dimensions[3]][dimensions[1]].append(tuple(data))
-                               else:
-                                       df[dimensions[3]][dimensions[1]] = [tuple(data)]
-                       else:
-                               df[dimensions[3]] = {}
-                               df[dimensions[3]][dimensions[1]] = [tuple(data)]
-                               setups.add(dimensions[3])
-       # print 'counter is: %d' % counter
-       return df, setups
+    # counter = 0
+    setups = set()
+    df = {}
+    for report in response.get('reports', []):
+        rows = report.get('data', {}).get('rows', [])
+        for row in rows:
+            data = []
+            dimensions = row.get('dimensions', [])
+            # print 'this is dimensions'
+            # print dimensions
+            data.append(dimensions[1])  # test name
+            data.append(dimensions[2])  # state
+            # data.append(dimensions[3]) # setup
+            data.append(dimensions[4])  # date in YYYYMMDD format
+            data.append(dimensions[5])  # hour
+            data.append(dimensions[6])  # minute
+            dateRangeValues = row.get('metrics', [])
+            value = dateRangeValues[0].get('values', [])[0]  # MPPS
+            golden_min = dateRangeValues[0].get('values', [])[1]  # golden min
+            golden_max = dateRangeValues[0].get('values', [])[2]  # golden max
+            data.append(value)
+            # counter += 1
+            data.append(golden_min)
+            data.append(golden_max)
+            data.append(dimensions[0])  # build id
+            if dimensions[3] in setups:
+                if dimensions[1] in df[dimensions[3]]:
+                    df[dimensions[3]][dimensions[1]].append(tuple(data))
+                else:
+                    df[dimensions[3]][dimensions[1]] = [tuple(data)]
+            else:
+                df[dimensions[3]] = {}
+                df[dimensions[3]][dimensions[1]] = [tuple(data)]
+                setups.add(dimensions[3])
+    # print 'counter is: %d' % counter
+    return df, setups
 
 
 def main():
-       analytics = initialize_analyticsreporting()
-       response = get_report(analytics)
-       df, setups = export_to_tuples(response)
-       # pprint(df)
-       return df,setups
+    analytics = initialize_analyticsreporting()
+    response = get_report(analytics)
+    df, setups = export_to_tuples(response)
+    # pprint(df)
+    return df, setups
+
 
 if __name__ == '__main__':
     main()
 
-
 """
 response structure (when fetched with "export to tuples"):
 
@@ -188,4 +193,3 @@ response structure (when fetched with "export to tuples"):
 
 
 """
-
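
For reference, here is a minimal sketch of the structure export_to_tuples() returns after this change, following the dimension and metric order requested in get_report() above. The setup, test, state and build names are made up for illustration; Google Analytics reports all values as strings.

# df maps setup name -> test name -> list of per-run tuples:
# (test_name, state, date_YYYYMMDD, hour, minute, mpps, golden_min, golden_max, build_id)
df = {
    'trex08': {                                    # hypothetical setup name
        'example-test': [                          # hypothetical test name
            ('example-test', 'stateless', '20161211', '15', '35',
             '9.5', '8.0', '12.0', 'build-1234'),  # hypothetical values
        ],
    },
}
setups = {'trex08'}
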
diff --git a/doc/AnalyticsWebReport.py b/doc/AnalyticsWebReport.py
index bd4a9a2..1806cab 100755 (executable)
@@ -6,7 +6,7 @@ import time
 import datetime\r
 \r
 \r
-def main(verbose = False):\r
+def main(verbose=False, detailed_test_stats=''):\r
     if verbose:\r
         print('Retrieving data from Google Analytics')\r
     analytics = ac.initialize_analyticsreporting()\r
@@ -18,10 +18,13 @@ def main(verbose = False):
     dest_path = os.path.join(os.getcwd(), 'build', 'images')\r
     if verbose:\r
         print('Saving data to %s' % dest_path)\r
-    tr.create_all_data(ga_all_data_dict, setups, start_date, current_date, save_path = dest_path,\r
-                       add_stats='yes')\r
+        if detailed_test_stats:\r
+            print('generating detailed table for test results')\r
+    tr.create_all_data(ga_all_data_dict, setups, start_date, current_date, save_path=dest_path,\r
+                       add_stats='yes', detailed_test_stats=detailed_test_stats)\r
     if verbose:\r
         print('Done without errors.')\r
 \r
+\r
 if __name__ == "__main__":\r
     main()\r
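
A minimal usage sketch of the extended entry point, assuming it is run from the doc directory next to AnalyticsConnect.py and TRexDataAnalysis.py:

import AnalyticsWebReport

# Regular performance report, as before.
AnalyticsWebReport.main(verbose=True)

# Same report, plus the per-run table written to build/images/_detailed_table.csv.
AnalyticsWebReport.main(verbose=True, detailed_test_stats='yes')
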
diff --git a/doc/TRexDataAnalysis.py b/doc/TRexDataAnalysis.py
index fb855a1..ed67426 100755 (executable)
@@ -2,34 +2,46 @@
 import pandas as pd\r
 import numpy as np\r
 import matplotlib\r
+\r
 matplotlib.use('Agg')\r
 from matplotlib import pyplot as plt\r
 import os\r
+import time\r
 \r
 \r
-def generate_dframe_for_test(test_name, test_data):\r
+def generate_dframe_for_test(setup_name, test_name, test_data):\r
     test_results = []\r
+    test_dates = []\r
+    test_build_ids = []\r
     test_mins = set()\r
     test_maxs = set()\r
     for query in test_data:\r
-        test_results.append(float(query[3]))\r
-        test_mins.add(float(query[4]))\r
-        test_maxs.add(float(query[5]))\r
+        test_results.append(float(query[5]))\r
+        date_formatted = time.strftime("%d-%m-%Y", time.strptime(query[2], "%Y%m%d"))\r
+        time_of_res = date_formatted + '-' + query[3] + ':' + query[4]\r
+        test_dates.append(time_of_res)\r
+        test_build_ids.append(query[8])\r
+        test_mins.add(float(query[6]))\r
+        test_maxs.add(float(query[7]))\r
     df = pd.DataFrame({test_name: test_results})\r
+    df_detailed = pd.DataFrame({(test_name + ' Results'): test_results, (test_name + ' Date'): test_dates,\r
+                                "Setup": ([setup_name] * len(test_results)), "Build Id": test_build_ids})\r
     stats = tuple([float(df.mean()), min(test_mins), max(test_maxs)])  # stats = (avg_mpps,min,max)\r
-    return df, stats\r
+    return df, stats, df_detailed\r
 \r
 \r
 def generate_dframe_arr_and_stats_of_tests_per_setup(date, setup_name, setup_dict):\r
     dframe_arr_trend = []\r
     stats_arr = []\r
     dframe_arr_latest = []\r
+    dframe_arr_detailed = []\r
     test_names = setup_dict.keys()\r
     for test in test_names:\r
-        df, stats = generate_dframe_for_test(test, setup_dict[test])\r
+        df, stats, df_detailed = generate_dframe_for_test(setup_name, test, setup_dict[test])\r
+        dframe_arr_detailed.append(df_detailed)\r
         dframe_arr_trend.append(df)\r
         stats_arr.append(stats)\r
-        df_latest = float(setup_dict[test][-1][3])\r
+        df_latest = float(setup_dict[test][-1][5])  # index 5 of the result tuple is the MPPS value\r
         dframe_arr_latest.append(df_latest)\r
     dframe_arr_latest = pd.DataFrame({'Date': [date] * len(dframe_arr_latest),\r
                                       'Setup': [setup_name],\r
@@ -38,7 +50,7 @@ def generate_dframe_arr_and_stats_of_tests_per_setup(date, setup_name, setup_dic
                                      index=range(1, len(dframe_arr_latest) + 1))\r
     stats_df = pd.DataFrame(stats_arr, index=setup_dict.keys(), columns=['Avg MPPS', 'Golden Min', 'Golden Max'])\r
     stats_df.index.name = 'Test Name'\r
-    return dframe_arr_trend, stats_df, dframe_arr_latest\r
+    return dframe_arr_trend, stats_df, dframe_arr_latest, dframe_arr_detailed\r
 \r
 \r
 def create_plot_for_dframe_arr(dframe_arr, setup_name, start_date, end_date, show='no', save_path='',\r
@@ -78,20 +90,43 @@ def create_bar_plot_for_latest_runs_per_setup(dframe_all_tests_latest, setup_nam
         plt.show()\r
 \r
 \r
-def create_all_data_per_setup(setup_dict, setup_name, start_date, end_date, show='no', save_path='', add_stats=''):\r
-    dframe_arr, stats_arr, dframe_latest_arr = generate_dframe_arr_and_stats_of_tests_per_setup(end_date, setup_name,\r
-                                                                                                setup_dict)\r
+def create_all_data_per_setup(setup_dict, setup_name, start_date, end_date, show='no', save_path='', add_stats='',\r
+                              detailed_test_stats=''):\r
+    dframe_arr, stats_arr, dframe_latest_arr, dframe_detailed = generate_dframe_arr_and_stats_of_tests_per_setup(\r
+        end_date, setup_name,\r
+        setup_dict)\r
+    if detailed_test_stats:\r
+        detailed_table = create_detailed_table(dframe_detailed, setup_name, save_path)\r
+    else:\r
+        detailed_table = []\r
     create_bar_plot_for_latest_runs_per_setup(dframe_latest_arr, setup_name, show=show, save_path=save_path)\r
     create_plot_for_dframe_arr(dframe_arr, setup_name, start_date, end_date, show, save_path)\r
     if add_stats:\r
         stats_arr = stats_arr.round(2)\r
         stats_arr.to_csv(os.path.join(save_path, setup_name + '_trend_stats.csv'))\r
     plt.close('all')\r
+    return detailed_table\r
+\r
+\r
+def create_detailed_table(dframe_arr_detailed, setup_name, save_path=''):\r
+    result = reduce(lambda x, y: pd.merge(x, y, on=('Build Id', 'Setup')), dframe_arr_detailed)\r
+    return result\r
 \r
 \r
-def create_all_data(ga_data, setup_names, start_date, end_date, save_path='', add_stats=''):\r
+# WARNING: if the file _detailed_table.csv already exists, this script deletes it, to prevent old data\r
+# from accumulating, since new data is appended to the file\r
+def create_all_data(ga_data, setup_names, start_date, end_date, save_path='', add_stats='', detailed_test_stats=''):\r
+    total_detailed_data = []\r
+    if detailed_test_stats:\r
+        if os.path.exists(os.path.join(save_path, '_detailed_table.csv')):\r
+            os.remove(os.path.join(save_path, '_detailed_table.csv'))\r
     for setup_name in setup_names:\r
         if setup_name == 'trex11':\r
             continue\r
-        create_all_data_per_setup(ga_data[setup_name], setup_name, start_date, end_date, show='no', save_path=save_path,\r
-                                  add_stats=add_stats)\r
+        detailed_setup_data = create_all_data_per_setup(ga_data[setup_name], setup_name, start_date, end_date,\r
+                                                        show='no', save_path=save_path,\r
+                                                        add_stats=add_stats, detailed_test_stats=detailed_test_stats)\r
+        total_detailed_data.append(detailed_setup_data)\r
+    if detailed_test_stats:\r
+        total_detailed_dframe = pd.DataFrame().append(total_detailed_data)\r
+        total_detailed_dframe.to_csv(os.path.join(save_path, '_detailed_table.csv'))\r
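
The new create_detailed_table() is a chained inner merge of the per-test detailed frames. Below is a small self-contained sketch of that behaviour with made-up test names, build ids and numbers; functools.reduce is the same built-in reduce used above, imported explicitly so the sketch also runs under Python 3.

import pandas as pd
from functools import reduce

# Two per-test frames shaped like df_detailed in generate_dframe_for_test():
# shared 'Build Id'/'Setup' key columns plus '<test> Results' / '<test> Date' columns.
testA = pd.DataFrame({'Build Id': ['b100', 'b101'], 'Setup': ['trex08'] * 2,
                      'testA Results': [9.5, 9.7],
                      'testA Date': ['11-12-2016-15:35', '12-12-2016-15:35']})
testB = pd.DataFrame({'Build Id': ['b100', 'b101'], 'Setup': ['trex08'] * 2,
                      'testB Results': [4.1, 4.2],
                      'testB Date': ['11-12-2016-15:40', '12-12-2016-15:40']})

# Same reduction as create_detailed_table(): one row per (Build Id, Setup),
# one Results/Date column pair per test.
detailed = reduce(lambda x, y: pd.merge(x, y, on=('Build Id', 'Setup')), [testA, testB])
print(detailed)
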
diff --git a/doc/trex_analytics.asciidoc b/doc/trex_analytics.asciidoc
index 35c3a3e..5f4cbfe 100755 (executable)
@@ -26,11 +26,9 @@ endif::backend-xhtml11[]
 .Setup Details
 [options='header',halign='center',cols="1,5"]
 |=================
-| Server: | UCSC-C240-M4SX
 | CPU:    | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
-| RAM:    | 65536 @ 2133 MHz
-| NICs:   | 2 x Intel Corporation Ethernet Controller X710 
-| OS:     | Fedora 18
+| NICs:   | 1 x 100G (2 interfaces) Mellanox ConnectX-4
+| OS:     | CentOS 7
 |=================
 
 image:images/trex07_latest_test_runs.png[title="trex07 test runs",align="left",width={p_width}, link="images/trex07_latest_test_runs.png"]
@@ -52,10 +50,8 @@ include::build/images/trex07_trend_stats.csv[]
 .Setup Details
 [options='header',halign='center',cols="1,5"]
 |=================
-| Server: | UCSC-C240-M4SX
 | CPU:    | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
-| RAM:    | 65536 @ 2133 MHz
-| NICs:   | 2 x Intel Corporation Ethernet Controller X710 
+| NICs:   | 2 x 40G (4 interfaces) Intel XL710 
 | OS:     | Fedora 18
 |=================
 
@@ -79,9 +75,7 @@ include::build/images/trex08_trend_stats.csv[]
 .Setup Details
 [options='header',halign='center',cols="1,5"]
 |=================
-| Server: | UCSC-C240-M4SX
 | CPU:    | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
-| RAM:    | 65536 @ 2133 MHz
 | NICs:   | 2x10G (X710) (8 interfaces) 
 | OS:     | Fedora 18
 |=================
@@ -105,11 +99,12 @@ include::build/images/trex09_trend_stats.csv[]
 //////////////////////////////////////////////////////////
 == Setup: TRex11
 .Setup Details
-[options='header',halign='center']
-|====================================================================================================================
-|Name |OS |NICs |Routers 
-| trex11 | Fedora 18| 2x10G (X710) (8 interfaces), 1x10G (2 interfaces), 1x1G (4 interfaces) | Loopback on X710 + ASA 5520 + ASA 5512 + ASA 5585-ssp10
-|====================================================================================================================
+[options='header',halign='center',cols="1,5"]
+|=================
+| CPU:    | 2 x Intel(R) Xeon(R) CPU E5-2667 v3 @ 3.20GHz
+| NICs:   | 1x40G (2 interfaces) Cisco VIC
+| OS:     | Ubuntu 14
+|=================
 
 image:images/trex11_latest_test_runs.png[title="trex11 test runs",align="left",width={p_width}, link="images/trex11_latest_test_runs.png"]
  
@@ -132,8 +127,7 @@ include::build/images/trex11_trend_stats.csv[]
 [options='header',halign='center',cols="1,5"]
 |=================
 | CPU:    | 2 x Intel(R) Xeon(R) CPU E5-2650 0 @ 2.00GHz
-| RAM:    | 31 Gib
-| NICs:   | 2x10G (X710) (4 interfaces) 
+| NICs:   | 2x10G (4 interfaces) Intel 82599EB
 | OS:     | Fedora 18
 |=================
 
diff --git a/doc/ws_main.py b/doc/ws_main.py
index 3fdc2be..54975d0 100755 (executable)
@@ -251,6 +251,7 @@ def scansize(self):
 def options(opt):
     opt.add_option('--exe', action='store_true', default=False, help='Execute the program after it is compiled')
     opt.add_option('--performance', action='store_true', help='Build a performance report based on google analytics')
+    opt.add_option('--performance-detailed', action='store_true', help='Print detailed test results (date, time, build id and results) to a csv file named _detailed_table.csv')
 
 def configure(conf):
     search_path = '~/.local/bin /usr/local/bin/ /usr/bin'
@@ -891,7 +892,10 @@ def build_cp(bld,dir,root,callback):
 def create_analytic_report(task):
     try:
         import AnalyticsWebReport as analytics
-        analytics.main(verbose = Logs.verbose)
+        if task.generator.bld.options.performance_detailed:
+            analytics.main(verbose=Logs.verbose, detailed_test_stats='yes')
+        else:
+            analytics.main(verbose=Logs.verbose)
     except Exception as e:
         raise Exception('Error importing or using AnalyticsWebReport script: %s' % e)
 
@@ -921,7 +925,7 @@ def build(bld):
         bld(rule=my_copy, target=x)
         bld.add_group() 
 
-    if bld.options.performance:
+    if bld.options.performance or bld.options.performance_detailed:
         bld(rule=create_analytic_report)
         bld.add_group()
         bld(rule=convert_to_html_toc_book, source='trex_analytics.asciidoc waf.css', target='trex_analytics.html',scan=ascii_doc_scan);
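
Usage note: the exact build command is not part of this diff, but with the wiring above the detailed report would be produced by running the doc build with the new option (e.g. waf build --performance-detailed via the project's waf wrapper). Once generated, the table can be inspected directly; a minimal sketch, assuming the default output location used by AnalyticsWebReport.main():

import os
import pandas as pd

# _detailed_table.csv is written by TRexDataAnalysis.create_all_data() under build/images;
# the first column is the DataFrame index emitted by to_csv().
detailed = pd.read_csv(os.path.join('build', 'images', '_detailed_table.csv'), index_col=0)
print(detailed.head())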