regression: stl updates
authorYaroslav Brustinov <[email protected]>
Wed, 9 Mar 2016 12:05:08 +0000 (14:05 +0200)
committerYaroslav Brustinov <[email protected]>
Wed, 9 Mar 2016 12:05:08 +0000 (14:05 +0200)
32 files changed:
scripts/automation/regression/functional_tests/config.yaml [new file with mode: 0644]
scripts/automation/regression/functional_tests/functional_general_test.py [new file with mode: 0755]
scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap [new file with mode: 0644]
scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap [new file with mode: 0644]
scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap [new file with mode: 0644]
scripts/automation/regression/functional_tests/golden/udp_590.cap [new file with mode: 0644]
scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py [new file with mode: 0755]
scripts/automation/regression/functional_tests/misc_methods_test.py [new file with mode: 0755]
scripts/automation/regression/functional_tests/pkt_bld_general_test.py [new file with mode: 0755]
scripts/automation/regression/functional_tests/platform_cmd_cache_test.py [new file with mode: 0755]
scripts/automation/regression/functional_tests/platform_cmd_link_test.py [new file with mode: 0755]
scripts/automation/regression/functional_tests/platform_device_cfg_test.py [new file with mode: 0755]
scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py [new file with mode: 0755]
scripts/automation/regression/functional_tests/platform_if_manager_test.py [new file with mode: 0755]
scripts/automation/regression/functional_tests/platform_if_obj_test.py [new file with mode: 0755]
scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py [new file with mode: 0644]
scripts/automation/regression/functional_tests/stl_basic_tests.py [new file with mode: 0644]
scripts/automation/regression/stateful_tests/tests_exceptions.py [new file with mode: 0755]
scripts/automation/regression/stateful_tests/trex_general_test.py [new file with mode: 0755]
scripts/automation/regression/stateful_tests/trex_imix_test.py [new file with mode: 0755]
scripts/automation/regression/stateful_tests/trex_ipv6_test.py [new file with mode: 0755]
scripts/automation/regression/stateful_tests/trex_nat_test.py [new file with mode: 0755]
scripts/automation/regression/stateful_tests/trex_nbar_test.py [new file with mode: 0755]
scripts/automation/regression/stateful_tests/trex_rx_test.py [new file with mode: 0755]
scripts/automation/regression/stateless_tests/stl_examples_test.py [new file with mode: 0755]
scripts/automation/regression/stateless_tests/stl_general_test.py [new file with mode: 0644]
scripts/external_libs/ansi2html/LICENSE [new file with mode: 0755]
scripts/external_libs/ansi2html/README.rst [new file with mode: 0755]
scripts/external_libs/ansi2html/ansi2html/__init__.py [new file with mode: 0755]
scripts/external_libs/ansi2html/ansi2html/converter.py [new file with mode: 0755]
scripts/external_libs/ansi2html/ansi2html/style.py [new file with mode: 0755]
scripts/external_libs/ansi2html/ansi2html/util.py [new file with mode: 0755]

diff --git a/scripts/automation/regression/functional_tests/config.yaml b/scripts/automation/regression/functional_tests/config.yaml
new file mode 100644 (file)
index 0000000..4f4c7c4
--- /dev/null
@@ -0,0 +1,74 @@
+################################################################
+####         T-Rex nightly test configuration file          ####
+################################################################
+
+
+### T-Rex configuration:
+# hostname       - can be DNS name or IP for the TRex machine for ssh to the box
+# password       - root password for TRex machine
+# is_dual        - should the TRex inject with -p ?
+# version_path   - path to the t-rex version and executable
+# cores          - how many cores should be used
+# latency        - rate of latency packets injected by the TRex
+
+### Router configuration:
+# hostname       - the router hostname as appears in ______# cli prefix
+# ip_address     - the router's ip that can be used to communicate with
+# image          - the desired image to be loaded as the router's running config
+# line_password  - router password when accessed via Telnet
+# en_password    - router password when changing to "enable" mode
+# interfaces     - an array of client-server pairs, representing the interfaces configurations of the router
+# configurations - an array of configurations that could possibly be loaded into the router during the test. 
+#                  The "clean" configuration is a mandatory configuration the router will load with to run the basic test bench
+
+### TFTP configuration:
+# hostname       - the tftp hostname
+# ip_address     - the tftp's ip address
+# images_path    - the tftp's relative path in which the router's images are located
+
+### Test_misc configuration:
+# expected_bw    - the "golden" bandwidth (in Gbps) results planned on receiving from the test
+
+trex:
+  hostname       : hostname
+  password       : root password
+  version_path   : not used
+  cores          : 1
+
+router:
+  model          : device model
+  hostname       : device hostname
+  ip_address     : device ip 
+  image          : device image name
+  line_password  : telnet pass
+  en_password    : enable pass
+  mgmt_interface : GigabitEthernet0/0/0
+  clean_config   : path to clean_config file
+  intf_masking   : 255.255.255.0
+  ipv6_mask      : 64
+  interfaces     :
+    - client : 
+        name          : GigabitEthernet0/0/1
+        src_mac_addr  : 0000.0001.0000
+        dest_mac_addr : 0000.1000.0000
+      server : 
+        name          : GigabitEthernet0/0/2
+        src_mac_addr  : 0000.0002.0000
+        dest_mac_addr : 0000.2000.0000
+      vrf_name      :   null
+    - client : 
+        name          : GigabitEthernet0/0/3
+        src_mac_addr  : 0000.0003.0000
+        dest_mac_addr : 0000.3000.0000
+      server : 
+        name          : GigabitEthernet0/0/4
+        src_mac_addr  : 0000.0004.0000
+        dest_mac_addr : 0000.4000.0000
+      vrf_name      : dup  
+    
+
+tftp:
+  hostname       : tftp hostname
+  ip_address     : tftp ip
+  root_dir       : tftp root dir
+  images_path    : path related to root dir
diff --git a/scripts/automation/regression/functional_tests/functional_general_test.py b/scripts/automation/regression/functional_tests/functional_general_test.py
new file mode 100755 (executable)
index 0000000..525b58d
--- /dev/null
@@ -0,0 +1,22 @@
+#!/router/bin/python
+
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+
+
+class CGeneralFunctional_Test(object): 
+    def __init__(self):
+        pass
+
+
+    def setUp(self):
+        pass
+
+
+    def tearDown(self):
+        pass
+
+if __name__ == "__main__":
+    pass
diff --git a/scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap
new file mode 100644 (file)
index 0000000..6ca3229
Binary files /dev/null and b/scripts/automation/regression/functional_tests/golden/basic_imix_golden.cap differ
diff --git a/scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap
new file mode 100644 (file)
index 0000000..43ae236
Binary files /dev/null and b/scripts/automation/regression/functional_tests/golden/basic_imix_vm_golden.cap differ
diff --git a/scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap b/scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap
new file mode 100644 (file)
index 0000000..7d5e7ec
Binary files /dev/null and b/scripts/automation/regression/functional_tests/golden/basic_tuple_gen_golden.cap differ
diff --git a/scripts/automation/regression/functional_tests/golden/udp_590.cap b/scripts/automation/regression/functional_tests/golden/udp_590.cap
new file mode 100644 (file)
index 0000000..29302f2
Binary files /dev/null and b/scripts/automation/regression/functional_tests/golden/udp_590.cap differ
diff --git a/scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py b/scripts/automation/regression/functional_tests/hltapi_stream_builder_test.py
new file mode 100755 (executable)
index 0000000..c6b477a
--- /dev/null
@@ -0,0 +1,629 @@
+#!/router/bin/python
+
+import os
+import unittest
+from trex_stl_lib.trex_stl_hltapi import STLHltStream
+from trex_stl_lib.trex_stl_types import validate_type
+from nose.plugins.attrib import attr
+from nose.tools import nottest
+
+def compare_yamls(yaml1, yaml2):
+    validate_type('yaml1', yaml1, str)
+    validate_type('yaml2', yaml2, str)
+    i = 0
+    for line1, line2 in zip(yaml1.strip().split('\n'), yaml2.strip().split('\n')):
+        i += 1
+        assert line1 == line2, 'yamls are not equal starting from line %s:\n%s\n    Golden    <->    Generated\n%s' % (i, line1.strip(), line2.strip())
+
+# TODO: move the tests to compare pcaps, not yamls
+@nottest
+class CTRexHltApi_Test(unittest.TestCase):
+    ''' Checks correct HLTAPI creation of packet/VM '''
+
+    def setUp(self):
+        self.golden_yaml = None
+        self.test_yaml = None
+
+    def tearDown(self):
+        compare_yamls(self.golden_yaml, self.test_yaml)
+
+    # Eth/IP/TCP, all values default, no VM instructions + test MACs correction
+    def test_hlt_basic(self):
+        STLHltStream(mac_src = 'a0:00:01:::01', mac_dst = '0d 00 01 00 00 01',
+                     mac_src2 = '{00 b0 01 00 00 01}', mac_dst2 = 'd0.00.01.00.00.01')
+        with self.assertRaises(Exception):
+            STLHltStream(mac_src2 = '00:00:00:00:00:0k')
+        with self.assertRaises(Exception):
+            STLHltStream(mac_dst2 = '100:00:00:00:00:00')
+        # wrong encap
+        with self.assertRaises(Exception):
+            STLHltStream(l2_encap = 'ethernet_sdfgsdfg')
+        # all default values
+        test_stream = STLHltStream()
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        self.golden_yaml = '''
+- stream:
+    action_count: 0
+    enabled: true
+    flags: 3
+    isg: 0.0
+    mode:
+      percentage: 10.0
+      type: continuous
+    packet:
+      binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEAGusUAAAAAwAAAAQQAAFAAAAABAAAAAVAAD+U1/QAAISEhISEhISEhIQ==
+      meta: ''
+    flow_stats:
+      enabled: false
+    self_start: true
+    vm:
+      instructions: []
+      split_by_var: ''
+'''
+
+    # Eth/IP/TCP, test MAC fields VM, wait for masking of variables for MAC
+    @nottest
+    def test_macs_vm(self):
+        test_stream = STLHltStream(name = 'stream-0', )
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        self.golden_yaml = '''
+TBD
+'''
+
+
+    # Eth/IP/TCP, ip src and dest is changed by VM
+    def test_ip_ranges(self):
+        # running on single core not implemented yet
+        with self.assertRaises(Exception):
+            test_stream = STLHltStream(split_by_cores = 'single',
+                                       ip_src_addr = '192.168.1.1',
+                                       ip_src_mode = 'increment',
+                                       ip_src_count = 5,)
+        # wrong type
+        with self.assertRaises(Exception):
+            test_stream = STLHltStream(split_by_cores = 12345,
+                                       ip_src_addr = '192.168.1.1',
+                                       ip_src_mode = 'increment',
+                                       ip_src_count = 5,)
+
+        test_stream = STLHltStream(split_by_cores = 'duplicate',
+                                   ip_src_addr = '192.168.1.1',
+                                   ip_src_mode = 'increment',
+                                   ip_src_count = 5,
+                                   ip_dst_addr = '5.5.5.5',
+                                   ip_dst_count = 2,
+                                   ip_dst_mode = 'random',
+                                   name = 'test_ip_ranges',
+                                   rate_pps = 1)
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        self.golden_yaml = '''
+- name: test_ip_ranges
+  stream:
+    action_count: 0
+    enabled: true
+    flags: 3
+    isg: 0.0
+    mode:
+      pps: 1.0
+      type: continuous
+    packet:
+      binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEAGrxPAqAEBBQUFBQQAAFAAAAABAAAAAVAAD+UqSwAAISEhISEhISEhIQ==
+      meta: ''
+    flow_stats:
+      enabled: false
+    self_start: true
+    vm:
+      instructions:
+      - init_value: 0
+        max_value: 4
+        min_value: 0
+        name: inc_4_4_1
+        op: inc
+        size: 4
+        step: 1
+        type: flow_var
+      - add_value: 3232235777
+        is_big_endian: true
+        name: inc_4_4_1
+        pkt_offset: 26
+        type: write_flow_var
+      - init_value: 0
+        max_value: 4294967295
+        min_value: 0
+        name: ip_dst_random
+        op: random
+        size: 4
+        step: 1
+        type: flow_var
+      - add_value: 0
+        is_big_endian: true
+        name: ip_dst_random
+        pkt_offset: 30
+        type: write_flow_var
+      - pkt_offset: 14
+        type: fix_checksum_ipv4
+      split_by_var: ''
+'''
+
+    # Eth / IP / TCP, tcp ports are changed by VM
+    def test_tcp_ranges(self):
+        test_stream = STLHltStream(tcp_src_port_mode = 'decrement',
+                                   tcp_src_port_count = 10,
+                                   tcp_dst_port_mode = 'random',
+                                   tcp_dst_port_count = 10,
+                                   tcp_dst_port = 1234,
+                                   name = 'test_tcp_ranges',
+                                   rate_pps = '2')
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        self.golden_yaml = '''
+- name: test_tcp_ranges
+  stream:
+    action_count: 0
+    enabled: true
+    flags: 3
+    isg: 0.0
+    mode:
+      pps: 2.0
+      type: continuous
+    packet:
+      binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEAGusUAAAAAwAAAAQQABNIAAAABAAAAAVAAD+UxewAAISEhISEhISEhIQ==
+      meta: ''
+    flow_stats:
+      enabled: false
+    self_start: true
+    vm:
+      instructions:
+      - init_value: 9
+        max_value: 9
+        min_value: 0
+        name: dec_2_9_1
+        op: dec
+        size: 2
+        step: 1
+        type: flow_var
+      - add_value: 1015
+        is_big_endian: true
+        name: dec_2_9_1
+        pkt_offset: 34
+        type: write_flow_var
+      - init_value: 0
+        max_value: 65535
+        min_value: 0
+        name: tcp_dst_random
+        op: random
+        size: 2
+        step: 1
+        type: flow_var
+      - add_value: 0
+        is_big_endian: true
+        name: tcp_dst_random
+        pkt_offset: 36
+        type: write_flow_var
+      - pkt_offset: 14
+        type: fix_checksum_ipv4
+      split_by_var: dec_2_9_1
+'''
+
+    # Eth / IP / UDP, udp ports are changed by VM
+    def test_udp_ranges(self):
+        # UDP is not set, expecting ignore of wrong UDP arguments
+        STLHltStream(udp_src_port_mode = 'qwerqwer',
+                     udp_src_port_count = 'weqwer',
+                     udp_src_port = 'qwerqwer',
+                     udp_dst_port_mode = 'qwerqwe',
+                     udp_dst_port_count = 'sfgsdfg',
+                     udp_dst_port = 'sdfgsdfg')
+        # UDP is set, expecting fail due to wrong UDP arguments
+        with self.assertRaises(Exception):
+            STLHltStream(l4_protocol = 'udp',
+                         udp_src_port_mode = 'qwerqwer',
+                         udp_src_port_count = 'weqwer',
+                         udp_src_port = 'qwerqwer',
+                         udp_dst_port_mode = 'qwerqwe',
+                         udp_dst_port_count = 'sfgsdfg',
+                         udp_dst_port = 'sdfgsdfg')
+        # generate it already with correct arguments
+        test_stream = STLHltStream(l4_protocol = 'udp',
+                                   udp_src_port_mode = 'decrement',
+                                   udp_src_port_count = 10,
+                                   udp_src_port = 1234,
+                                   udp_dst_port_mode = 'increment',
+                                   udp_dst_port_count = 10,
+                                   udp_dst_port = 1234,
+                                   name = 'test_udp_ranges',
+                                   rate_percent = 20,)
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        self.golden_yaml = '''
+- name: test_udp_ranges
+  stream:
+    action_count: 0
+    enabled: true
+    flags: 3
+    isg: 0.0
+    mode:
+      percentage: 20.0
+      type: continuous
+    packet:
+      binary: AAAAAAAAAAABAAABCABFAAAyAAAAAEARuroAAAAAwAAAAQTSBNIAHsmgISEhISEhISEhISEhISEhISEhISEhIQ==
+      meta: ''
+    flow_stats:
+      enabled: false
+    self_start: true
+    vm:
+      instructions:
+      - init_value: 9
+        max_value: 9
+        min_value: 0
+        name: dec_2_9_1
+        op: dec
+        size: 2
+        step: 1
+        type: flow_var
+      - add_value: 1225
+        is_big_endian: true
+        name: dec_2_9_1
+        pkt_offset: 34
+        type: write_flow_var
+      - init_value: 0
+        max_value: 9
+        min_value: 0
+        name: inc_2_9_1
+        op: inc
+        size: 2
+        step: 1
+        type: flow_var
+      - add_value: 1234
+        is_big_endian: true
+        name: inc_2_9_1
+        pkt_offset: 36
+        type: write_flow_var
+      - pkt_offset: 14
+        type: fix_checksum_ipv4
+      split_by_var: dec_2_9_1
+'''
+
+    # Eth/IP/TCP, packet length is changed in VM by frame_size
+    def test_pkt_len_by_framesize(self):
+        # just check errors, no compare to golden
+        STLHltStream(length_mode = 'increment',
+                     frame_size_min = 100,
+                     frame_size_max = 3000)
+        test_stream = STLHltStream(length_mode = 'decrement',
+                                   frame_size_min = 100,
+                                   frame_size_max = 3000,
+                                   name = 'test_pkt_len_by_framesize',
+                                   rate_bps = 1000)
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        self.golden_yaml = '''
+- name: test_pkt_len_by_framesize
+  stream:
+    action_count: 0
+    enabled: true
+    flags: 3
+    isg: 0.0
+    mode:
+      bps_L2: 1000.0
+      type: continuous
+    packet:
+      binary: AAAAAAAAAAABAAABCABFAAuqAAAAAEAGr00AAAAAwAAAAQQAAFAAAAABAAAAAVAAD+UwiwAAISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhI
SEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhI
SEhISEhISEhISEh
+      meta: ''
+    flow_stats:
+      enabled: false
+    self_start: true
+    vm:
+      instructions:
+      - init_value: 3000
+        max_value: 3000
+        min_value: 100
+        name: pkt_len
+        op: dec
+        size: 2
+        step: 1
+        type: flow_var
+      - name: pkt_len
+        type: trim_pkt_size
+      - add_value: -14
+        is_big_endian: true
+        name: pkt_len
+        pkt_offset: 16
+        type: write_flow_var
+      - pkt_offset: 14
+        type: fix_checksum_ipv4
+      split_by_var: pkt_len
+'''
+
+    # Eth/IP/UDP, packet length is changed in VM by l3_length
+    def test_pkt_len_by_l3length(self):
+        test_stream = STLHltStream(l4_protocol = 'udp',
+                                   length_mode = 'random',
+                                   l3_length_min = 100,
+                                   l3_length_max = 400,
+                                   name = 'test_pkt_len_by_l3length')
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        self.golden_yaml = '''
+- name: test_pkt_len_by_l3length
+  stream:
+    action_count: 0
+    enabled: true
+    flags: 3
+    isg: 0.0
+    mode:
+      percentage: 10.0
+      type: continuous
+    packet:
+      binary: AAAAAAAAAAABAAABCABFAAGQAAAAAEARuVwAAAAAwAAAAQQAAFABfCaTISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEh
+      meta: ''
+    flow_stats:
+      enabled: false
+    self_start: true
+    vm:
+      instructions:
+      - init_value: 114
+        max_value: 414
+        min_value: 114
+        name: pkt_len
+        op: random
+        size: 2
+        step: 1
+        type: flow_var
+      - name: pkt_len
+        type: trim_pkt_size
+      - add_value: -14
+        is_big_endian: true
+        name: pkt_len
+        pkt_offset: 16
+        type: write_flow_var
+      - add_value: -34
+        is_big_endian: true
+        name: pkt_len
+        pkt_offset: 38
+        type: write_flow_var
+      - pkt_offset: 14
+        type: fix_checksum_ipv4
+      split_by_var: ''
+'''
+
+    # Eth/IP/TCP, with vlan, no VM
+    def test_vlan_basic(self):
+        with self.assertRaises(Exception):
+            STLHltStream(l2_encap = 'ethernet_ii',
+                         vlan_id = 'sdfgsdgf')
+        test_stream = STLHltStream(l2_encap = 'ethernet_ii')
+        assert ':802.1Q:' not in test_stream.get_pkt_type(), 'Default packet should not include dot1q'
+
+        test_stream = STLHltStream(name = 'test_vlan_basic', l2_encap = 'ethernet_ii_vlan')
+        assert ':802.1Q:' in test_stream.get_pkt_type(), 'No dot1q in packet with encap ethernet_ii_vlan'
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        self.golden_yaml = '''
+- name: test_vlan_basic
+  stream:
+    action_count: 0
+    enabled: true
+    flags: 3
+    isg: 0.0
+    mode:
+      percentage: 10.0
+      type: continuous
+    packet:
+      binary: AAAAAAAAAAABAAABgQAwAAgARQAALgAAAABABrrJAAAAAMAAAAEEAABQAAAAAQAAAAFQAA/leEMAACEhISEhIQ==
+      meta: ''
+    flow_stats:
+      enabled: false
+    self_start: true
+    vm:
+      instructions: []
+      split_by_var: ''
+'''
+
+    # Eth/IP/TCP, with 4 vlan
+    def test_vlan_multiple(self):
+        # default frame size should be not enough
+        with self.assertRaises(Exception):
+            STLHltStream(vlan_id = [1, 2, 3, 4])
+        test_stream = STLHltStream(name = 'test_vlan_multiple', frame_size = 100,
+                                   vlan_id = [1, 2, 3, 4], # can be either array or string separated by spaces
+                                   vlan_protocol_tag_id = '8100 0x8100')
+        pkt_layers = test_stream.get_pkt_type()
+        assert '802.1Q:' * 4 in pkt_layers, 'No four dot1q layers in packet: %s' % pkt_layers
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        self.golden_yaml = '''
+- name: test_vlan_multiple
+  stream:
+    action_count: 0
+    enabled: true
+    flags: 3
+    isg: 0.0
+    mode:
+      percentage: 10.0
+      type: continuous
+    packet:
+      binary: AAAAAAAAAAABAAABgQAwAYEAMAKBADADgQAwBAgARQAARgAAAABABrqxAAAAAMAAAAEEAABQAAAAAQAAAAFQAA/l6p0AACEhISEhISEhISEhISEhISEhISEhISEhISEhISEhIQ==
+      meta: ''
+    flow_stats:
+      enabled: false
+    self_start: true
+    vm:
+      instructions: []
+      split_by_var: ''
+'''
+
+    # Eth/IP/TCP, with 5 vlans and VMs on vlan_id
+    def test_vlan_vm(self):
+        test_stream = STLHltStream(name = 'test_vlan_vm', frame_size = 100,
+                                   vlan_id = '1 2 1000 4 5',                          # 5 vlans
+                                   vlan_id_mode = 'increment fixed decrement random', # 5th vlan will be default fixed
+                                   vlan_id_step = 2,                                  # 1st vlan step will be 2, others - default 1
+                                   vlan_id_count = [4, 1, 10],                        # 4th independent on count, 5th will be fixed
+                                   )
+        pkt_layers = test_stream.get_pkt_type()
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        assert '802.1Q:' * 5 in pkt_layers, 'No five dot1q layers in packet: %s' % pkt_layers
+        self.golden_yaml = '''
+- name: test_vlan_vm
+  stream:
+    action_count: 0
+    enabled: true
+    flags: 3
+    isg: 0.0
+    mode:
+      percentage: 10.0
+      type: continuous
+    packet:
+      binary: AAAAAAAAAAABAAABgQAwAYEAMAKBADPogQAwBIEAMAUIAEUAAEIAAAAAQAa6tQAAAADAAAABBAAAUAAAAAEAAAABUAAP5SzkAAAhISEhISEhISEhISEhISEhISEhISEhISEhIQ==
+      meta: ''
+    flow_stats:
+      enabled: false
+    self_start: true
+    vm:
+      instructions:
+      - init_value: 0
+        max_value: 6
+        min_value: 0
+        name: dec_2_3_2
+        op: inc
+        size: 2
+        step: 2
+        type: flow_var
+      - add_value: 1
+        is_big_endian: true
+        mask: 4095
+        name: dec_2_3_2
+        pkt_cast_size: 2
+        pkt_offset: 14
+        shift: 0
+        type: write_mask_flow_var
+      - init_value: 9
+        max_value: 9
+        min_value: 0
+        name: dec_2_9_1
+        op: dec
+        size: 2
+        step: 1
+        type: flow_var
+      - add_value: 991
+        is_big_endian: true
+        mask: 4095
+        name: dec_2_9_1
+        pkt_cast_size: 2
+        pkt_offset: 22
+        shift: 0
+        type: write_mask_flow_var
+      - init_value: 0
+        max_value: 65535
+        min_value: 0
+        name: vlan_id_random
+        op: random
+        size: 2
+        step: 1
+        type: flow_var
+      - add_value: 0
+        is_big_endian: true
+        mask: 4095
+        name: vlan_id_random
+        pkt_cast_size: 2
+        pkt_offset: 26
+        shift: 0
+        type: write_mask_flow_var
+      split_by_var: dec_2_9_1
+'''
+
+
+    # Eth/IPv6/TCP, no VM
+    def test_ipv6_basic(self):
+        # default frame size should be not enough
+        with self.assertRaises(Exception):
+            STLHltStream(l3_protocol = 'ipv6')
+        # error should not affect
+        STLHltStream(ipv6_src_addr = 'asdfasdfasgasdf')
+        # error should affect
+        with self.assertRaises(Exception):
+            STLHltStream(l3_protocol = 'ipv6', ipv6_src_addr = 'asdfasdfasgasdf')
+        test_stream = STLHltStream(name = 'test_ipv6_basic', l3_protocol = 'ipv6', length_mode = 'fixed', l3_length = 150, )
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        self.golden_yaml = '''
+- name: test_ipv6_basic
+  stream:
+    action_count: 0
+    enabled: true
+    flags: 3
+    isg: 0.0
+    mode:
+      percentage: 10.0
+      type: continuous
+    packet:
+      binary: AAAAAAAAAAABAAABht1gAAAAAG4GQP6AAAAAAAAAAAAAAAAAABL+gAAAAAAAAAAAAAAAAAAiBAAAUAAAAAEAAAABUAAP5ctLAAAhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISE=
+      meta: ''
+    flow_stats:
+      enabled: false
+    self_start: true
+    vm:
+      instructions: []
+      split_by_var: ''
+'''
+
+    # Eth/IPv6/UDP, VM on ipv6 fields
+    def test_ipv6_src_dst_ranges(self):
+        test_stream = STLHltStream(name = 'test_ipv6_src_dst_ranges', l3_protocol = 'ipv6', l3_length = 150, l4_protocol = 'udp',
+                                   ipv6_src_addr = '1111:2222:3333:4444:5555:6666:7777:8888',
+                                   ipv6_dst_addr = '1111:1111:1111:1111:1111:1111:1111:1111',
+                                   ipv6_src_mode = 'increment', ipv6_src_step = 5, ipv6_src_count = 10,
+                                   ipv6_dst_mode = 'decrement', ipv6_dst_step = '1111:1111:1111:1111:1111:1111:0000:0011', ipv6_dst_count = 150,
+                                   )
+        self.test_yaml = test_stream.dump_to_yaml(self.yaml_save_location())
+        self.golden_yaml = '''
+- name: test_ipv6_src_dst_ranges
+  stream:
+    action_count: 0
+    enabled: true
+    flags: 3
+    isg: 0.0
+    mode:
+      percentage: 10.0
+      type: continuous
+    packet:
+      binary: AAAAAAAAAAABAAABht1gAAAAAG4RQBERIiIzM0REVVVmZnd3iIgRERERERERERERERERERERBAAAUABucjohISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISEhISE=
+      meta: ''
+    flow_stats:
+      enabled: false
+    self_start: true
+    vm:
+      instructions:
+      - init_value: 0
+        max_value: 45
+        min_value: 0
+        name: inc_4_9_5
+        op: inc
+        size: 4
+        step: 5
+        type: flow_var
+      - add_value: 2004322440
+        is_big_endian: true
+        name: inc_4_9_5
+        pkt_offset: 34
+        type: write_flow_var
+      - init_value: 2533
+        max_value: 2533
+        min_value: 0
+        name: dec_4_149_17
+        op: dec
+        size: 4
+        step: 17
+        type: flow_var
+      - add_value: 286328620
+        is_big_endian: true
+        name: dec_4_149_17
+        pkt_offset: 50
+        type: write_flow_var
+      split_by_var: dec_4_149_17
+'''
+
+
+
+
+
+    def yaml_save_location(self):
+        #return os.devnull
+        # debug/development, comment line above
+        return '/tmp/%s.yaml' % self._testMethodName
+
+
diff --git a/scripts/automation/regression/functional_tests/misc_methods_test.py b/scripts/automation/regression/functional_tests/misc_methods_test.py
new file mode 100755 (executable)
index 0000000..096f86d
--- /dev/null
@@ -0,0 +1,61 @@
+#!/router/bin/python
+
+import functional_general_test
+import misc_methods
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+
+
class MiscMethods_Test(functional_general_test.CGeneralFunctional_Test):
    """Unit tests for the network-address generators in misc_methods."""

    def setUp(self):
        # fresh generators for every test so exhaustion cannot leak between tests
        self.ipv4_gen = misc_methods.get_network_addr()
        self.ipv6_gen = misc_methods.get_network_addr(ip_type = 'ipv6')

    def test_ipv4_gen(self):
        """Generator yields [network, netmask] pairs walking the third octet."""
        for octet in range(1, 255):
            expected = [".".join( map(str, [1, 1, octet, 0])), '255.255.255.0']
            assert_equal(next(self.ipv4_gen), expected)

    def test_ipv6_gen(self):
        """Generator yields IPv6 networks walking the third hextet in hex."""
        addr_words = ['2001', 'DB8', 0, '2222', 0, 0, 0, 0]
        for idx in range(0, 255):
            addr_words[2] = hex(idx)[2:]
            assert_equal(next(self.ipv6_gen), ":".join( map(str, addr_words)))

    def test_get_ipv4_client_addr(self):
        """First client address of the first network; octet overflow raises."""
        net_addr = next(self.ipv4_gen)[0]
        assert_equal(misc_methods.get_single_net_client_addr(net_addr), '1.1.1.1')
        assert_raises(ValueError, misc_methods.get_single_net_client_addr, net_addr, {'3' : 255})

    def test_get_ipv6_client_addr(self):
        """Client addresses derived from an IPv6 network; hextet overflow raises."""
        net_addr = next(self.ipv6_gen)
        assert_equal(misc_methods.get_single_net_client_addr(net_addr, {'7' : 1}, ip_type = 'ipv6'), '2001:DB8:0:2222:0:0:0:1')
        assert_equal(misc_methods.get_single_net_client_addr(net_addr, {'7' : 2}, ip_type = 'ipv6'), '2001:DB8:0:2222:0:0:0:2')
        assert_raises(ValueError, misc_methods.get_single_net_client_addr, net_addr, {'7' : 70000})

    @raises(ValueError)
    def test_ipv4_client_addr_exception(self):
        # index '4' is out of range for an IPv4 address
        net_addr = next(self.ipv4_gen)[0]
        misc_methods.get_single_net_client_addr(net_addr, {'4' : 1})

    @raises(ValueError)
    def test_ipv6_client_addr_exception(self):
        # index '8' is out of range for an IPv6 address
        net_addr = next(self.ipv6_gen)
        misc_methods.get_single_net_client_addr(net_addr, {'8' : 1}, ip_type = 'ipv6')

    @raises(StopIteration)
    def test_gen_ipv4_to_limit(self):
        # draining the generator must eventually raise StopIteration
        while True:
            next(self.ipv4_gen)

    @raises(StopIteration)
    def test_gen_ipv6_to_limit(self):
        # draining the generator must eventually raise StopIteration
        while True:
            next(self.ipv6_gen)

    def tearDown(self):
        pass
diff --git a/scripts/automation/regression/functional_tests/pkt_bld_general_test.py b/scripts/automation/regression/functional_tests/pkt_bld_general_test.py
new file mode 100755 (executable)
index 0000000..5f89eaf
--- /dev/null
@@ -0,0 +1,28 @@
+#!/router/bin/python
+
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+import sys
+import outer_packages
+
+
class CGeneralPktBld_Test(object):
    """Common base class for the packet-builder functional tests.

    Provides empty setUp/tearDown hooks and a helper to pretty-print a
    generated packet object.
    """

    def __init__(self):
        pass

    @staticmethod
    def print_packet(pkt_obj):
        """Print *pkt_obj*'s repr to stdout with a banner line.

        Uses the print() call form, which prints identically under Python 2
        (where the original `print "..."` statement was a syntax error on
        Python 3) and Python 3.
        """
        print("\nGenerated packet:\n{}".format(repr(pkt_obj)))

    def setUp(self):
        pass

    def tearDown(self):
        pass
+
# Nothing to run standalone; tests are discovered and run by nose.
if __name__ == "__main__":
    pass
diff --git a/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py b/scripts/automation/regression/functional_tests/platform_cmd_cache_test.py
new file mode 100755 (executable)
index 0000000..24ccf7a
--- /dev/null
@@ -0,0 +1,60 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
class CCommandCache_Test(functional_general_test.CGeneralFunctional_Test):
    """Unit tests for CCommandCache: caching of IF/conf/exec platform commands."""

    def setUp(self):
        # NOTE: the first line here was mis-indented (tab vs. spaces) in the
        # original, which is an error under Python 3; indentation is uniform now.
        self.cache = CCommandCache()
        self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/1')
        self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/2')
        self.cache.add('conf', "arp 1.1.1.1 0000.0001.0000 arpa")
        self.cache.add('conf', "arp 1.1.2.1 0000.0002.0000 arpa")
        self.cache.add('exec', "show ip nbar protocol-discovery stats packet-count")

    def test_add(self):
        """Commands are grouped per (upper-cased) category; 'IF' keyed by interface."""
        assert_equal(self.cache.cache['IF'],
            {'GigabitEthernet0/0/1' : ['ip nbar protocol-discovery'],
             'GigabitEthernet0/0/2' : ['ip nbar protocol-discovery']
            })
        assert_equal(self.cache.cache['CONF'],
            ["arp 1.1.1.1 0000.0001.0000 arpa",
             "arp 1.1.2.1 0000.0002.0000 arpa"]
            )
        assert_equal(self.cache.cache['EXEC'],
            ["show ip nbar protocol-discovery stats packet-count"])

    def test_dump_config(self):
        """dump_config() writes the flattened command sequence to stdout."""
        import sys
        try:
            from StringIO import StringIO   # Python 2
        except ImportError:
            from io import StringIO         # Python 3
        saved_stdout = sys.stdout
        try:
            out = StringIO()
            sys.stdout = out
            self.cache.dump_config()
            output = out.getvalue().strip()
            assert_equal(output,
                "configure terminal\ninterface GigabitEthernet0/0/1\nip nbar protocol-discovery\ninterface GigabitEthernet0/0/2\nip nbar protocol-discovery\nexit\narp 1.1.1.1 0000.0001.0000 arpa\narp 1.1.2.1 0000.0002.0000 arpa\nexit\nshow ip nbar protocol-discovery stats packet-count"
                )
        finally:
            # always restore stdout, even if the assertion fails
            sys.stdout = saved_stdout

    def test_get_config_list(self):
        """get_config_list() returns the same flattened sequence as a list."""
        assert_equal(self.cache.get_config_list(),
            ["configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count"]
            )

    def test_clear_cache(self):
        """clear_cache() resets all three categories to their empty shapes."""
        self.cache.clear_cache()
        assert_equal(self.cache.cache,
            {"IF"   : {},
             "CONF" : [],
             "EXEC" : []}
            )

    def tearDown(self):
        self.cache.clear_cache()
diff --git a/scripts/automation/regression/functional_tests/platform_cmd_link_test.py b/scripts/automation/regression/functional_tests/platform_cmd_link_test.py
new file mode 100755 (executable)
index 0000000..7a31815
--- /dev/null
@@ -0,0 +1,62 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
class CCommandLink_Test(functional_general_test.CGeneralFunctional_Test):
    """Tests for CCommandLink: command dispatch and history bookkeeping."""

    # flattened command sequence produced when the cache built in setUp runs
    CACHED_HISTORY = ["configure terminal", "interface GigabitEthernet0/0/1", "ip nbar protocol-discovery", "interface GigabitEthernet0/0/2", "ip nbar protocol-discovery", "exit", "arp 1.1.1.1 0000.0001.0000 arpa", "arp 1.1.2.1 0000.0002.0000 arpa", "exit", "show ip nbar protocol-discovery stats packet-count"]

    def setUp(self):
        self.cache = CCommandCache()
        self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/1')
        self.cache.add('IF', "ip nbar protocol-discovery", 'GigabitEthernet0/0/2')
        self.cache.add('conf', "arp 1.1.1.1 0000.0001.0000 arpa")
        self.cache.add('conf', "arp 1.1.2.1 0000.0002.0000 arpa")
        self.cache.add('exec', "show ip nbar protocol-discovery stats packet-count")
        self.com_link = CCommandLink()

    def test_transmit(self):
        # placeholder for a future implementation of the physical platform link
        pass

    def test_run_cached_command(self):
        """Both run_command and run_single_command flatten a cache identically."""
        self.com_link.run_command([self.cache])
        assert_equal(self.com_link.get_history(), self.CACHED_HISTORY)

        self.com_link.clear_history()
        self.com_link.run_single_command(self.cache)
        assert_equal(self.com_link.get_history(), self.CACHED_HISTORY)

    def test_run_single_command(self):
        single_cmd = "show ip nbar protocol-discovery stats packet-count"
        self.com_link.run_single_command(single_cmd)
        assert_equal(self.com_link.get_history(), [single_cmd])

    def test_run_mixed_commands(self):
        """History records raw strings and flattened caches in issue order."""
        self.com_link.run_single_command("show ip nbar protocol-discovery stats packet-count")
        self.com_link.run_command([self.cache])
        self.com_link.run_command(["show ip interface brief"])

        expected = ["show ip nbar protocol-discovery stats packet-count"]
        expected += self.CACHED_HISTORY
        expected += ["show ip interface brief"]
        assert_equal(self.com_link.get_history(), expected)

    def test_clear_history(self):
        self.com_link.run_command(["show ip interface brief"])
        self.com_link.clear_history()
        assert_equal(self.com_link.get_history(), [])

    def tearDown(self):
        self.cache.clear_cache()
+
diff --git a/scripts/automation/regression/functional_tests/platform_device_cfg_test.py b/scripts/automation/regression/functional_tests/platform_device_cfg_test.py
new file mode 100755 (executable)
index 0000000..3935a4c
--- /dev/null
@@ -0,0 +1,20 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
class CDeviceCfg_Test(functional_general_test.CGeneralFunctional_Test):
    """Tests for CDeviceCfg: loading the device configuration YAML."""

    def setUp(self):
        # indentation normalized (original line mixed tab/space, an error on Python 3)
        self.dev_cfg = CDeviceCfg('./functional_tests/config.yaml')

    def test_get_interfaces_cfg(self):
        """Parsed interface configuration matches the golden structure from config.yaml."""
        assert_equal (self.dev_cfg.get_interfaces_cfg(), 
               [{'client': {'src_mac_addr': '0000.0001.0000', 'name': 'GigabitEthernet0/0/1', 'dest_mac_addr': '0000.1000.0000'}, 'vrf_name': None, 'server': {'src_mac_addr': '0000.0002.0000', 'name': 'GigabitEthernet0/0/2', 'dest_mac_addr': '0000.2000.0000'}}, {'client': {'src_mac_addr': '0000.0003.0000', 'name': 'GigabitEthernet0/0/3', 'dest_mac_addr': '0000.3000.0000'}, 'vrf_name': 'dup', 'server': {'src_mac_addr': '0000.0004.0000', 'name': 'GigabitEthernet0/0/4', 'dest_mac_addr': '0000.4000.0000'}}]
               )

    def tearDown(self):
        pass
diff --git a/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py b/scripts/automation/regression/functional_tests/platform_dual_if_obj_test.py
new file mode 100755 (executable)
index 0000000..ff54b9e
--- /dev/null
@@ -0,0 +1,31 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
class CDualIfObj_Test(functional_general_test.CGeneralFunctional_Test):
    """Tests for CDualIfObj: a client/server interface pair, optionally in a VRF."""

    def setUp(self):
        # indentation normalized (original first line mixed tab/space, an error on Python 3)
        self.if_1   = CIfObj('gig0/0/1', '1.1.1.1', '2001:DB8:0:2222:0:0:0:1', '0000.0001.0000', '0000.0001.0000', IFType.Client)
        self.if_2   = CIfObj('gig0/0/2', '1.1.2.1', '2001:DB8:1:2222:0:0:0:1', '0000.0002.0000', '0000.0002.0000', IFType.Server)
        self.if_3   = CIfObj('gig0/0/3', '1.1.3.1', '2001:DB8:2:2222:0:0:0:1', '0000.0003.0000', '0000.0003.0000', IFType.Client)
        self.if_4   = CIfObj('gig0/0/4', '1.1.4.1', '2001:DB8:3:2222:0:0:0:1', '0000.0004.0000', '0000.0004.0000', IFType.Server)
        self.dual_1 = CDualIfObj(None, self.if_1, self.if_2)
        self.dual_2 = CDualIfObj('dup', self.if_3, self.if_4)

    def test_id_allocation(self):
        # ids are allocated monotonically and stay below the class-wide counter
        assert (self.dual_1.get_id() < self.dual_2.get_id() < CDualIfObj._obj_id)

    def test_get_vrf_name(self):
        assert_equal ( self.dual_1.get_vrf_name() , None )
        assert_equal ( self.dual_2.get_vrf_name() , 'dup' )

    def test_is_duplicated(self):
        # a pair is "duplicated" exactly when it carries a VRF name
        assert_equal ( self.dual_1.is_duplicated() , False )
        assert_equal ( self.dual_2.is_duplicated() , True )

    def tearDown(self):
        pass
\ No newline at end of file
diff --git a/scripts/automation/regression/functional_tests/platform_if_manager_test.py b/scripts/automation/regression/functional_tests/platform_if_manager_test.py
new file mode 100755 (executable)
index 0000000..b09e8d7
--- /dev/null
@@ -0,0 +1,40 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
class CIfManager_Test(functional_general_test.CGeneralFunctional_Test):
    """Tests for CIfManager: loading and querying the interface inventory."""

    def setUp(self):
        self.dev_cfg = CDeviceCfg('./functional_tests/config.yaml')
        self.if_mng  = CIfManager()

    # main testing method to check the entire class
    def test_load_config(self):
        self.if_mng.load_config(self.dev_cfg)

        # check the number of items in each query
        assert_equal( len(self.if_mng.get_if_list()), 4 )
        assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client)), 2 )
        assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client, is_duplicated = True)), 1 )
        assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Client, is_duplicated = False)), 1 )
        assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server)), 2 )
        assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server, is_duplicated = True)), 1 )
        assert_equal( len(self.if_mng.get_if_list(if_type = IFType.Server, is_duplicated = False)), 1 )
        assert_equal( len(self.if_mng.get_duplicated_if()), 2 )
        assert_equal( len(self.if_mng.get_dual_if_list()), 2 )

        # check the classification with intf name.
        # map() is wrapped in list(): on Python 3 map returns an iterator,
        # which never compares equal to a list (identical result on Python 2).
        assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list() )), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2','GigabitEthernet0/0/3','GigabitEthernet0/0/4'] )
        assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = True) )), ['GigabitEthernet0/0/3','GigabitEthernet0/0/4'] )
        assert_equal( list(map(CIfObj.get_name, self.if_mng.get_if_list(is_duplicated = False) )), ['GigabitEthernet0/0/1','GigabitEthernet0/0/2'] )
        assert_equal( list(map(CIfObj.get_name, self.if_mng.get_duplicated_if() )), ['GigabitEthernet0/0/3', 'GigabitEthernet0/0/4'] )

        # check the classification with vrf name
        assert_equal( list(map(CDualIfObj.get_vrf_name, self.if_mng.get_dual_if_list() )), [None, 'dup'] )

    def tearDown(self):
        pass
diff --git a/scripts/automation/regression/functional_tests/platform_if_obj_test.py b/scripts/automation/regression/functional_tests/platform_if_obj_test.py
new file mode 100755 (executable)
index 0000000..534d417
--- /dev/null
@@ -0,0 +1,49 @@
+#!/router/bin/python
+
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+
+
class CIfObj_Test(functional_general_test.CGeneralFunctional_Test):
    """Tests for CIfObj: a single router interface description object."""

    test_idx = 1

    def setUp(self):
        self.client_if = CIfObj('gig0/0/1', '1.1.1.1', '2001:DB8:0:2222:0:0:0:1', '0000.0001.0000', '0000.0001.0000', IFType.Client)
        self.server_if = CIfObj('TenGig0/0/0', '1.1.2.1', '2001:DB8:1:2222:0:0:0:1', '0000.0002.0000', '0000.0002.0000', IFType.Server)
        CIfObj_Test.test_idx += 1

    def test_id_allocation(self):
        # ids grow monotonically and stay below the class-wide counter
        assert (self.client_if.get_id() < self.server_if.get_id() < CIfObj._obj_id)

    def test_isClient(self):
        assert_equal(self.client_if.is_client(), True)

    def test_isServer(self):
        assert_equal(self.server_if.is_server(), True)

    def test_get_name(self):
        assert_equal(self.client_if.get_name(), 'gig0/0/1')
        assert_equal(self.server_if.get_name(), 'TenGig0/0/0')

    def test_get_src_mac_addr(self):
        assert_equal(self.client_if.get_src_mac_addr(), '0000.0001.0000')

    def test_get_dest_mac(self):
        assert_equal(self.server_if.get_dest_mac(), '0000.0002.0000')

    def test_get_ipv4_addr(self):
        assert_equal(self.client_if.get_ipv4_addr(), '1.1.1.1')
        assert_equal(self.server_if.get_ipv4_addr(), '1.1.2.1')

    def test_get_ipv6_addr(self):
        assert_equal(self.client_if.get_ipv6_addr(), '2001:DB8:0:2222:0:0:0:1')
        assert_equal(self.server_if.get_ipv6_addr(), '2001:DB8:1:2222:0:0:0:1')

    def test_get_type(self):
        assert_equal(self.client_if.get_if_type(), IFType.Client)
        assert_equal(self.server_if.get_if_type(), IFType.Server)

    def tearDown(self):
        pass
diff --git a/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py b/scripts/automation/regression/functional_tests/scapy_pkt_builder_test.py
new file mode 100644 (file)
index 0000000..eaff953
--- /dev/null
@@ -0,0 +1,368 @@
+#!/router/bin/python
+
+import pkt_bld_general_test
+
+#HACK FIX ME START
+import sys
+import os
+
+CURRENT_PATH        = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(CURRENT_PATH, '../../../trex_control_plane/stl/'))
+#HACK FIX ME END
+from trex_stl_lib.trex_stl_packet_builder_scapy import *
+
+from scapy.all import *
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import assert_raises
+from nose.tools import raises
+import os
+import random
+import pprint
+
class CTRexPktBuilderSanitySCapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
    """Sanity tests for the scapy-based TRex packet builder.

    Covers: VM instruction generation (flow vars, checksum fix, tuple gen,
    random trim), scapy field-offset resolution, packet construction of
    various sizes/encapsulations, and loading packets from pcap files.
    NOTE: this module is Python 2 style (print statements, str-indexed pcap
    bytes) — keep that in mind when porting.
    """

    def setUp(self):
        pass

    def test_simple_vm1(self):
        """Inc flow-var written to IP.src + IPv4 checksum fix; checks the exact VM JSON."""
        raw1 = CTRexScRaw( [ CTRexVmDescFlowVar(name="a",min_value="16.0.0.1",max_value="16.0.0.10",init_value="16.0.0.1",size=4,op="inc"),
                              CTRexVmDescWrFlowVar (fv_name="a",pkt_offset= "IP.src"),
                              CTRexVmDescFixIpv4(offset = "IP")]
                          );

        pkt_builder = CScapyTRexPktBuilder();

        py='5'*128
        pkt=Ether()/ \
                 IP(src="16.0.0.1",dst="48.0.0.1")/ \
                 UDP(dport=12,sport=1025)/IP()/py

        # set packet 
        pkt_builder.set_packet(pkt);
        pkt_builder.add_command ( raw1 )
        pkt_builder.compile();

        pkt_builder.dump_scripts ()

        print pkt_builder.get_vm_data()

        # 268435457 == int value of "16.0.0.1"; pkt_offset 26 == Ether(14) + IP.src(12)
        assert_equal( pkt_builder.get_vm_data(), {'split_by_var': '', 'instructions': [{'name': 'a', 'max_value': 268435466, 'min_value': 268435457, 'init_value': 268435457, 'size': 4, 'type': 'flow_var', 'step':1,'op': 'inc'}, {'is_big_endian': True, 'pkt_offset': 26, 'type': 'write_flow_var',  'name': 'a', 'add_value': 0}, {'pkt_offset': 14, 'type': 'fix_checksum_ipv4'}]} )


    def test_simple_no_vm1(self):
        """A builder with no VM commands compiles to an empty instruction list."""
        pkt_builder = CScapyTRexPktBuilder();

        py='5'*128
        pkt=Ether()/ \
                 IP(src="16.0.0.1",dst="48.0.0.1")/ \
                 UDP(dport=12,sport=1025)/IP()/py

        # set packet 
        pkt_builder.set_packet(pkt);

        pkt_builder.compile();

        pkt_builder.dump_scripts ()

        assert_equal( pkt_builder.get_vm_data(),
                {   'instructions': [ ],
                    'split_by_var': ''}
        )


    def test_simple_mac_default(self):
        """Default-MAC detection flips per side only when that side is set explicitly."""
        pkt =  Ether()/IP()/UDP()

        pkt_builder = CScapyTRexPktBuilder(pkt = pkt);

        assert_equal( pkt_builder.is_default_src_mac () ,True)
        assert_equal( pkt_builder.is_default_dst_mac () ,True)

        pkt =  Ether(src="00:00:00:00:00:01")/IP()/UDP()

        pkt_builder = CScapyTRexPktBuilder(pkt = pkt);

        assert_equal( pkt_builder.is_default_src_mac (), False)
        assert_equal( pkt_builder.is_default_dst_mac (), True)

        pkt =  Ether(dst="00:00:00:00:00:01")/IP()/UDP()

        pkt_builder = CScapyTRexPktBuilder(pkt = pkt);

        assert_equal( pkt_builder.is_default_src_mac (),True)
        assert_equal(  pkt_builder.is_default_dst_mac (),False)


    def test_simple_teredo(self):
        """IPv6-in-UDP (Teredo) encapsulation: inner IPv6 field offsets resolve correctly."""
        pkt =  Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=3797,sport=3544)/IPv6(src="2001:0:4137:9350:8000:f12a:b9c8:2815",dst="2001:4860:0:2001::68")/UDP(dport=12,sport=1025)/ICMPv6Unknown()

        pkt.build();
        p_utl=CTRexScapyPktUtl(pkt);

        # (offset, size-in-bytes) of the inner IPv6 addresses
        assert_equal( p_utl.get_field_offet_by_str("IPv6.src"), (50,16) )
        assert_equal( p_utl.get_field_offet_by_str("IPv6.dst"), (66,16) )


    def test_simple_scapy_vlan(self):
        """Double-tagged (QinQ) packet: layer/field offsets, 802.1Q addressed as 802|1Q."""
        py='5'*(9)
        p1=Ether(src="00:00:00:01:00:00",dst="00:00:00:01:00:00")/ \
                 Dot1Q(vlan=12)/ \
                 Dot1Q(vlan=17)/ \
                 IP(src="10.0.0.10",dst="48.0.0.1")/ \
                 UDP(dport=12,sport=1025)/py

        p1.build();
        p1.dump_layers_offset()
        p1.show2();
        hexdump(p1);
        #wrpcap("ipv4_udp_9k.pcap", p1);

        p_utl=CTRexScapyPktUtl(p1);

        assert_equal(p_utl.get_pkt_layers(),"Ethernet:802.1Q:802.1Q:IP:UDP:Raw")
        assert_equal(p_utl.layer_offset("802.1Q",0),14);
        assert_equal(p_utl.layer_offset("802.1Q",1),18);
        # vlan is a bitfield, hence size 0; ':1' selects the second 802.1Q layer
        assert_equal(p_utl.get_field_offet_by_str("802|1Q.vlan"),(14,0));
        assert_equal(p_utl.get_field_offet_by_str("802|1Q:1.vlan"),(18,0));
        assert_equal(p_utl.get_field_offet_by_str("IP.src"),(34,4));

    def test_simple_scapy_128_udp(self):
        """
        build 128 byte packet with 0x35 as pyld
        """


        pkt_size =128 
        p1=Ether(src="00:00:00:01:00:00",dst="00:00:00:01:00:00")/ \
                 IP(src="16.0.0.1",dst="48.0.0.1")/ \
                 UDP(dport=12,sport=1025)
        pyld_size=pkt_size-len(p1);

        pkt=p1/('5'*(pyld_size))

        pkt.show2();
        hexdump(pkt);
        assert_equal(len(pkt),128)

    def test_simple_scapy_9k_ip_len(self):
        """
        build 9k ipv4 len packet (total length is 9k + 14 bytes of Ethernet header)
        """


        ip_pkt_size =9*1024
        p_l2=Ether(src="00:00:00:01:00:00",dst="00:00:00:01:00:00");
        p_l3=    IP(src="16.0.0.1",dst="48.0.0.1")/ \
                 UDP(dport=12,sport=1025)
        pyld_size = ip_pkt_size-len(p_l3);

        pkt=p_l2/p_l3/('\x55'*(pyld_size))

        #pkt.show2();
        #hexdump(pkt);
        assert_equal(len(pkt),9*1024+14)

    def test_simple_scapy_ipv6_1(self):
        """
        build ipv6 packet and verify the IPv6.src field offset
        """

        print "start "
        py='\x55'*(64)

        p=Ether()/IPv6()/UDP(dport=12,sport=1025)/py
        #p.build();
        #p.dump_layers_offset()
        hexdump(p);
        p.show2();

        p_utl=CTRexScapyPktUtl(p);

        # 22 == Ether(14) + IPv6 src field offset(8); size 16 bytes
        assert_equal(p_utl.get_field_offet_by_str("IPv6.src"),(22,16));


    def test_simple_vm2(self):
        """Write a flow-var into a VLAN tag using offset_fixup (vlan is a bitfield)."""
        raw1 = CTRexScRaw( [ CTRexVmDescFlowVar(name="my_valn",min_value=0,max_value=10,init_value=2,size=1,op="inc"),
                             CTRexVmDescWrFlowVar (fv_name="my_valn",pkt_offset= "802|1Q.vlan" ,offset_fixup=3) # fix the offset as valn is bitfield and not supported right now 
                              ]
                          );

        pkt_builder = CScapyTRexPktBuilder();

        py='5'*128
        pkt=Ether()/ \
        Dot1Q(vlan=12)/ \
                 IP(src="16.0.0.1",dst="48.0.0.1")/ \
                 UDP(dport=12,sport=1025)/IP()/py

        # set packet 
        pkt_builder.set_packet(pkt);
        pkt_builder.add_command ( raw1 )
        pkt_builder.compile();


        d= pkt_builder.get_vm_data()
        # 17 == 802.1Q offset(14) + offset_fixup(3)
        assert_equal(d['instructions'][1]['pkt_offset'],17)

    def test_simple_vm3(self):
        """Referencing an undefined flow-var must raise CTRexPacketBuildException."""
        try:
            raw1 = CTRexScRaw( [ CTRexVmDescFlowVar(name="my_valn",min_value=0,max_value=10,init_value=2,size=1,op="inc"),
                                 CTRexVmDescWrFlowVar (fv_name="my_valn_err",pkt_offset= "802|1Q.vlan" ,offset_fixup=3) # fix the offset as valn is bitfield and not supported right now 
                                  ]
                              );
    
            pkt_builder = CScapyTRexPktBuilder();
    
            py='5'*128
            pkt=Ether()/ \
            Dot1Q(vlan=12)/ \
                     IP(src="16.0.0.1",dst="48.0.0.1")/ \
                     UDP(dport=12,sport=1025)/IP()/py
    
            # set packet 
            pkt_builder.set_packet(pkt);
            pkt_builder.add_command ( raw1 )
            pkt_builder.compile();
    
    
            d= pkt_builder.get_vm_data()
        except  CTRexPacketBuildException as e:
            assert_equal(str(e), "[errcode:-11] 'variable my_valn_err does not exists  '")

    def test_simple_tuple_gen(self):
        """Tuple generator: ip -> IP.src and port -> UDP.sport offsets in a QinQ packet."""
        vm = CTRexScRaw( [ CTRexVmDescTupleGen (name="tuple"), # define tuple gen 
                             CTRexVmDescWrFlowVar (fv_name="tuple.ip", pkt_offset= "IP.src" ), # write ip to packet IP.src
                             CTRexVmDescFixIpv4(offset = "IP"),                                # fix checksum
                             CTRexVmDescWrFlowVar (fv_name="tuple.port", pkt_offset= "UDP.sport" )  #write udp.port
                                  ]
                              );
        pkt_builder = CScapyTRexPktBuilder();

        py='5'*128
        pkt=Ether()/ \
        Dot1Q(vlan=12)/ \
                 IP(src="16.0.0.1",dst="48.0.0.1")/ \
                 UDP(dport=12,sport=1025)/IP()/py

        # set packet 
        pkt_builder.set_packet(pkt);
        pkt_builder.add_command ( vm )
        pkt_builder.compile();
        d= pkt_builder.get_vm_data()
        pkt_builder.dump_vm_data_as_yaml()

        # offsets are shifted by 4 relative to the untagged case (one 802.1Q tag)
        assert_equal(d['instructions'][1]['pkt_offset'],30)
        assert_equal(d['instructions'][3]['pkt_offset'],38)

    def test_simple_random_pkt_size(self):
        """Random trim VM: write trimmed size into IP.len/UDP.len with negative fixups."""
        ip_pkt_size = 9*1024
        p_l2 = Ether();
        p_l3 = IP(src="16.0.0.1",dst="48.0.0.1")
        p_l4 = UDP(dport=12,sport=1025)
        pyld_size = ip_pkt_size-len(p_l3/p_l4);

        pkt =p_l2/p_l3/p_l4/('\x55'*(pyld_size))

        # length fields exclude the preceding headers, hence the negative add_val
        l3_len_fix =-(len(p_l2));
        l4_len_fix =-(len(p_l2/p_l3));

        vm = CTRexScRaw( [ CTRexVmDescFlowVar(name="fv_rand", min_value=64, max_value=len(pkt), size=2, op="random"),
                           CTRexVmDescTrimPktSize("fv_rand"), # total packet size
                           CTRexVmDescWrFlowVar(fv_name="fv_rand", pkt_offset= "IP.len", add_val=l3_len_fix), 
                           CTRexVmDescFixIpv4(offset = "IP"),                                # fix checksum
                           CTRexVmDescWrFlowVar(fv_name="fv_rand", pkt_offset= "UDP.len", add_val=l4_len_fix)  
                          ]
                       )
        pkt_builder = CScapyTRexPktBuilder();

        # set packet 
        pkt_builder.set_packet(pkt);
        pkt_builder.add_command ( vm )
        pkt_builder.compile();
        d= pkt_builder.get_vm_data()
        pkt_builder.dump_vm_data_as_yaml()

        assert_equal(d['instructions'][0]['max_value'],9230)
        assert_equal(d['instructions'][2]['pkt_offset'],16)
        assert_equal(d['instructions'][4]['pkt_offset'],38)

    def test_simple_pkt_loader(self):
        """Smoke-test RawPcapReader on a golden capture (first record only)."""
        p=RawPcapReader("functional_tests/golden/basic_imix_golden.cap")
        print ""
        for pkt in p:
            print pkt[1]
            print hexdump(str(pkt[0]))
            break;

    def test_simple_pkt_loader1(self):
        """Load a raw (non-parsed) packet from pcap and spot-check bytes and length."""
        pkt_builder = CScapyTRexPktBuilder(pkt = "functional_tests/golden/udp_590.cap", build_raw = False);
        print ""
        pkt_builder.dump_as_hex()
        r = pkt_builder.pkt_raw
        assert_equal(ord(r[1]),0x50)
        assert_equal(ord(r[0]),0x00)
        assert_equal(ord(r[0x240]),0x16)
        assert_equal(ord(r[0x24d]),0x79)
        assert_equal(len(r),590)

        print len(r)

    def test_simple_pkt_loader2(self):
        """Load a pcap with parsing enabled and verify the detected layer stack."""
        pkt_builder = CScapyTRexPktBuilder(pkt = "functional_tests/golden/basic_imix_golden.cap");
        assert_equal(pkt_builder.pkt_layers_desc (), "Ethernet:IP:UDP:Raw");

    def test_simple_pkt_loader3(self):
        """Round-trip a built packet through its string form and re-parse with Ether()."""
        #pkt_builder = CScapyTRexPktBuilder(pkt = "stl/golden/basic_imix_golden.cap");
        #r = pkt_builder.pkt_raw
        #print ""
        #hexdump(str(r))


        #print pkt_builder.pkt_layers_desc ()


        #pkt_builder.set_packet(pkt);

        py='\x55'*(64)

        p=Ether()/IP()/UDP(dport=12,sport=1025)/py
        pkt_str = str(p);
        print ""
        hexdump(pkt_str);
        scapy_pkt = Ether(pkt_str);
        scapy_pkt.show2();

    def tearDown(self):
        pass
+
+
class CTRexPktBuilderScapy_Test(pkt_bld_general_test.CGeneralPktBld_Test):
    """Placeholder suite kept from the legacy dpkt-based packet builder."""

    def setUp(self):
        # intentionally disabled remnants of the dpkt-based builder setup
        #self.pkt_bld = CTRexPktBuilder()
        #self.pkt_bld.add_pkt_layer("l2", dpkt.ethernet.Ethernet())
        #self.pp = pprint.PrettyPrinter(indent=4)
        pass

    def tearDown(self):
        pass
+
+
# Nothing to run standalone; tests are discovered and run by nose.
if __name__ == "__main__":
    pass
+
diff --git a/scripts/automation/regression/functional_tests/stl_basic_tests.py b/scripts/automation/regression/functional_tests/stl_basic_tests.py
new file mode 100644 (file)
index 0000000..ea51540
--- /dev/null
@@ -0,0 +1,263 @@
+
+import outer_packages
+from platform_cmd_link import *
+import functional_general_test
+from nose.tools import assert_equal
+from nose.tools import assert_not_equal
+from nose.tools import nottest
+from nose.plugins.attrib import attr
+from trex import CTRexScenario
+from dpkt import pcap
+from trex_stl_lib import trex_stl_sim
+from trex_stl_lib.trex_stl_streams import STLProfile
+import sys
+import os
+import subprocess
+import shlex
+from threading import Thread
+
+@attr('run_on_trex')
+class CStlBasic_Test(functional_general_test.CGeneralFunctional_Test):
+    def setUp (self):
+        self.test_path = os.path.abspath(os.getcwd())
+        self.scripts_path = CTRexScenario.scripts_path
+
+        self.verify_exists(os.path.join(self.scripts_path, "bp-sim-64-debug"))
+
+        self.stl_sim = os.path.join(self.scripts_path, "stl-sim")
+
+        self.verify_exists(self.stl_sim)
+
+        self.profiles_path = os.path.join(self.scripts_path, "stl/yaml/")
+
+        self.profiles = {}
+        self.profiles['imix_3pkt'] = os.path.join(self.profiles_path, "imix_3pkt.yaml")
+        self.profiles['imix_3pkt_vm'] = os.path.join(self.profiles_path, "imix_3pkt_vm.yaml")
+        self.profiles['random_size_9k'] = os.path.join(self.profiles_path, "../udp_rand_len_9k.py")
+        self.profiles['imix_tuple_gen'] = os.path.join(self.profiles_path, "imix_1pkt_tuple_gen.yaml")
+
+        for k, v in self.profiles.iteritems():
+            self.verify_exists(v)
+
+        self.valgrind_profiles = [ self.profiles['imix_3pkt_vm'],
+                                   self.profiles['random_size_9k'],
+                                   self.profiles['imix_tuple_gen'] ]
+
+        self.golden_path = os.path.join(self.test_path,"stl/golden/")
+
+        os.chdir(self.scripts_path)
+
+
+    def tearDown (self):
+        os.chdir(self.test_path)
+
+
+
+    def get_golden (self, name):
+        golden = os.path.join(self.golden_path, name)
+        self.verify_exists(golden)
+        return golden
+
+
+    def verify_exists (self, name):
+        if not os.path.exists(name):
+            raise Exception("cannot find '{0}'".format(name))
+
+
+    def compare_caps (self, cap1, cap2, max_diff_sec = 0.01):
+        with open(cap1, 'r') as f1:
+            reader1 = pcap.Reader(f1)
+            pkts1 = reader1.readpkts()
+
+        with open(cap2, 'r') as f2:
+            reader2 = pcap.Reader(f2)
+            pkts2 = reader2.readpkts()
+
+        assert_equal(len(pkts1), len(pkts2))
+
+        for pkt1, pkt2, i in zip(pkts1, pkts2, xrange(1, len(pkts1))):
+            ts1 = pkt1[0]
+            ts2 = pkt2[0]
+            if abs(ts1-ts2) > 0.000005: # 5 nsec
+                raise AssertionError("TS error: cap files '{0}', '{1}' differ in cap #{2} - '{3}' vs. '{4}'".format(cap1, cap2, i, ts1, ts2))
+
+            if pkt1[1] != pkt2[1]:
+                raise AssertionError("RAW error: cap files '{0}', '{1}' differ in cap #{2}".format(cap1, cap2, i))
+
+
+
+    def run_sim (self, yaml, output, options = "", silent = False, obj = None):
+        if output:
+            user_cmd = "-f {0} -o {1} {2}".format(yaml, output, options)
+        else:
+            user_cmd = "-f {0} {1}".format(yaml, options)
+
+        if silent:
+            user_cmd += " --silent"
+
+        rc = trex_stl_sim.main(args = shlex.split(user_cmd))
+        if obj:
+            obj['rc'] = (rc == 0)
+
+        return (rc == 0)
+
+
+
+    def run_py_profile_path (self, profile, options,silent = False, do_no_remove=False,compare =True, test_generated=True, do_no_remove_generated = False):
+        output_cap = "a.pcap"
+        input_file =  os.path.join('stl/', profile)
+        golden_file = os.path.join('exp',os.path.basename(profile).split('.')[0]+'.pcap');
+        if os.path.exists(output_cap):
+            os.unlink(output_cap)
+        try:
+            rc = self.run_sim(input_file, output_cap, options, silent)
+            assert_equal(rc, True)
+            #s='cp  '+output_cap+' '+golden_file;
+            #print s
+            #os.system(s)
+
+            if compare:
+                self.compare_caps(output_cap, golden_file)
+        finally:
+            if not do_no_remove:
+                os.unlink(output_cap)
+        if test_generated:
+            try:
+                generated_filename = input_file.replace('.py', '_GENERATED.py').replace('.yaml', '_GENERATED.py')
+                if input_file.endswith('.py'):
+                    profile = STLProfile.load_py(input_file)
+                elif input_file.endswith('.yaml'):
+                    profile = STLProfile.load_yaml(input_file)
+                profile.dump_to_code(generated_filename)
+                
+                rc = self.run_sim(generated_filename, output_cap, options, silent)
+                assert_equal(rc, True)
+
+                if compare:
+                    self.compare_caps(output_cap, golden_file)
+            except Exception as e:
+                print e
+            finally:
+                if not do_no_remove_generated:
+                    os.unlink(generated_filename)
+                    os.unlink(generated_filename + 'c')
+                if not do_no_remove:
+                    os.unlink(output_cap)
+
+
+    def test_stl_profiles (self):
+
+        p = [
+            ["udp_1pkt_1mac_override.py","-m 1 -l 50",True],
+            ["syn_attack.py","-m 1 -l 50",True],               # can't compare random now
+            ["udp_1pkt_1mac.py","-m 1 -l 50",True],
+            ["udp_1pkt_mac.py","-m 1 -l 50",True],
+            ["udp_1pkt.py","-m 1 -l 50",True],
+            ["udp_1pkt_tuple_gen.py","-m 1 -l 50",True],
+            ["udp_rand_len_9k.py","-m 1 -l 50",True],           # can't do the compare
+            ["udp_1pkt_mpls.py","-m 1 -l 50",True],
+            ["udp_1pkt_mpls_vm.py","-m 1 ",True],
+            ["imix.py","-m 1 -l 100",True],
+            ["udp_inc_len_9k.py","-m 1 -l 100",True],
+            ["udp_1pkt_range_clients.py","-m 1 -l 100",True],
+            ["multi_burst_2st_1000pkt.py","-m 1 -l 100",True],
+            ["pcap.py", "-m 1", True],
+            ["pcap_with_vm.py", "-m 1", True],
+
+            # YAML test
+            ["yaml/burst_1000_pkt.yaml","-m 1 -l 100",True],
+            ["yaml/burst_1pkt_1burst.yaml","-m 1 -l 100",True],
+            ["yaml/burst_1pkt_vm.yaml","-m 1 -l 100",True],
+            ["yaml/imix_1pkt.yaml","-m 1 -l 100",True],
+            ["yaml/imix_1pkt_2.yaml","-m 1 -l 100",True],
+            ["yaml/imix_1pkt_tuple_gen.yaml","-m 1 -l 100",True],
+            ["yaml/imix_1pkt_vm.yaml","-m 1 -l 100",True],
+            ["udp_1pkt_pcap.py","-m 1 -l 10",True],
+            ["udp_3pkt_pcap.py","-m 1 -l 10",True],
+            #["udp_1pkt_simple.py","-m 1 -l 3",True],
+            ["udp_1pkt_pcap_relative_path.py","-m 1 -l 3",True],
+            ["udp_1pkt_tuple_gen_split.py","-m 1 -c 2 -l 100",True],
+            ["udp_1pkt_range_clients_split.py","-m 1 -c 2 -l 100",True],
+            ["udp_1pkt_vxlan.py","-m 1 -c 1 -l 17",True, False], # can't generate: no VXLAN in Scapy, only in profile
+            ["udp_1pkt_ipv6_in_ipv4.py","-m 1 -c 1 -l 17",True],
+            ["yaml/imix_3pkt.yaml","-m 50kpps --limit 20 --cores 2",True],
+            ["yaml/imix_3pkt_vm.yaml","-m 50kpps --limit 20 --cores 2",True],
+            ["udp_1pkt_simple_mac_dst.py","-m 1 -l 1 ",True],
+            ["udp_1pkt_simple_mac_src.py","-m 1 -l 1 ",True],
+            ["udp_1pkt_simple_mac_dst_src.py","-m 1 -l 1 ",True],
+            ["burst_3st_loop_x_times.py","-m 1 -l 20 ",True],
+            ["udp_1pkt_mac_step.py","-m 1 -l 20 ",True],
+            ["udp_1pkt_mac_mask1.py","-m 1 -l 20 ",True] ,
+            ["udp_1pkt_mac_mask2.py","-m 1 -l 20 ",True],
+            ["udp_1pkt_mac_mask3.py","-m 1 -l 20 ",True],
+            ["udp_1pkt_simple_test2.py","-m 1 -l 10 ",True], # test split of packet with ip option
+            ["udp_1pkt_simple_test.py","-m 1 -l 10 ",True],
+            ["udp_1pkt_mac_mask5.py","-m 1 -l 30 ",True],
+            ["udp_1pkt_range_clients_split_garp.py","-m 1 -l 50",True]
+
+
+          ];
+
+
+        p1  = [ ["udp_1pkt_range_clients_split_garp.py","-m 1 -l 50",True] ]
+
+
+        for obj in p:
+            try:
+                test_generated = obj[3]
+            except: # check generated if not said otherwise
+                test_generated = True
+            self.run_py_profile_path (obj[0],obj[1],compare =obj[2], test_generated = test_generated, do_no_remove=True, do_no_remove_generated = False)
+
+
+    def test_hlt_profiles (self):
+        p = (
+            ['hlt/hlt_udp_inc_dec_len_9k.py', '-m 1 -l 20', True],
+            ['hlt/hlt_imix_default.py', '-m 1 -l 20', True],
+            ['hlt/hlt_imix_4rates.py', '-m 1 -l 20', True],
+            ['hlt/hlt_david1.py', '-m 1 -l 20', True],
+            ['hlt/hlt_david2.py', '-m 1 -l 20', True],
+            ['hlt/hlt_david3.py', '-m 1 -l 20', True],
+            ['hlt/hlt_david4.py', '-m 1 -l 20', True],
+            ['hlt/hlt_wentong1.py', '-m 1 -l 20', True],
+            ['hlt/hlt_wentong2.py', '-m 1 -l 20', True],
+            ['hlt/hlt_tcp_ranges.py', '-m 1 -l 20', True],
+            ['hlt/hlt_udp_ports.py', '-m 1 -l 20', True],
+            ['hlt/hlt_udp_random_ports.py', '-m 1 -l 20', True],
+            ['hlt/hlt_ip_ranges.py', '-m 1 -l 20', True],
+            ['hlt/hlt_framesize_vm.py', '-m 1 -l 20', True],
+            ['hlt/hlt_l3_length_vm.py', '-m 1 -l 20', True],
+            ['hlt/hlt_vlan_default.py', '-m 1 -l 20', True],
+            ['hlt/hlt_4vlans.py', '-m 1 -l 20', True],
+            ['hlt/hlt_vlans_vm.py', '-m 1 -l 20', True],
+            ['hlt/hlt_ipv6_default.py', '-m 1 -l 20', True],
+            ['hlt/hlt_ipv6_ranges.py', '-m 1 -l 20', True],
+            ['hlt/hlt_mac_ranges.py', '-m 1 -l 20', True],
+            )
+
+        for obj in p:
+            self.run_py_profile_path (obj[0], obj[1], compare =obj[2], do_no_remove=True, do_no_remove_generated = False)
+
+    # valgrind tests - this runs in multi thread as it safe (no output)
+    def test_valgrind_various_profiles (self):
+
+        print "\n"
+        threads = []
+        for profile in self.valgrind_profiles:
+            print "\n*** VALGRIND: testing profile '{0}' ***\n".format(profile)
+            obj = {'t': None, 'rc': None}
+            t = Thread(target = self.run_sim,
+                       kwargs = {'obj': obj, 'yaml': profile, 'output':None, 'options': "--cores 8 --limit 20 --valgrind", 'silent': True})
+            obj['t'] = t
+
+            threads.append(obj)
+            t.start()
+
+        for obj in threads:
+            obj['t'].join()
+
+        for obj in threads:
+            assert_equal(obj['rc'], True)
+
+
+
diff --git a/scripts/automation/regression/stateful_tests/tests_exceptions.py b/scripts/automation/regression/stateful_tests/tests_exceptions.py
new file mode 100755 (executable)
index 0000000..604efcc
--- /dev/null
@@ -0,0 +1,37 @@
+#!/router/bin/python
+
class TRexInUseError(Exception):
    """Raised when the TRex server is already occupied by another run."""

    def __init__(self, value):
        # keep the offending value so __str__ can report it
        self.value = value

    def __str__(self):
        return repr(self.value)
+
class TRexRunFailedError(Exception):
    """Raised when a TRex run terminates unsuccessfully."""

    def __init__(self, value):
        # keep the offending value so __str__ can report it
        self.value = value

    def __str__(self):
        return repr(self.value)
+
class TRexIncompleteRunError(Exception):
    """Raised when a TRex run stops before completing its planned duration."""

    def __init__(self, value):
        # keep the offending value so __str__ can report it
        self.value = value

    def __str__(self):
        return repr(self.value)
+
class TRexLowCpuUtilError(Exception):
    """Raised when measured CPU utilization is too low to trust the results."""

    def __init__(self, value):
        # keep the offending value so __str__ can report it
        self.value = value

    def __str__(self):
        return repr(self.value)
+
class AbnormalResultError(Exception):
    """Raised when a test result is outside its expected range."""

    def __init__(self, value):
        # keep the offending value so __str__ can report it
        self.value = value

    def __str__(self):
        return repr(self.value)
+
class ClassificationMissmatchError(Exception):
    """Raised on a traffic classification mismatch.

    NOTE(review): 'Missmatch' is a typo but the name is public API -
    renaming would break existing callers.
    """

    def __init__(self, value):
        # keep the offending value so __str__ can report it
        self.value = value

    def __str__(self):
        return repr(self.value)
diff --git a/scripts/automation/regression/stateful_tests/trex_general_test.py b/scripts/automation/regression/stateful_tests/trex_general_test.py
new file mode 100755 (executable)
index 0000000..21f5d8a
--- /dev/null
@@ -0,0 +1,319 @@
+#!/router/bin/python
+
+__copyright__ = "Copyright 2014"
+
+"""
+Name:
+     trex_general_test.py
+
+
+Description:
+
+    This script creates the functionality to test the performance of the T-Rex traffic generator
+    The tested scenario is a T-Rex TG directly connected to a Cisco router.
+
+::
+
+    Topology:
+
+       -------                         --------
+      |       | Tx---1gig/10gig----Rx |        |
+      | T-Rex |                       | router |
+      |       | Rx---1gig/10gig----Tx |        |
+       -------                         --------
+
+"""
+from nose.plugins import Plugin
+from nose.plugins.skip import SkipTest
+import trex
+from trex import CTRexScenario
+import misc_methods
+import sys
+import os
+# from CPlatformUnderTest import *
+from CPlatform import *
+import termstyle
+import threading
+from tests_exceptions import *
+from platform_cmd_link import *
+import unittest
+
def setUpModule(module):
    # nose/unittest module-level fixture hook; nothing to prepare
    pass
+
def tearDownModule(module):
    # nose/unittest module-level fixture hook; nothing to clean up
    pass
+
+class CTRexGeneral_Test(unittest.TestCase):
+    """This class defines the general stateful testcase of the T-Rex traffic generator"""
+    def __init__ (self, *args, **kwargs):
+        unittest.TestCase.__init__(self, *args, **kwargs)
+        if CTRexScenario.is_test_list:
+            return
+        # Point test object to scenario global object
+        self.configuration         = CTRexScenario.configuration
+        self.benchmark             = CTRexScenario.benchmark
+        self.trex                  = CTRexScenario.trex
+        self.trex_crashed          = CTRexScenario.trex_crashed
+        self.modes                 = CTRexScenario.modes
+        self.skipping              = False
+        self.fail_reasons          = []
+        if not hasattr(self, 'unsupported_modes'):
+            self.unsupported_modes   = []
+        self.is_loopback           = True if 'loopback' in self.modes else False
+        self.is_virt_nics          = True if 'virt_nics' in self.modes else False
+        self.is_VM                 = True if 'VM' in self.modes else False
+
+        if not CTRexScenario.is_init:
+            if self.trex: # stateful
+                CTRexScenario.trex_version = self.trex.get_trex_version()
+            if not self.is_loopback:
+                # initilize the scenario based on received configuration, once per entire testing session
+                CTRexScenario.router = CPlatform(CTRexScenario.router_cfg['silent_mode'])
+                device_cfg           = CDeviceCfg()
+                device_cfg.set_platform_config(CTRexScenario.router_cfg['config_dict'])
+                device_cfg.set_tftp_config(CTRexScenario.router_cfg['tftp_config_dict'])
+                CTRexScenario.router.load_platform_data_from_file(device_cfg)
+                CTRexScenario.router.launch_connection(device_cfg)
+                running_image = CTRexScenario.router.get_running_image_details()['image']
+                print 'Current router image: %s' % running_image
+                if CTRexScenario.router_cfg['forceImageReload']:
+                    needed_image = device_cfg.get_image_name()
+                    if not CTRexScenario.router.is_image_matches(needed_image):
+                        print 'Setting router image: %s' % needed_image
+                        CTRexScenario.router.config_tftp_server(device_cfg)
+                        CTRexScenario.router.load_platform_image(needed_image)
+                        CTRexScenario.router.set_boot_image(needed_image)
+                        CTRexScenario.router.reload_platform(device_cfg)
+                        CTRexScenario.router.launch_connection(device_cfg)
+                        running_image = CTRexScenario.router.get_running_image_details()['image'] # verify image
+                        if not CTRexScenario.router.is_image_matches(needed_image):
+                            self.fail('Unable to set router image: %s, current image is: %s' % (needed_image, running_image))
+                    else:
+                        print 'Matches needed image: %s' % needed_image
+                CTRexScenario.router_image = running_image
+
+            if self.modes:
+                print termstyle.green('\t!!!\tRunning with modes: %s, not suitable tests will be skipped.\t!!!' % list(self.modes))
+
+            CTRexScenario.is_init = True
+            print termstyle.green("Done instantiating T-Rex scenario!\n")
+
+#           raise RuntimeError('CTRexScenario class is not initialized!')
+        self.router = CTRexScenario.router
+
+
+
+#   def assert_dict_eq (self, dict, key, val, error=''):
+#           v1 = int(dict[key]))
+#           self.assertEqual(v1, int(val), error)
+#
+#   def assert_dict_gt (self, d, key, val, error=''):
+#           v1 = int(dict[key])
+#           self.assert_gt(v1, int(val), error)
+
+    def assertEqual(self, v1, v2, s):
+        if v1 != v2:
+            error='ERROR '+str(v1)+' !=  '+str(v2)+ '   '+s;
+            self.fail(error)
+
+    def assert_gt(self, v1, v2, s):
+        if not v1 > v2:
+            error='ERROR {big} <  {small}      {str}'.format(big = v1, small = v2, str = s)
+            self.fail(error)
+
+    def check_results_eq (self,res,name,val):
+        if res is None:
+            self.fail('TRex results cannot be None !')
+            return
+
+        if name not in res:
+            self.fail('TRex results does not include key %s' % name)
+            return
+
+        if res[name] != float(val):
+            self.fail('TRex results[%s]==%f and not as expected %f ' % (name, res[name], val))
+
+    def check_CPU_benchmark (self, trex_res, err = 10, minimal_cpu = 30, maximal_cpu = 85):
+            #cpu_util = float(trex_res.get_last_value("trex-global.data.m_cpu_util"))
+            cpu_util = sum([float(x) for x in trex_res.get_value_list("trex-global.data.m_cpu_util")[-4:-1]]) / 3 # mean of 3 values before last
+            
+            if not self.is_virt_nics:
+                if cpu_util > maximal_cpu:
+                    self.fail("CPU is too high (%s%%), probably queue full." % cpu_util )
+                if cpu_util < minimal_cpu:
+                    self.fail("CPU is too low (%s%%), can't verify performance in such low CPU%%." % cpu_util )
+
+            cores = self.get_benchmark_param('cores')
+            trex_tx_bps  = trex_res.get_last_value("trex-global.data.m_total_tx_bytes")
+            test_norm_cpu = 100.0*(trex_tx_bps/(cores*cpu_util))/1e6
+
+            print "TRex CPU utilization: %g%%, norm_cpu is : %d Mb/core" % (round(cpu_util), int(test_norm_cpu))
+
+            #expected_norm_cpu = self.get_benchmark_param('cpu_to_core_ratio')
+
+            #calc_error_precent = abs(100.0*(test_norm_cpu/expected_norm_cpu)-100.0)
+
+#           if calc_error_precent > err:
+#               msg ='Normalized bandwidth to CPU utilization ratio is %2.0f Mb/core expected %2.0f Mb/core more than %2.0f %% - ERROR' % (test_norm_cpu, expected_norm_cpu, err)
+#               raise AbnormalResultError(msg)
+#           else:
+#               msg ='Normalized bandwidth to CPU utilization ratio is %2.0f Mb/core expected %2.0f Mb/core less than %2.0f %% - OK' % (test_norm_cpu, expected_norm_cpu, err)
+#               print msg
+
+
+    def check_results_gt (self, res, name, val):
+        if res is None:
+            self.fail('TRex results canot be None !')
+            return
+
+        if name not in res:
+            self.fail('TRex results does not include key %s' % name)
+            return
+
+        if res[name]< float(val):
+            self.fail('TRex results[%s]<%f and not as expected greater than %f ' % (name, res[name], val))
+
+    def check_for_trex_crash(self):
+        pass
+
+    def get_benchmark_param (self, param, sub_param = None, test_name = None):
+        if not test_name:
+            test_name = self.get_name()
+        if test_name not in self.benchmark:
+            self.skip('No data in benchmark.yaml for test: %s, param: %s. Skipping.' % (test_name, param))
+        if sub_param:
+            return self.benchmark[test_name][param].get(sub_param)
+        else:
+            return self.benchmark[test_name].get(param)
+
+    def check_general_scenario_results (self, trex_res, check_latency = True):
+        
+        try:
+            # check if test is valid
+            if not trex_res.is_done_warmup():
+                self.fail('T-Rex did not reach warm-up situtaion. Results are not valid.')
+
+            # check history size is enough
+            if len(trex_res._history) < 5:
+                self.fail('T-Rex results list is too short. Increase the test duration or check unexpected stopping.')
+
+            # check T-Rex number of drops
+            trex_tx_pckt    = trex_res.get_last_value("trex-global.data.m_total_tx_pkts")
+            trex_drops      = trex_res.get_total_drops()
+            trex_drop_rate  = trex_res.get_drop_rate()
+            if ( trex_drops > 0.001 * trex_tx_pckt) and (trex_drop_rate > 0.0):     # deliberately mask kickoff drops when T-Rex first initiated
+                self.fail('Number of packet drops larger than 0.1% of all traffic')
+
+            # check queue full, queue drop, allocation error
+            m_total_alloc_error = trex_res.get_last_value("trex-global.data.m_total_alloc_error")
+            m_total_queue_full = trex_res.get_last_value("trex-global.data.m_total_queue_full")
+            m_total_queue_drop = trex_res.get_last_value("trex-global.data.m_total_queue_drop")
+            self.assert_gt(1000, m_total_alloc_error, 'Got allocation errors. (%s), please review multiplier and templates configuration.' % m_total_alloc_error)
+            self.assert_gt(1000, m_total_queue_drop, 'Too much queue_drop (%s), please review multiplier.' % m_total_queue_drop)
+
+            if self.is_VM:
+                allowed_queue_full = 10000 + trex_tx_pckt / 100
+            else:
+                allowed_queue_full = 1000 + trex_tx_pckt / 1000
+            self.assert_gt(allowed_queue_full, m_total_queue_full, 'Too much queue_full (%s), please review multiplier.' % m_total_queue_full)
+
+            # # check T-Rex expected counters
+            #trex_exp_rate = trex_res.get_expected_tx_rate().get('m_tx_expected_bps')
+            #assert trex_exp_rate is not None
+            #trex_exp_gbps = trex_exp_rate/(10**9)
+
+            if check_latency:
+                # check that max latency does not exceed 1 msec in regular setup or 100ms in VM
+                allowed_latency = 9999999 if self.is_VM else 1000
+                if max(trex_res.get_max_latency().values()) > allowed_latency:
+                    self.fail('LatencyError: Maximal latency exceeds %s (usec)' % allowed_latency)
+    
+                # check that avg latency does not exceed 1 msec in regular setup or 3ms in VM
+                allowed_latency = 9999999 if self.is_VM else 1000
+                if max(trex_res.get_avg_latency().values()) > allowed_latency:
+                    self.fail('LatencyError: Average latency exceeds %s (usec)' % allowed_latency)
+
+            if not self.is_loopback:
+                # check router number of drops --> deliberately masked- need to be figured out!!!!!
+                pkt_drop_stats = self.router.get_drop_stats()
+#               assert pkt_drop_stats['total_drops'] < 20
+
+                # check for trex-router packet consistency
+                # TODO: check if it's ok
+                print 'router drop stats: %s' % pkt_drop_stats
+                print 'TRex drop stats: %s' % trex_drops
+                #self.assertEqual(pkt_drop_stats, trex_drops, "TRex's and router's drop stats don't match.")
+
+        except KeyError as e:
+            self.fail(e)
+            #assert False
+
+        # except AssertionError as e:
+        #     e.args += ('T-Rex has crashed!') 
+        #     raise
+
+    # We encountered error, don't fail the test immediately
+    def fail(self, reason = 'Unknown error'):
+        print 'Error: %s' % reason
+        self.fail_reasons.append(reason)
+
+    # skip running of the test, counts as 'passed' but prints 'skipped'
+    def skip(self, message = 'Unknown reason'):
+        print 'Skip: %s' % message
+        self.skipping = True
+        raise SkipTest(message)
+
+    # get name of currently running test
+    def get_name(self):
+        return self._testMethodName
+
+    def setUp(self):
+        test_setup_modes_conflict = self.modes & set(self.unsupported_modes)
+        if test_setup_modes_conflict:
+            self.skip("The test can't run with following modes of given setup: %s " % test_setup_modes_conflict)
+        if self.trex and not self.trex.is_idle():
+            print 'Warning: TRex is not idle at setUp, trying to stop it.'
+            self.trex.force_kill(confirm = False)
+        if not self.is_loopback:
+            print ''
+            if self.trex: # stateful
+                self.router.load_clean_config()
+            self.router.clear_counters()
+            self.router.clear_packet_drop_stats()
+
+    ########################################################################
+    ####                DO NOT ADD TESTS TO THIS FILE                   ####
+    ####    Added tests here will held once for EVERY test sub-class    ####
+    ########################################################################
+
+    # masked example to such test. uncomment to watch how it affects #
+#   def test_isInitialized(self):
+#       assert CTRexScenario.is_init == True
+    def tearDown(self):
+        if not self.trex:
+            return
+        if not self.trex.is_idle():
+            print 'Warning: TRex is not idle at tearDown, trying to stop it.'
+            self.trex.force_kill(confirm = False)
+        if not self.skipping:
+            # print server logs of test run
+            if CTRexScenario.server_logs:
+                try:
+                    print termstyle.green('\n>>>>>>>>>>>>>>> Daemon log <<<<<<<<<<<<<<<')
+                    daemon_log = self.trex.get_trex_daemon_log()
+                    log_size = len(daemon_log)
+                    print ''.join(daemon_log[CTRexScenario.daemon_log_lines:])
+                    CTRexScenario.daemon_log_lines = log_size
+                except Exception as e:
+                    print "Can't get TRex daemon log:", e
+                try:
+                    print termstyle.green('>>>>>>>>>>>>>>>> Trex log <<<<<<<<<<<<<<<<')
+                    print ''.join(self.trex.get_trex_log())
+                except Exception as e:
+                    print "Can't get TRex log:", e
+            if len(self.fail_reasons):
+                raise Exception('The test is failed, reasons:\n%s' % '\n'.join(self.fail_reasons))
+
+    def check_for_trex_crash(self):
+        pass
diff --git a/scripts/automation/regression/stateful_tests/trex_imix_test.py b/scripts/automation/regression/stateful_tests/trex_imix_test.py
new file mode 100755 (executable)
index 0000000..43dea90
--- /dev/null
@@ -0,0 +1,202 @@
+#!/router/bin/python
+from trex_general_test import CTRexGeneral_Test
+from CPlatform import CStaticRouteConfig
+from tests_exceptions import *
+#import sys
+import time
+
+class CTRexIMIX_Test(CTRexGeneral_Test):
+    """This class defines the IMIX testcase of the T-Rex traffic generator"""
+    def __init__(self, *args, **kwargs):
+        # super(CTRexIMIX_Test, self).__init__()
+        CTRexGeneral_Test.__init__(self, *args, **kwargs)
+        pass
+
+    def setUp(self):
+        super(CTRexIMIX_Test, self).setUp() # launch super test class setUp process
+        # CTRexGeneral_Test.setUp(self)       # launch super test class setUp process
+        # self.router.clear_counters()
+        pass
+
+    def test_routing_imix_64(self):
+        # test initializtion
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces()
+            self.router.config_pbr(mode = "config")
+
+#       self.trex.set_yaml_file('cap2/imix_64.yaml')
+        mult  = self.get_benchmark_param('multiplier')
+        core  = self.get_benchmark_param('cores')
+
+#       trex_res = self.trex.run(multiplier = mult, cores = core, duration = 30, l = 1000, p = True)
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc = True,
+            d = 30,   
+            f = 'cap2/imix_64.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResult instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res)
+
+    # the name intentionally not matches nose default pattern, including the test should be specified explicitly
+    def dummy(self):
+        self.assertEqual(1, 2, 'boo')
+        self.assertEqual(2, 2, 'boo')
+        self.assertEqual(2, 3, 'boo')
+        #print ''
+        #print dir(self)
+        #print locals()
+        #print ''
+        #print_r(unittest.TestCase)
+        #print ''
+        #print_r(self)
+        print ''
+        #print unittest.TestCase.shortDescription(self)
+        #self.skip("I'm just a dummy test")
+
+
+    def test_routing_imix (self):
+        # test initializtion
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces()
+            self.router.config_pbr(mode = "config")
+
+#       self.trex.set_yaml_file('cap2/imix_fast_1g.yaml')
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc = True,
+            d = 60,   
+            f = 'cap2/imix_fast_1g.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResult instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+
+        self.check_general_scenario_results(trex_res)
+
+        self.check_CPU_benchmark(trex_res)
+
+
+    def test_static_routing_imix (self):
+        if self.is_loopback:
+            self.skip('In loopback mode the test is same as test_routing_imix')
+        # test initializtion
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces()
+
+            # Configure static routing based on benchmark data input
+            stat_route_dict = self.get_benchmark_param('stat_route_dict')
+            stat_route_obj = CStaticRouteConfig(stat_route_dict)
+            self.router.config_static_routing(stat_route_obj, mode = "config")
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc = True,
+            d = 60,   
+            f = 'cap2/imix_fast_1g.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResult instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+        print ("\nLATEST DUMP:")
+        print trex_res.get_latest_dump()
+
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res)
+
+
+    def test_static_routing_imix_asymmetric (self):
+        # test initializtion
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces()
+
+            # Configure static routing based on benchmark data input
+            stat_route_dict = self.get_benchmark_param('stat_route_dict')
+            stat_route_obj = CStaticRouteConfig(stat_route_dict)
+            self.router.config_static_routing(stat_route_obj, mode = "config")
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            nc = True,
+            d = 100,   
+            f = 'cap2/imix_fast_1g.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResults instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+
+        self.check_general_scenario_results(trex_res)
+
+        self.check_CPU_benchmark(trex_res)
+
+
+    def test_jumbo(self):
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces(mtu = 9216)
+            self.router.config_pbr(mode = "config")
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p = True,
+            nc = True,
+            d = 100,   
+            f = 'cap2/imix_9k.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResults instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res, minimal_cpu = 0, maximal_cpu = 10)
+
+    def tearDown(self):
+        CTRexGeneral_Test.tearDown(self)
+        # remove nbar config here
+        pass
+
if __name__ == "__main__":
    # no standalone entry point - tests are collected and run by nose
    pass
diff --git a/scripts/automation/regression/stateful_tests/trex_ipv6_test.py b/scripts/automation/regression/stateful_tests/trex_ipv6_test.py
new file mode 100755 (executable)
index 0000000..bffb475
--- /dev/null
@@ -0,0 +1,102 @@
+#!/router/bin/python
+from trex_general_test import CTRexGeneral_Test
+from tests_exceptions import *
+import time
+from nose.tools import assert_equal
+
+class CTRexIPv6_Test(CTRexGeneral_Test):
+    """This class defines the IPv6 testcase of the T-Rex traffic generator"""
+    def __init__(self, *args, **kwargs):
+       super(CTRexIPv6_Test, self).__init__(*args, **kwargs)
+       pass
+
+    def setUp(self):
+        super(CTRexIPv6_Test, self).setUp() # launch super test class setUp process
+#       print " before sleep setup !!"
+#       time.sleep(100000);
+#       pass
+
+    def test_ipv6_simple(self):
+        if self.is_virt_nics:
+            self.skip('--ipv6 flag does not work correctly with virtual NICs') # TODO: fix
+        # test initialization
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces()
+
+            self.router.config_pbr(mode = "config")
+            self.router.config_ipv6_pbr(mode = "config")
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc = True,
+            ipv6 = True,
+            d = 60,   
+            f = 'avl/sfr_delay_10_1g.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResult instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+
+        self.check_general_scenario_results(trex_res)
+        
+        self.check_CPU_benchmark (trex_res, 10.0)
+
+        assert True
+
+
+    def test_ipv6_negative (self):
+        if self.is_loopback:
+            self.skip('The test checks ipv6 drops by device and we are in loopback setup')
+        # test initialization
+        self.router.configure_basic_interfaces()
+
+        # NOT CONFIGURING IPv6 INTENTIONALLY TO GET DROPS!
+        self.router.config_pbr(mode = "config")
+        
+        # same params as test_ipv6_simple
+        mult = self.get_benchmark_param('multiplier', test_name = 'test_ipv6_simple')
+        core = self.get_benchmark_param('cores', test_name = 'test_ipv6_simple')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc = True,
+            ipv6 = True,
+            d = 60,   
+            f = 'avl/sfr_delay_10_1g.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResult instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+
+        trex_tx_pckt    = float(trex_res.get_last_value("trex-global.data.m_total_tx_pkts"))
+        trex_drops      = int(trex_res.get_total_drops())
+
+        trex_drop_rate  = trex_res.get_drop_rate()
+
+        # make sure that at least 50% of the total transmitted packets failed
+        self.assert_gt((trex_drops/trex_tx_pckt), 0.5, 'packet drop ratio is not high enough')
+
+        
+
+    def tearDown(self):
+        CTRexGeneral_Test.tearDown(self)
+       # remove config here
+        pass
+
+if __name__ == "__main__":
+    pass
diff --git a/scripts/automation/regression/stateful_tests/trex_nat_test.py b/scripts/automation/regression/stateful_tests/trex_nat_test.py
new file mode 100755 (executable)
index 0000000..e7fe5ca
--- /dev/null
@@ -0,0 +1,169 @@
+#!/router/bin/python
+from trex_general_test import CTRexGeneral_Test
+from tests_exceptions import *
+import time
+from CPlatform import CStaticRouteConfig, CNatConfig
+from nose.tools import assert_equal
+
+
+class CTRexNoNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
+    """This class defines the NAT testcase of the T-Rex traffic generator"""
+    def __init__(self, *args, **kwargs):
+       super(CTRexNoNat_Test, self).__init__(*args, **kwargs)
+        self.unsupported_modes = ['loopback'] # NAT requires device
+       pass
+
+    def setUp(self):
+        super(CTRexNoNat_Test, self).setUp() # launch super test class setUp process
+        pass
+
+    def check_nat_stats (self, nat_stats):
+        pass
+
+
+    def test_nat_learning(self):
+        # test initialization
+        self.router.configure_basic_interfaces()
+
+        stat_route_dict = self.get_benchmark_param('stat_route_dict')
+        stat_route_obj = CStaticRouteConfig(stat_route_dict)
+        self.router.config_static_routing(stat_route_obj, mode = "config")
+
+        self.router.config_nat_verify()         # shutdown duplicate interfaces
+
+#       self.trex.set_yaml_file('cap2/http_simple.yaml')
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+#       trex_res = self.trex.run(multiplier = mult, cores = core, duration = 100, l = 1000, learn_verify = True)
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            learn_verify = True,
+            d = 100,   
+            f = 'cap2/http_simple.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+        print ("\nLATEST DUMP:")
+        print trex_res.get_latest_dump()
+
+
+        expected_nat_opened = self.get_benchmark_param('nat_opened')
+        learning_stats = trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data
+
+        if self.get_benchmark_param('allow_timeout_dev'):
+            nat_timeout_ratio = learning_stats['m_total_nat_time_out']/learning_stats['m_total_nat_open']
+            if nat_timeout_ratio > 0.005:
+                self.fail('TRex nat_timeout ratio %f > 0.005 (0.5%%) and not as expected to be less than 0.5%%' % (nat_timeout_ratio))
+        else:
+            self.check_results_eq (learning_stats, 'm_total_nat_time_out', 0.0)
+        self.check_results_eq (learning_stats, 'm_total_nat_no_fid', 0.0)
+        self.check_results_gt (learning_stats, 'm_total_nat_learn_error', 0.0)
+#
+        self.check_results_gt (learning_stats, 'm_total_nat_open', expected_nat_opened)
+
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res, minimal_cpu = 10, maximal_cpu = 85)
+
+    def tearDown(self):
+        CTRexGeneral_Test.tearDown(self)
+        pass
+
+
+class CTRexNat_Test(CTRexGeneral_Test):#(unittest.TestCase):
+    """This class defines the NAT testcase of the T-Rex traffic generator"""
+    def __init__(self, *args, **kwargs):
+       super(CTRexNat_Test, self).__init__(*args, **kwargs)
+        self.unsupported_modes = ['loopback'] # NAT requires device
+       pass
+
+    def setUp(self):
+        super(CTRexNat_Test, self).setUp() # launch super test class setUp process
+        # config nat here
+        
+
+    def check_nat_stats (self, nat_stats):
+        pass
+
+
+    def test_nat_simple_mode1(self):
+        self.nat_simple_helper(learn_mode=1)
+
+    def test_nat_simple_mode2(self):
+        self.nat_simple_helper(learn_mode=2)
+
+    def nat_simple_helper(self, learn_mode=1):
+        # test initialization
+        self.router.configure_basic_interfaces()
+
+        
+        stat_route_dict = self.get_benchmark_param('stat_route_dict')
+        stat_route_obj = CStaticRouteConfig(stat_route_dict)
+        self.router.config_static_routing(stat_route_obj, mode = "config")
+
+        nat_dict = self.get_benchmark_param('nat_dict')
+        nat_obj  = CNatConfig(nat_dict)
+        self.router.config_nat(nat_obj)
+
+#       self.trex.set_yaml_file('cap2/http_simple.yaml')
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+#       trex_res = self.trex.run(nc=False,multiplier = mult, cores = core, duration = 100, l = 1000, learn = True)
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            learn_mode = learn_mode,
+            d = 100,
+            f = 'cap2/http_simple.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+        print ("\nLATEST DUMP:")
+        print trex_res.get_latest_dump()
+
+        trex_nat_stats = trex_res.get_last_value("trex-global.data", ".*nat.*") # extract all nat data
+        if self.get_benchmark_param('allow_timeout_dev'):
+            nat_timeout_ratio = trex_nat_stats['m_total_nat_time_out']/trex_nat_stats['m_total_nat_open']
+            if nat_timeout_ratio > 0.005:
+                self.fail('TRex nat_timeout ratio %f > 0.5%%' % nat_timeout_ratio)
+        else:
+            self.check_results_eq (trex_nat_stats,'m_total_nat_time_out', 0.0)
+        self.check_results_eq (trex_nat_stats,'m_total_nat_no_fid', 0.0)
+        self.check_results_gt (trex_nat_stats,'m_total_nat_open', 6000)
+
+
+        self.check_general_scenario_results(trex_res, check_latency = False) # NAT can cause latency
+##       test_norm_cpu = 2*(trex_res.result['total-tx']/(core*trex_res.result['cpu_utilization']))
+#        trex_tx_pckt  = trex_res.get_last_value("trex-global.data.m_total_tx_bps")
+#        cpu_util = int(trex_res.get_last_value("trex-global.data.m_cpu_util"))
+#        test_norm_cpu = 2*(trex_tx_pckt/(core*cpu_util))
+#        print "test_norm_cpu is: ", test_norm_cpu
+
+        self.check_CPU_benchmark(trex_res, minimal_cpu = 10, maximal_cpu = 85)
+
+        #if ( abs((test_norm_cpu/self.get_benchmark_param('cpu_to_core_ratio')) - 1) > 0.03):
+        #    raise AbnormalResultError('Normalized bandwidth to CPU utilization ratio exceeds 3%')
+
+        nat_stats = self.router.get_nat_stats()
+        print nat_stats
+
+        self.assert_gt(nat_stats['total_active_trans'], 5000, 'total active translations is not high enough')
+        self.assert_gt(nat_stats['dynamic_active_trans'], 5000, 'total dynamic active translations is not high enough')
+        self.assertEqual(nat_stats['static_active_trans'], 0, "NAT statistics nat_stats['static_active_trans'] should be zero")
+        self.assert_gt(nat_stats['num_of_hits'], 50000, 'total nat hits is not high enough')
+
+    def tearDown(self):
+        CTRexGeneral_Test.tearDown(self)
+        self.router.clear_nat_translations()
+
+
+if __name__ == "__main__":
+    pass
diff --git a/scripts/automation/regression/stateful_tests/trex_nbar_test.py b/scripts/automation/regression/stateful_tests/trex_nbar_test.py
new file mode 100755 (executable)
index 0000000..74d0227
--- /dev/null
@@ -0,0 +1,193 @@
+#!/router/bin/python
+from trex_general_test import CTRexGeneral_Test
+from tests_exceptions import *
+from interfaces_e import IFType
+from nose.tools import nottest
+from misc_methods import print_r
+
+class CTRexNbar_Test(CTRexGeneral_Test):
+    """This class defines the NBAR testcase of the T-Rex traffic generator"""
+    def __init__(self, *args, **kwargs):
+       super(CTRexNbar_Test, self).__init__(*args, **kwargs)
+        self.unsupported_modes = ['loopback'] # obviously no NBar in loopback
+       pass
+
+    def setUp(self):
+        super(CTRexNbar_Test, self).setUp() # launch super test class setUp process
+#       self.router.kill_nbar_flows()
+        self.router.clear_cft_counters()
+        self.router.clear_nbar_stats()
+
+    def match_classification (self):
+        nbar_benchmark = self.get_benchmark_param("nbar_classification")
+        test_classification = self.router.get_nbar_stats()
+        print "TEST CLASSIFICATION:"
+        print test_classification
+        missmatchFlag = False
+        missmatchMsg = "NBAR classification contains a mismatch on the following protocols:"
+        fmt = '\n\t{0:15} | Expected: {1:>3.2f}%, Got: {2:>3.2f}%'
+        noise_level = 0.045 # percents
+
+        for cl_intf in self.router.get_if_manager().get_if_list(if_type = IFType.Client):
+            client_intf = cl_intf.get_name()
+
+            # removing noise classifications
+            for key, value in test_classification[client_intf]['percentage'].items():
+                if value <= noise_level:
+                    print 'Removing noise classification: %s' % key
+                    del test_classification[client_intf]['percentage'][key]
+
+            if len(test_classification[client_intf]['percentage']) != (len(nbar_benchmark) + 1):    # adding 'total' key to nbar_benchmark
+                raise ClassificationMissmatchError ('The total size of classification result does not match the provided benchmark.')
+
+            for protocol, bench in nbar_benchmark.iteritems():
+                if protocol != 'total':
+                    try:
+                        bench = float(bench)
+                        protocol = protocol.replace('_','-')
+                        protocol_test_res = test_classification[client_intf]['percentage'][protocol]
+                        deviation = 100 * abs(bench/protocol_test_res - 1) # percents
+                        difference = abs(bench - protocol_test_res)
+                        if (deviation > 10 and difference > noise_level):   # allowing 10% deviation and 'noise_level'% difference
+                            missmatchFlag = True
+                            missmatchMsg += fmt.format(protocol, bench, protocol_test_res)
+                    except KeyError as e:
+                        missmatchFlag = True
+                        print e
+                        print "Changes missmatchFlag to True. ", "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf )
+                        missmatchMsg += "\n\tProtocol {0} isn't part of classification results on interface {intf}".format( protocol, intf = client_intf )
+                    except ZeroDivisionError as e:
+                        print "ZeroDivisionError: %s" % protocol
+                        pass
+        if missmatchFlag:
+            self.fail(missmatchMsg)
+
+
+    def test_nbar_simple(self):
+        # test initialization
+        deviation_compare_value = 0.03   # default value of deviation - 3%
+        self.router.configure_basic_interfaces()
+
+        self.router.config_pbr(mode = "config")
+        self.router.config_nbar_pd()
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc  = True,
+            d = 100,   
+            f = 'avl/sfr_delay_10_1g.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResult instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+        print ("\nLATEST DUMP:")
+        print trex_res.get_latest_dump()
+
+
+        self.check_general_scenario_results(trex_res, check_latency = False) # NBAR can cause latency
+        #       test_norm_cpu = 2*(trex_res.result['total-tx']/(core*trex_res.result['cpu_utilization']))
+        trex_tx_pckt  = trex_res.get_last_value("trex-global.data.m_total_tx_pkts")
+        cpu_util = trex_res.get_last_value("trex-global.data.m_cpu_util")
+        cpu_util_hist = trex_res.get_value_list("trex-global.data.m_cpu_util")
+        print "cpu util is:", cpu_util
+        print cpu_util_hist
+        test_norm_cpu = 2 * trex_tx_pckt / (core * cpu_util)
+        print "test_norm_cpu is:", test_norm_cpu
+
+        
+        if self.get_benchmark_param('cpu2core_custom_dev'):
+            # check this test by custom deviation
+            deviation_compare_value = self.get_benchmark_param('cpu2core_dev')
+            print "Comparing test with custom deviation value- {dev_val}%".format( dev_val = int(deviation_compare_value*100) )
+
+        # need to be fixed !
+        #if ( abs((test_norm_cpu/self.get_benchmark_param('cpu_to_core_ratio')) - 1) > deviation_compare_value):
+        #    raise AbnormalResultError('Normalized bandwidth to CPU utilization ratio exceeds benchmark boundaries')
+
+        self.match_classification()
+
+        assert True
+
+    @nottest
+    def test_rx_check (self):
+        # test initialization
+        self.router.configure_basic_interfaces()
+
+        self.router.config_pbr(mode = "config")
+        self.router.config_nbar_pd()
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+        sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc = True,
+            rx_check = sample_rate,
+            d = 100,   
+            f = 'cap2/sfr.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResult instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+        print ("\nLATEST DUMP:")
+        print trex_res.get_latest_dump()
+
+        self.check_general_scenario_results(trex_res)
+
+        self.check_CPU_benchmark(trex_res, 10)
+
+#       if trex_res.result['rx_check_tx']==trex_res.result['rx_check_rx']:  # rx_check verification shoud pass
+#           assert trex_res.result['rx_check_verification'] == "OK"
+#       else:
+#           assert trex_res.result['rx_check_verification'] == "FAIL"
+
+    # the name intentionally does not match nose's default pattern; to include this test it must be specified explicitly
+    def NBarLong(self):
+        self.router.configure_basic_interfaces()
+        self.router.config_pbr(mode = "config")
+        self.router.config_nbar_pd()
+
+        mult = self.get_benchmark_param('multiplier')
+        core = self.get_benchmark_param('cores')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc  = True,
+            d = 18000, # 5 hours
+            f = 'avl/sfr_delay_10_1g.yaml',
+            l = 1000)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        # trex_res is a CTRexResult instance- and contains the summary of the test results
+        # you may see all the results keys by simply calling here for 'print trex_res.result'
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+
+        self.check_general_scenario_results(trex_res, check_latency = False)
+
+
+    def tearDown(self):
+        CTRexGeneral_Test.tearDown(self)
+        pass
+
+if __name__ == "__main__":
+    pass
diff --git a/scripts/automation/regression/stateful_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py
new file mode 100755 (executable)
index 0000000..37b1c72
--- /dev/null
@@ -0,0 +1,275 @@
+#!/router/bin/python
+from trex_general_test import CTRexGeneral_Test
+from CPlatform import CStaticRouteConfig, CNatConfig
+from tests_exceptions import *
+#import sys
+import time
+import copy
+from nose.tools import nottest
+import traceback
+
+class CTRexRx_Test(CTRexGeneral_Test):
+    """This class defines the rx testcase of the T-Rex traffic generator"""
+    def __init__(self, *args, **kwargs):
+        CTRexGeneral_Test.__init__(self, *args, **kwargs)
+        self.unsupported_modes = ['virt_nics'] # TODO: fix
+       pass
+
+    def setUp(self):
+        CTRexGeneral_Test.setUp(self)
+        pass
+
+
+    def check_rx_errors(self, trex_res, allow_error_tolerance = True):
+        try:
+            # counters to check
+
+            latency_counters_display = {'m_unsup_prot': 0, 'm_no_magic': 0, 'm_no_id': 0, 'm_seq_error': 0, 'm_length_error': 0, 'm_no_ipv4_option': 0, 'm_tx_pkt_err': 0}
+            rx_counters = {'m_err_drop': 0, 'm_err_aged': 0, 'm_err_no_magic': 0, 'm_err_wrong_pkt_id': 0, 'm_err_fif_seen_twice': 0, 'm_err_open_with_no_fif_pkt': 0, 'm_err_oo_dup': 0, 'm_err_oo_early': 0, 'm_err_oo_late': 0, 'm_err_flow_length_changed': 0}
+
+            # get relevant TRex results
+
+            try:
+                ports_names = trex_res.get_last_value('trex-latecny-v2.data', 'port\-\d+')
+                if not ports_names:
+                    raise AbnormalResultError('Could not find ports info in TRex results, path: trex-latecny-v2.data.port-*')
+                for port_name in ports_names:
+                    path = 'trex-latecny-v2.data.%s.stats' % port_name
+                    port_result = trex_res.get_last_value(path)
+                    if not port_result:
+                        raise AbnormalResultError('Could not find port stats in TRex results, path: %s' % path)
+                    for key in latency_counters_display:
+                        latency_counters_display[key] += port_result[key]
+
+                # using the -k flag in TRex produces 1 error per port in the latency counter m_seq_error; allow it until the issue is resolved. For comparison, use a dict with a reduced m_seq_error number.
+                latency_counters_compare = copy.deepcopy(latency_counters_display)
+                latency_counters_compare['m_seq_error'] = max(0, latency_counters_compare['m_seq_error'] - len(ports_names))
+
+                path = 'rx-check.data.stats'
+                rx_check_results = trex_res.get_last_value(path)
+                if not rx_check_results:
+                    raise AbnormalResultError('No TRex results by path: %s' % path)
+                for key in rx_counters:
+                    rx_counters[key] = rx_check_results[key]
+
+                path = 'rx-check.data.stats.m_total_rx'
+                total_rx = trex_res.get_last_value(path)
+                if not total_rx:
+                    raise AbnormalResultError('No TRex results by path: %s' % path)
+
+
+                print 'Total packets checked: %s' % total_rx
+                print 'Latency counters: %s' % latency_counters_display
+                print 'rx_check counters: %s' % rx_counters
+
+            except KeyError as e:
+                self.fail('Expected key in TRex result was not found.\n%s' % traceback.print_exc())
+
+            # the check. in loopback expect 0 problems, at others allow errors <error_tolerance>% of total_rx
+
+            total_errors = sum(rx_counters.values()) + sum(latency_counters_compare.values())
+            error_tolerance = self.get_benchmark_param('error_tolerance')
+            if not error_tolerance or not allow_error_tolerance:
+                error_tolerance = 0
+            error_percentage = float(total_errors) * 100 / total_rx
+
+            if total_errors > 0:
+                if self.is_loopback or error_percentage > error_tolerance:
+                    self.fail('Too many errors in rx_check. (~%s%% of traffic)' % error_percentage)
+                else:
+                    print 'There are errors in rx_check (%f%%), not exceeding allowed limit (%s%%)' % (error_percentage, error_tolerance)
+            else:
+                print 'No errors in rx_check.'
+        except Exception as e:
+            print traceback.print_exc()
+            self.fail('Errors in rx_check: %s' % e)
+
+    def test_rx_check_sfr(self):
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces()
+            self.router.config_pbr(mode = 'config')
+
+        core  = self.get_benchmark_param('cores')
+        mult  = self.get_benchmark_param('multiplier')
+        sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p = True,
+            nc = True,
+            rx_check = sample_rate,
+            d = 100,
+            f = 'avl/sfr_delay_10_1g_no_bundeling.yaml',
+            l = 1000,
+            k = 10,
+            learn_verify = True,
+            l_pkt_mode = 2)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+        #print ("\nLATEST DUMP:")
+        #print trex_res.get_latest_dump()
+
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res)
+        self.check_rx_errors(trex_res)
+
+
+    def test_rx_check_http(self):
+        if not self.is_loopback:
+            # TODO: skip as test_rx_check_http_negative will cover it
+            #self.skip('This test is covered by test_rx_check_http_negative')
+            self.router.configure_basic_interfaces()
+            self.router.config_pbr(mode = "config")
+
+        core  = self.get_benchmark_param('cores')
+        mult  = self.get_benchmark_param('multiplier')
+        sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc = True,
+            rx_check = sample_rate,
+            d = 100,
+            f = 'cap2/http_simple.yaml',
+            l = 1000,
+            k = 10,
+            learn_verify = True,
+            l_pkt_mode = 2)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res)
+        self.check_rx_errors(trex_res)
+
+
+    def test_rx_check_sfr_ipv6(self):
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces()
+            self.router.config_pbr(mode = 'config')
+            self.router.config_ipv6_pbr(mode = "config")
+
+        core  = self.get_benchmark_param('cores')
+        mult  = self.get_benchmark_param('multiplier')
+        sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p = True,
+            nc = True,
+            rx_check = sample_rate,
+            d = 100,
+            f = 'avl/sfr_delay_10_1g_no_bundeling.yaml',
+            l = 1000,
+            k = 10,
+            ipv6 = True)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+        #print ("\nLATEST DUMP:")
+        #print trex_res.get_latest_dump()
+
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res)
+        self.check_rx_errors(trex_res)
+
+
+    def test_rx_check_http_ipv6(self):
+        if not self.is_loopback:
+            self.router.configure_basic_interfaces()
+            self.router.config_pbr(mode = "config")
+            self.router.config_ipv6_pbr(mode = "config")
+
+        core  = self.get_benchmark_param('cores')
+        mult  = self.get_benchmark_param('multiplier')
+        sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p  = True,
+            nc = True,
+            rx_check = sample_rate,
+            d = 100,
+            f = 'cap2/http_simple.yaml',
+            l = 1000,
+            k = 10,
+            ipv6 = True)
+
+        trex_res = self.trex.sample_to_run_finish()
+
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res)
+        self.check_rx_errors(trex_res)
+
+    #@nottest
+    def test_rx_check_http_negative(self):
+        if self.is_loopback:
+            self.skip('This test uses NAT, not relevant for loopback')
+
+        self.router.configure_basic_interfaces()
+        self.router.config_pbr(mode = "config")
+
+        core  = self.get_benchmark_param('cores')
+        mult  = self.get_benchmark_param('multiplier')
+        sample_rate = self.get_benchmark_param('rx_sample_rate')
+
+        ret = self.trex.start_trex(
+            c = core,
+            m = mult,
+            p = True,
+            rx_check = sample_rate,
+            d = 60,
+            f = 'cap2/http_simple.yaml',
+            l = 1000,
+            k = 10,
+            learn_verify = True,
+            l_pkt_mode = 2)
+
+        print 'Run for 40 seconds, expect no errors'
+        trex_res = self.trex.sample_x_seconds(40)
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+        self.check_general_scenario_results(trex_res)
+        self.check_CPU_benchmark(trex_res)
+        self.check_rx_errors(trex_res)
+
+        print 'Run until finish, expect errors'
+        old_errors = copy.deepcopy(self.fail_reasons)
+        nat_dict = self.get_benchmark_param('nat_dict', test_name = 'test_nat_simple')
+        nat_obj  = CNatConfig(nat_dict)
+        self.router.config_nat(nat_obj)
+        self.router.config_zbf()
+        trex_res = self.trex.sample_to_run_finish()
+        self.router.config_no_zbf()
+        self.router.clear_nat_translations()
+        print ("\nLATEST RESULT OBJECT:")
+        print trex_res
+        self.check_rx_errors(trex_res, allow_error_tolerance = False)
+        if self.fail_reasons == old_errors:
+            self.fail('Expected errors here, got none.')
+        else:
+            print 'Got errors as expected.'
+            self.fail_reasons = old_errors
+
+    def tearDown(self):
+        CTRexGeneral_Test.tearDown(self)
+        pass
+
+if __name__ == "__main__":
+    pass
diff --git a/scripts/automation/regression/stateless_tests/stl_examples_test.py b/scripts/automation/regression/stateless_tests/stl_examples_test.py
new file mode 100755 (executable)
index 0000000..080bb3d
--- /dev/null
@@ -0,0 +1,35 @@
+#!/router/bin/python
+from stl_general_test import CStlGeneral_Test, CTRexScenario
+import os, sys
+from misc_methods import run_command
+
+class STLExamples_Test(CStlGeneral_Test):
+    """This class defines the IMIX testcase of the T-Rex traffic generator"""
+
+    def setUp(self):
+        print 'STLExamples_Test setUp'
+        CStlGeneral_Test.setUp(self)
+        # examples connect by their own
+        if self.is_connected():
+            CTRexScenario.stl_trex.disconnect()
+
+    @classmethod
+    def tearDownClass(cls):
+        print 'STLExamples_Test tearDownClass'
+        # connect back at end of tests
+        if not cls.is_connected():
+            CTRexScenario.stl_trex.connect()
+
+    def test_stl_examples(self):
+        examples_dir = '../trex_control_plane/stl/examples'
+        examples_to_test = [
+                            'stl_imix.py',
+                            ]
+
+        for example in examples_to_test:
+            return_code, stdout, stderr = run_command("sh -c 'cd %s; %s %s -s %s'" % (examples_dir, sys.executable, example, CTRexScenario.configuration.trex['trex_name']))
+            assert return_code == 0, 'example %s failed.\nstdout: %s\nstderr: %s' % (example, stdout, stderr)
+
+    def test_stl_examples1(self):
+        print 'in test_stl_examples1'
+
diff --git a/scripts/automation/regression/stateless_tests/stl_general_test.py b/scripts/automation/regression/stateless_tests/stl_general_test.py
new file mode 100644 (file)
index 0000000..8d21cad
--- /dev/null
@@ -0,0 +1,62 @@
+import os, sys
+import unittest
+from trex import CTRexScenario
+from stateful_tests.trex_general_test import CTRexGeneral_Test
+from trex_stl_lib.api import *
+import time
+from nose.tools import nottest
+
+
+class CStlGeneral_Test(CTRexGeneral_Test):
+    """This class defines the general stateless testcase of the T-Rex traffic generator"""
+
+    #once for all tests under CStlGeneral_Test
+    @classmethod
+    def setUpClass(cls):
+        cls.stl_trex = CTRexScenario.stl_trex
+
+    def setUp(self):
+        CTRexGeneral_Test.setUp(self)
+        # check basic requirements, should be verified at test_connectivity, here only skip test
+        if CTRexScenario.stl_init_error:
+            self.skip(CTRexScenario.stl_init_error)
+
+    @staticmethod
+    def connect(timeout = 20):
+        sys.stdout.write('Connecting')
+        for i in range(timeout):
+            try:
+                sys.stdout.write('.')
+                sys.stdout.flush()
+                CTRexScenario.stl_trex.connect()
+                return
+            except:
+                time.sleep(1)
+        CTRexScenario.stl_trex.connect()
+
+    @staticmethod
+    def get_port_count():
+        return CTRexScenario.stl_trex.get_port_count()
+
+    @staticmethod
+    def is_connected():
+        return CTRexScenario.stl_trex.is_connected()
+
+class STLBasic_Test(CStlGeneral_Test):
+    # will run it first explicitly, check connectivity and configure routing
+    @nottest
+    def test_connectivity(self):
+        # On real (non-loopback) setups, reset and configure the router first.
+        if not self.is_loopback:
+            CTRexScenario.router.load_clean_config()
+            CTRexScenario.router.configure_basic_interfaces()
+            CTRexScenario.router.config_pbr(mode = "config")
+
+        # NOTE: stl_init_error is set BEFORE each step as a breadcrumb — if the
+        # step raises, the scenario keeps the message describing what failed,
+        # and setUp() of later tests uses it to skip them. Order is load-bearing.
+        CTRexScenario.stl_init_error = 'Client could not connect'
+        self.connect()
+        CTRexScenario.stl_init_error = 'Client could not map ports'
+        CTRexScenario.stl_ports_map = stl_map_ports(CTRexScenario.stl_trex)
+        CTRexScenario.stl_init_error = 'Could not determine bidirectional ports'
+        print 'Ports mapping: %s' % CTRexScenario.stl_ports_map
+        if not len(CTRexScenario.stl_ports_map['bi']):
+            raise STLError('No bidirectional ports')
+        # All steps succeeded: clear the breadcrumb so tests are not skipped.
+        CTRexScenario.stl_init_error = None
diff --git a/scripts/external_libs/ansi2html/LICENSE b/scripts/external_libs/ansi2html/LICENSE
new file mode 100755 (executable)
index 0000000..94a9ed0
--- /dev/null
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/scripts/external_libs/ansi2html/README.rst b/scripts/external_libs/ansi2html/README.rst
new file mode 100755 (executable)
index 0000000..eab1160
--- /dev/null
@@ -0,0 +1,71 @@
+ansi2html
+=========
+
+:Author: Ralph Bean <[email protected]>
+:Contributor: Robin Schneider <[email protected]>
+
+.. comment: split here
+
+Convert text with ANSI color codes to HTML or to LaTeX.
+
+.. _pixelbeat: http://www.pixelbeat.org/docs/terminal_colours/
+.. _blackjack: http://www.koders.com/python/fid5D57DD37184B558819D0EE22FCFD67F53078B2A3.aspx
+
+Inspired by and developed off of the work of `pixelbeat`_ and `blackjack`_.
+
+Build Status
+------------
+
+.. |master| image:: https://secure.travis-ci.org/ralphbean/ansi2html.png?branch=master
+   :alt: Build Status - master branch
+   :target: http://travis-ci.org/#!/ralphbean/ansi2html
+
+.. |develop| image:: https://secure.travis-ci.org/ralphbean/ansi2html.png?branch=develop
+   :alt: Build Status - develop branch
+   :target: http://travis-ci.org/#!/ralphbean/ansi2html
+
++----------+-----------+
+| Branch   | Status    |
++==========+===========+
+| master   | |master|  |
++----------+-----------+
+| develop  | |develop| |
++----------+-----------+
+
+
+Example - Python API
+--------------------
+
+>>> from ansi2html import Ansi2HTMLConverter
+>>> conv = Ansi2HTMLConverter()
+>>> ansi = "".join(sys.stdin.readlines())
+>>> html = conv.convert(ansi)
+
+Example - Shell Usage
+---------------------
+
+::
+
+ $ ls --color=always | ansi2html > directories.html
+ $ sudo tail /var/log/messages | ccze -A | ansi2html > logs.html
+ $ task burndown | ansi2html > burndown.html
+
+See the list of full options with::
+
+ $ ansi2html --help
+
+Get this project:
+-----------------
+
+::
+
+ $ sudo yum install python-ansi2html
+
+Source:  http://github.com/ralphbean/ansi2html/
+
+pypi:    http://pypi.python.org/pypi/ansi2html/
+
+License
+-------
+
+``ansi2html`` is licensed GPLv3+.
diff --git a/scripts/external_libs/ansi2html/ansi2html/__init__.py b/scripts/external_libs/ansi2html/ansi2html/__init__.py
new file mode 100755 (executable)
index 0000000..58250b8
--- /dev/null
@@ -0,0 +1,2 @@
# Package facade: re-export the converter so callers can simply do
#   from ansi2html import Ansi2HTMLConverter
from ansi2html.converter import Ansi2HTMLConverter
__all__ = ['Ansi2HTMLConverter']
diff --git a/scripts/external_libs/ansi2html/ansi2html/converter.py b/scripts/external_libs/ansi2html/ansi2html/converter.py
new file mode 100755 (executable)
index 0000000..c3e46ce
--- /dev/null
@@ -0,0 +1,548 @@
+# encoding: utf-8
+#  This file is part of ansi2html
+#  Convert ANSI (terminal) colours and attributes to HTML
+#  Copyright (C) 2012  Ralph Bean <[email protected]>
+#  Copyright (C) 2013  Sebastian Pipping <[email protected]>
+#
+#  Inspired by and developed off of the work by pixelbeat and blackjack.
+#
+#  This program is free software: you can redistribute it and/or
+#  modify it under the terms of the GNU General Public License as
+#  published by the Free Software Foundation, either version 3 of
+#  the License, or (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program.  If not, see
+#  <http://www.gnu.org/licenses/>.
+
+import re
+import sys
+import optparse
+import pkg_resources
+
+try:
+    from collections import OrderedDict
+except ImportError:
+    from ordereddict import OrderedDict
+
+from ansi2html.style import get_styles, SCHEME
+import six
+from six.moves import map
+from six.moves import zip
+
+
# SGR ("Select Graphic Rendition") parameter codes recognised by this module;
# values follow the standard ANSI escape-code table.  _State.adjust() keys
# off these to track the current text attributes.
ANSI_FULL_RESET = 0
ANSI_INTENSITY_INCREASED = 1
ANSI_INTENSITY_REDUCED = 2
ANSI_INTENSITY_NORMAL = 22
ANSI_STYLE_ITALIC = 3
ANSI_STYLE_NORMAL = 23
ANSI_BLINK_SLOW = 5
ANSI_BLINK_FAST = 6
ANSI_BLINK_OFF = 25
ANSI_UNDERLINE_ON = 4
ANSI_UNDERLINE_OFF = 24
ANSI_CROSSED_OUT_ON = 9
ANSI_CROSSED_OUT_OFF = 29
ANSI_VISIBILITY_ON = 28
ANSI_VISIBILITY_OFF = 8
ANSI_FOREGROUND_CUSTOM_MIN = 30
ANSI_FOREGROUND_CUSTOM_MAX = 37
ANSI_FOREGROUND_256 = 38
ANSI_FOREGROUND_DEFAULT = 39
ANSI_BACKGROUND_CUSTOM_MIN = 40
ANSI_BACKGROUND_CUSTOM_MAX = 47
ANSI_BACKGROUND_256 = 48
ANSI_BACKGROUND_DEFAULT = 49
ANSI_NEGATIVE_ON = 7
ANSI_NEGATIVE_OFF = 27


# http://stackoverflow.com/a/15190498
# Skeleton LaTeX document; %(title)s and %(content)s are substituted by
# Ansi2HTMLConverter.convert() when full output is requested.
_latex_template = '''\\documentclass{scrartcl}
\\usepackage[utf8]{inputenc}
\\usepackage{fancyvrb}
\\usepackage[usenames,dvipsnames]{xcolor}
%% \\definecolor{red-sd}{HTML}{7ed2d2}

\\title{%(title)s}

\\fvset{commandchars=\\\\\\{\}}

\\begin{document}

\\begin{Verbatim}
%(content)s
\\end{Verbatim}
\\end{document}
'''

# Skeleton HTML document; %(style)s receives the generated CSS rules and
# %(content)s the converted terminal output (see convert()).
_html_template = six.u("""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=%(output_encoding)s">
<title>%(title)s</title>
<style type="text/css">\n%(style)s\n</style>
</head>
<body class="body_foreground body_background" style="font-size: %(font_size)s;" >
<pre class="ansi2html-content">
%(content)s
</pre>
</body>

</html>
""")
+
class _State(object):
    """Mutable record of the SGR attributes currently in effect.

    One instance is threaded through the escape-code parser; each recognised
    SGR code mutates it via adjust(), and to_css_classes() snapshots it as a
    list of CSS class names.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Return every attribute to its terminal default."""
        self.intensity = ANSI_INTENSITY_NORMAL
        self.style = ANSI_STYLE_NORMAL
        self.blink = ANSI_BLINK_OFF
        self.underline = ANSI_UNDERLINE_OFF
        self.crossedout = ANSI_CROSSED_OUT_OFF
        self.visibility = ANSI_VISIBILITY_ON
        self.foreground = (ANSI_FOREGROUND_DEFAULT, None)
        self.background = (ANSI_BACKGROUND_DEFAULT, None)
        self.negative = ANSI_NEGATIVE_OFF

    def adjust(self, ansi_code, parameter=None):
        """Apply one SGR code; *parameter* carries the 256-color index
        for codes 38/48, and is ignored otherwise."""
        # Codes that simply overwrite a scalar attribute.
        simple_attrs = (
            ('intensity', (ANSI_INTENSITY_INCREASED, ANSI_INTENSITY_REDUCED,
                           ANSI_INTENSITY_NORMAL)),
            ('style', (ANSI_STYLE_ITALIC, ANSI_STYLE_NORMAL)),
            ('blink', (ANSI_BLINK_SLOW, ANSI_BLINK_FAST, ANSI_BLINK_OFF)),
            ('underline', (ANSI_UNDERLINE_ON, ANSI_UNDERLINE_OFF)),
            ('crossedout', (ANSI_CROSSED_OUT_ON, ANSI_CROSSED_OUT_OFF)),
            ('visibility', (ANSI_VISIBILITY_ON, ANSI_VISIBILITY_OFF)),
            ('negative', (ANSI_NEGATIVE_ON, ANSI_NEGATIVE_OFF)),
        )
        for attr, codes in simple_attrs:
            if ansi_code in codes:
                setattr(self, attr, ansi_code)
                return
        # Color codes store a (code, parameter) pair; parameter is only
        # meaningful for the 256-color forms.
        if (ANSI_FOREGROUND_CUSTOM_MIN <= ansi_code <= ANSI_FOREGROUND_CUSTOM_MAX
                or ansi_code == ANSI_FOREGROUND_DEFAULT):
            self.foreground = (ansi_code, None)
        elif ansi_code == ANSI_FOREGROUND_256:
            self.foreground = (ansi_code, parameter)
        elif (ANSI_BACKGROUND_CUSTOM_MIN <= ansi_code <= ANSI_BACKGROUND_CUSTOM_MAX
                or ansi_code == ANSI_BACKGROUND_DEFAULT):
            self.background = (ansi_code, None)
        elif ansi_code == ANSI_BACKGROUND_256:
            self.background = (ansi_code, parameter)

    def to_css_classes(self):
        """Render the non-default attributes as a list of CSS class names."""
        classes = []

        def add_if_set(value, default):
            # Plain attributes map directly to an 'ansiN' class.
            if value != default:
                classes.append('ansi%d' % value)

        def add_color(color_pair, default, inverted, inverted_class):
            # Colors use 'ansiN' / 'ansiN-M' (256-color) classes, or the
            # 'inv...' variants when reverse-video is active.
            code, param = color_pair
            if code == default:
                if inverted:
                    classes.append(inverted_class)
                return
            prefix = 'inv' if inverted else 'ansi'
            suffix = str(code) if param is None else '%d-%d' % (code, param)
            classes.append(prefix + suffix)

        add_if_set(self.intensity, ANSI_INTENSITY_NORMAL)
        add_if_set(self.style, ANSI_STYLE_NORMAL)
        add_if_set(self.blink, ANSI_BLINK_OFF)
        add_if_set(self.underline, ANSI_UNDERLINE_OFF)
        add_if_set(self.crossedout, ANSI_CROSSED_OUT_OFF)
        add_if_set(self.visibility, ANSI_VISIBILITY_ON)

        inverted = (self.negative == ANSI_NEGATIVE_ON)
        add_color(self.foreground, ANSI_FOREGROUND_DEFAULT, inverted, 'inv_background')
        add_color(self.background, ANSI_BACKGROUND_DEFAULT, inverted, 'inv_foreground')

        return classes
+
+
def linkify(line, latex_mode):
    """Wrap every http(s) URL in *line* in a hyperlink.

    In HTML mode each URL becomes an ``<a href>`` anchor; in LaTeX mode a
    ``\\url{}`` macro.

    BUGFIX: the original looped over ``re.findall`` and called
    ``str.replace`` per match, so a URL occurring more than once was
    substituted again *inside* the markup generated for its first
    occurrence, producing nested/corrupted links.  A single ``re.sub`` with
    a callable replacement touches each occurrence exactly once.
    """
    def _wrap(match):
        url = match.group(0)
        if latex_mode:
            return '\\url{%s}' % url
        return '<a href="%s">%s</a>' % (url, url)

    return re.sub(r'https?://\S+', _wrap, line)
+
+
+def _needs_extra_newline(text):
+    if not text or text.endswith('\n'):
+        return False
+    return True
+
+
class CursorMoveUp(object):
    # Sentinel token yielded by Ansi2HTMLConverter._apply_regex when a
    # "cursor up" code (ESC[...A) is seen; _collapse_cursor reacts to it by
    # deleting the tokens of the preceding line.
    pass
+
+
class Ansi2HTMLConverter(object):
    """ Convert Ansi color codes to CSS+HTML

    Example:
    >>> conv = Ansi2HTMLConverter()
    >>> ansi = " ".join(sys.stdin.readlines())
    >>> html = conv.convert(ansi)
    """

    def __init__(self,
                 latex=False,
                 inline=False,
                 dark_bg=True,
                 font_size='normal',
                 linkify=False,
                 escaped=True,
                 markup_lines=False,
                 output_encoding='utf-8',
                 scheme='ansi2html',
                 title=''
                ):

        self.latex = latex
        self.inline = inline
        self.dark_bg = dark_bg
        self.font_size = font_size
        self.linkify = linkify
        self.escaped = escaped
        self.markup_lines = markup_lines
        self.output_encoding = output_encoding
        self.scheme = scheme
        self.title = title
        self._attrs = None  # set by .prepare(); guarded by .attrs()

        if inline:
            # Map bare class name -> Rule so inline styles can be looked up.
            self.styles = dict([(item.klass.strip('.'), item) for item in get_styles(self.dark_bg, self.scheme)])

        # ANSI escape sequences look like: ESC '[' <params> <command letter>.
        # BUGFIX: the ESC byte (\033) had been corrupted into a literal '?',
        # so no real terminal output ever matched; also the command class
        # read '[a-zA-z]' (lowercase final 'z'), which additionally matched
        # '[', '\', ']', '^', '_' and '`'.
        self.ansi_codes_prog = re.compile('\033\\[' '([\\d;]*)' '([a-zA-Z])')

    def apply_regex(self, ansi):
        """Convert *ansi* text to markup (body only, no document template)."""
        parts = self._apply_regex(ansi)
        parts = self._collapse_cursor(parts)
        parts = list(parts)

        if self.linkify:
            parts = [linkify(part, self.latex) for part in parts]

        combined = "".join(parts)

        # Optionally give every line an id so it can be deep-linked (HTML only).
        if self.markup_lines and not self.latex:
            combined = "\n".join([
                """<span id="line-%i">%s</span>""" % (i, line)
                for i, line in enumerate(combined.split('\n'))
            ])

        return combined

    def _apply_regex(self, ansi):
        """Yield alternating plain-text fragments and open/close markup."""
        if self.escaped:
            if self.latex: # Known Perl function which does this: https://tex.stackexchange.com/questions/34580/escape-character-in-latex/119383#119383
                specials = OrderedDict([
                ])
            else:
                specials = OrderedDict([
                    ('&', '&amp;'),
                    ('<', '&lt;'),
                    ('>', '&gt;'),
                ])
            for pattern, special in specials.items():
                ansi = ansi.replace(pattern, special)

        state = _State()
        inside_span = False
        last_end = 0  # the index of the last end of a code we've seen
        for match in self.ansi_codes_prog.finditer(ansi):
            yield ansi[last_end:match.start()]
            last_end = match.end()

            params, command = match.groups()

            # Only SGR ('m'/'M') and cursor-up ('A') codes are handled;
            # every other escape sequence is silently stripped.
            if command not in 'mMA':
                continue

            # Special cursor-moving code.  The only supported one.
            if command == 'A':
                yield CursorMoveUp
                continue

            try:
                params = list(map(int, params.split(';')))
            except ValueError:
                params = [ANSI_FULL_RESET]

            # Find latest reset marker, skipping over 38/48 parameter triples
            # (whose payload bytes could be mistaken for a reset).
            last_null_index = None
            skip_after_index = -1
            for i, v in enumerate(params):
                if i <= skip_after_index:
                    continue

                if v == ANSI_FULL_RESET:
                    last_null_index = i
                elif v in (ANSI_FOREGROUND_256, ANSI_BACKGROUND_256):
                    skip_after_index = i + 2

            # Process reset marker, drop everything before
            if last_null_index is not None:
                params = params[last_null_index + 1:]
                if inside_span:
                    inside_span = False
                    if self.latex:
                        yield '}'
                    else:
                        yield '</span>'
                state.reset()

                if not params:
                    continue

            # Turn codes into CSS classes
            skip_after_index = -1
            for i, v in enumerate(params):
                if i <= skip_after_index:
                    continue

                if v in (ANSI_FOREGROUND_256, ANSI_BACKGROUND_256):
                    try:
                        parameter = params[i + 2]
                    except IndexError:
                        continue
                    skip_after_index = i + 2
                else:
                    parameter = None
                state.adjust(v, parameter=parameter)

            # Close the previous span before opening one with the new styles.
            if inside_span:
                if self.latex:
                    yield '}'
                else:
                    yield '</span>'
                inside_span = False

            css_classes = state.to_css_classes()
            if not css_classes:
                continue

            if self.inline:
                if self.latex:
                    # Inline LaTeX only carries the foreground color.
                    style = [self.styles[klass].kwl[0][1] for klass in css_classes if
                             self.styles[klass].kwl[0][0] == 'color']
                    yield '\\textcolor[HTML]{%s}{' % style[0]
                else:
                    style = [self.styles[klass].kw for klass in css_classes if
                             klass in self.styles]
                    yield '<span style="%s">' % "; ".join(style)
            else:
                if self.latex:
                    yield '\\textcolor{%s}{' % " ".join(css_classes)
                else:
                    yield '<span class="%s">' % " ".join(css_classes)
            inside_span = True

        # Trailing text after the final escape code, then close any open span.
        yield ansi[last_end:]
        if inside_span:
            if self.latex:
                yield '}'
            else:
                yield '</span>'
            inside_span = False

    def _collapse_cursor(self, parts):
        """ Act on any CursorMoveUp commands by deleting preceding tokens """

        final_parts = []
        for part in parts:

            # Throw out empty string tokens ("")
            if not part:
                continue

            # Go back, deleting every token in the last 'line'
            if part == CursorMoveUp:
                # BUGFIX: guard the pops -- a cursor-up at the very start of
                # the stream (or more move-ups than lines) used to raise
                # IndexError from pop() on an empty list.
                if final_parts:
                    final_parts.pop()
                while final_parts and '\n' not in final_parts[-1]:
                    final_parts.pop()

                continue

            # Otherwise, just pass this token forward
            final_parts.append(part)

        return final_parts

    def prepare(self, ansi='', ensure_trailing_newline=False):
        """ Load the contents of 'ansi' into this object """

        body = self.apply_regex(ansi)

        if ensure_trailing_newline and _needs_extra_newline(body):
            body += '\n'

        self._attrs = {
            'dark_bg': self.dark_bg,
            'font_size': self.font_size,
            'body': body,
        }

        return self._attrs

    def attrs(self):
        """ Prepare attributes for the template """
        if not self._attrs:
            raise Exception("Method .prepare not yet called.")
        return self._attrs

    def convert(self, ansi, full=True, ensure_trailing_newline=False):
        """Convert *ansi*; return the bare body (full=False) or a complete
        HTML/LaTeX document rendered through the module template."""
        attrs = self.prepare(ansi, ensure_trailing_newline=ensure_trailing_newline)
        if not full:
            return attrs["body"]
        else:
            if self.latex:
                _template = _latex_template
            else:
                _template = _html_template
            return _template % {
                'style' : "\n".join(map(str, get_styles(self.dark_bg, self.scheme))),
                'title' : self.title,
                'font_size' : self.font_size,
                'content' :  attrs["body"],
                'output_encoding' : self.output_encoding,
            }

    def produce_headers(self):
        """Return just the <style> block, for use with partial output."""
        return '<style type="text/css">\n%(style)s\n</style>\n' % {
            'style' : "\n".join(map(str, get_styles(self.dark_bg, self.scheme)))
        }
+
+
def main():
    """
    $ ls --color=always | ansi2html > directories.html
    $ sudo tail /var/log/messages | ccze -A | ansi2html > logs.html
    $ task burndown | ansi2html > burndown.html
    """
    # NOTE: the docstring above doubles as the optparse usage text, so its
    # wording is part of the program's output and is left untouched.

    scheme_names = sorted(six.iterkeys(SCHEME))
    version_str = pkg_resources.get_distribution('ansi2html').version
    parser = optparse.OptionParser(
        usage=main.__doc__,
        version="%%prog %s" % version_str)
    parser.add_option(
        "-p", "--partial", dest="partial",
        default=False, action="store_true",
        # BUGFIX: help text read "as them come in".
        help="Process lines as they come in.  No headers are produced.")
    parser.add_option(
        "-L", "--latex", dest="latex",
        default=False, action="store_true",
        help="Export as LaTeX instead of HTML.")
    parser.add_option(
        "-i", "--inline", dest="inline",
        default=False, action="store_true",
        help="Inline style without headers or template.")
    parser.add_option(
        "-H", "--headers", dest="headers",
        default=False, action="store_true",
        help="Just produce the <style> tag.")
    parser.add_option(
        "-f", '--font-size', dest='font_size', metavar='SIZE',
        default="normal",
        help="Set the global font size in the output.")
    parser.add_option(
        "-l", '--light-background', dest='light_background',
        default=False, action="store_true",
        help="Set output to 'light background' mode.")
    parser.add_option(
        "-a", '--linkify', dest='linkify',
        default=False, action="store_true",
        help="Transform URLs into <a> links.")
    parser.add_option(
        "-u", '--unescape', dest='escaped',
        default=True, action="store_false",
        help="Do not escape XML tags found in the input.")
    parser.add_option(
        "-m", '--markup-lines', dest="markup_lines",
        default=False, action="store_true",
        help="Surround lines with <span id='line-n'>..</span>.")
    parser.add_option(
        '--input-encoding', dest='input_encoding', metavar='ENCODING',
        default='utf-8',
        help="Specify input encoding")
    parser.add_option(
        '--output-encoding', dest='output_encoding', metavar='ENCODING',
        default='utf-8',
        help="Specify output encoding")
    parser.add_option(
        '-s', '--scheme', dest='scheme', metavar='SCHEME',
        default='ansi2html', choices=scheme_names,
        help=("Specify color palette scheme. Default: %%default. Choices: %s"
              % scheme_names))
    parser.add_option(
        '-t', '--title', dest='output_title',
        default='',
        help="Specify output title")

    opts, args = parser.parse_args()

    conv = Ansi2HTMLConverter(
        latex=opts.latex,
        inline=opts.inline,
        dark_bg=not opts.light_background,
        font_size=opts.font_size,
        linkify=opts.linkify,
        escaped=opts.escaped,
        markup_lines=opts.markup_lines,
        output_encoding=opts.output_encoding,
        scheme=opts.scheme,
        title=opts.output_title,
    )

    def _read(input_bytes):
        # Decode stdin bytes on py2; py3's sys.stdin already yields str.
        if six.PY3:
            # This is actually already unicode.  How to we explicitly decode in
            # python3?  I don't know the answer yet.
            return input_bytes
        else:
            return input_bytes.decode(opts.input_encoding)

    def _print(output_unicode, end='\n'):
        # Write encoded bytes where a binary buffer is available (py3 or
        # py2 with a buffered stdout); otherwise fall back appropriately.
        if hasattr(sys.stdout, 'buffer'):
            output_bytes = (output_unicode + end).encode(opts.output_encoding)
            sys.stdout.buffer.write(output_bytes)
        elif not six.PY3:
            sys.stdout.write((output_unicode + end).encode(opts.output_encoding))
        else:
            sys.stdout.write(output_unicode + end)

    # Produce only the headers and quit
    if opts.headers:
        _print(conv.produce_headers(), end='')
        return

    # Partial/inline output suppresses the surrounding document template.
    full = not bool(opts.partial or opts.inline)
    if six.PY3:
        output = conv.convert("".join(sys.stdin.readlines()), full=full, ensure_trailing_newline=True)
        _print(output, end='')
    else:
        output = conv.convert(six.u("").join(
            map(_read, sys.stdin.readlines())
        ), full=full, ensure_trailing_newline=True)
        _print(output, end='')
diff --git a/scripts/external_libs/ansi2html/ansi2html/style.py b/scripts/external_libs/ansi2html/ansi2html/style.py
new file mode 100755 (executable)
index 0000000..fe95b96
--- /dev/null
@@ -0,0 +1,135 @@
+#    This file is part of ansi2html.
+#    Copyright (C) 2012  Kuno Woudt <[email protected]>
+#    Copyright (C) 2013  Sebastian Pipping <[email protected]>
+#
+#    This program is free software: you can redistribute it and/or
+#    modify it under the terms of the GNU General Public License as
+#    published by the Free Software Foundation, either version 3 of
+#    the License, or (at your option) any later version.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#    General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see
+#    <http://www.gnu.org/licenses/>.
+
+
class Rule(object):
    """One CSS rule: a selector (*klass*) plus sorted declarations (**kw)."""

    def __init__(self, klass, **kw):
        # CSS property names arrive as Python identifiers, so underscores
        # become hyphens (e.g. font_weight -> font-weight); declarations are
        # emitted in sorted property order for deterministic output.

        self.klass = klass
        self.kw = '; '.join([(k.replace('_', '-')+': '+kw[k])
                             for k in sorted(kw.keys())]).strip()
        # kwl keeps (property, value[1:]) pairs; the [1:] drops the leading
        # '#' of hex colors for LaTeX \textcolor[HTML]{...} output.
        # NOTE(review): the slice is applied to *every* value, not only
        # colors -- confirm kwl is consumed only for color rules.
        self.kwl = [(k.replace('_', '-'), kw[k][1:])
                             for k in sorted(kw.keys())]

    def __str__(self):
        # Rendered exactly as the rule should appear inside a <style> block.
        return '%s { %s; }' % (self.klass, self.kw)
+
+
def index(r, g, b):
    """Return the xterm 256-color cube index (as a string) for RGB
    components in the 0-5 range; the cube starts at palette slot 16."""
    cube_slot = 16 + 36 * r + 6 * g + b
    return str(cube_slot)
+
+
def color(r, g, b):
    """Return an HTML hex color for 0-5 color-cube components,
    scaling each channel by 42 units per step."""
    channels = tuple(42 * component for component in (r, g, b))
    return "#%.2x%.2x%.2x" % channels
+
+
def level(grey):
    """Return the hex color of one step (0-23) of the greyscale ramp:
    each step is 10 units apart, offset by 8."""
    intensity = grey * 10 + 8
    return "#%.2x%.2x%.2x" % (intensity, intensity, intensity)
+
+
def index2(grey):
    """Return the xterm palette index (as a string) of a greyscale ramp
    step; the ramp occupies palette slots 232-255."""
    return str(grey + 232)
+
+# http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
# 16-color palettes keyed by scheme name.  Indices 0-7 are the normal colors
# in the order commented below; 8-15 the bright variants ('osx' reuses its
# 8 normal colors for the bright half via the * 2).
SCHEME = {
    # black red green brown/yellow blue magenta cyan grey/white
    'ansi2html': (
        "#000316", "#aa0000", "#00aa00", "#aa5500",
        "#0000aa", "#E850A8", "#00aaaa", "#F5F1DE",
        "#7f7f7f", "#ff0000", "#00ff00", "#ffff00",
        "#5c5cff", "#ff00ff", "#00ffff", "#ffffff"),

    'xterm': (
        "#000000", "#cd0000", "#00cd00", "#cdcd00",
        "#0000ee", "#cd00cd", "#00cdcd", "#e5e5e5",
        "#7f7f7f", "#ff0000", "#00ff00", "#ffff00",
        "#5c5cff", "#ff00ff", "#00ffff", "#ffffff"),

    'osx': (
        "#000000", "#c23621", "#25bc24", "#adad27",
        "#492ee1", "#d338d3", "#33bbc8", "#cbcccd") * 2,

    # http://ethanschoonover.com/solarized
    'solarized': (
        "#262626", "#d70000", "#5f8700", "#af8700",
        "#0087ff", "#af005f", "#00afaf", "#e4e4e4",
        "#1c1c1c", "#d75f00", "#585858", "#626262",
        "#808080", "#5f5faf", "#8a8a8a", "#ffffd7"),
    }
+
+
def get_styles(dark_bg=True, scheme='ansi2html'):
    """Build the full list of CSS Rule objects for a palette.

    :param dark_bg: pick foreground/background defaults suited to a dark page
    :param scheme: key into SCHEME selecting one of the 16-color palettes
    :returns: list of Rule objects, in the order they should be emitted
    """

    # Base rules: layout, body defaults, reverse-video helpers and the
    # non-color SGR attributes (bold, italic, underline, ...).
    css = [
        Rule('.ansi2html-content', white_space='pre-wrap', word_wrap='break-word', display='inline'),
        Rule('.body_foreground', color=('#000000', '#AAAAAA')[dark_bg]),
        Rule('.body_background', background_color=('#AAAAAA', '#000000')[dark_bg]),
        Rule('.body_foreground > .bold,.bold > .body_foreground, body.body_foreground > pre > .bold',
             color=('#000000', '#FFFFFF')[dark_bg], font_weight=('bold', 'normal')[dark_bg]),
        Rule('.inv_foreground', color=('#000000', '#FFFFFF')[not dark_bg]),
        Rule('.inv_background', background_color=('#AAAAAA', '#000000')[not dark_bg]),
        Rule('.ansi1', font_weight='bold'),
        Rule('.ansi2', font_weight='lighter'),
        Rule('.ansi3', font_style='italic'),
        Rule('.ansi4', text_decoration='underline'),
        Rule('.ansi5', text_decoration='blink'),
        Rule('.ansi6', text_decoration='blink'),
        Rule('.ansi8', visibility='hidden'),
        Rule('.ansi9', text_decoration='line-through'),
        ]

    # Standard 8 foreground (SGR 3x) and background (SGR 4x) colors, plus
    # the 'inv*' variants used when reverse-video is active.
    pal = SCHEME[scheme]
    for _index in range(8):
        css.append(Rule('.ansi3%s' % _index, color=pal[_index]))
        css.append(Rule('.inv3%s' % _index, background_color=pal[_index]))
    for _index in range(8):
        css.append(Rule('.ansi4%s' % _index, background_color=pal[_index]))
        css.append(Rule('.inv4%s' % _index, color=pal[_index]))

    # Same palette addressed through 256-color indices (SGR 38/48).
    # Cleanup: the original redundantly re-assigned `pal = SCHEME[scheme]`
    # here; pal is unchanged since the block above.
    for _index in range(len(pal)):
        css.append(Rule('.ansi38-%s' % _index, color=pal[_index]))
        css.append(Rule('.inv38-%s' % _index, background_color=pal[_index]))
    for _index in range(len(pal)):
        css.append(Rule('.ansi48-%s' % _index, background_color=pal[_index]))
        css.append(Rule('.inv48-%s' % _index, color=pal[_index]))

    # css.append("/* Define the explicit color codes (obnoxious) */\n\n")

    # 6x6x6 color cube (palette indices 16-231).
    for green in range(0, 6):
        for red in range(0, 6):
            for blue in range(0, 6):
                css.append(Rule(".ansi38-%s" % index(red, green, blue),
                                color=color(red, green, blue)))
                css.append(Rule(".inv38-%s" % index(red, green, blue),
                                background=color(red, green, blue)))
                css.append(Rule(".ansi48-%s" % index(red, green, blue),
                                background=color(red, green, blue)))
                css.append(Rule(".inv48-%s" % index(red, green, blue),
                                color=color(red, green, blue)))

    # 24-step greyscale ramp (palette indices 232-255).
    for grey in range(0, 24):
        css.append(Rule('.ansi38-%s' % index2(grey), color=level(grey)))
        css.append(Rule('.inv38-%s' % index2(grey), background=level(grey)))
        css.append(Rule('.ansi48-%s' % index2(grey), background=level(grey)))
        css.append(Rule('.inv48-%s' % index2(grey), color=level(grey)))

    return css
diff --git a/scripts/external_libs/ansi2html/ansi2html/util.py b/scripts/external_libs/ansi2html/ansi2html/util.py
new file mode 100755 (executable)
index 0000000..20ea044
--- /dev/null
@@ -0,0 +1,2 @@
def read_to_unicode(obj):
    """Read every line of *obj* (a binary file-like object) and return the
    lines decoded as UTF-8 text."""
    decoded_lines = []
    for raw_line in obj.readlines():
        decoded_lines.append(raw_line.decode('utf-8'))
    return decoded_lines