[V4] Add sending unidirectional traffic support for TG in CSIT 96/16696/3
authorYulong Pei <yulong.pei@intel.com>
Sat, 5 Jan 2019 15:12:59 +0000 (23:12 +0800)
committerTibor Frank <tifrank@cisco.com>
Tue, 8 Jan 2019 15:32:16 +0000 (15:32 +0000)
Extend the TG in CSIT to support sending unidirectional traffic so that
it can be used by unidirectional test cases, e.g. Load balancer.
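
For illustration, a library-level call exercising the new path could look like
the sketch below (a minimal sketch only; the TG node setup and the profile
name are assumptions, not part of this change):

    # Minimal sketch, assuming the TG node has already been set up the usual
    # CSIT way; the traffic profile name is illustrative only.
    from resources.libraries.python.TrafficGenerator import TrafficGenerator
    tg = TrafficGenerator()
    tg.send_traffic_on_tg(
        duration=10, rate='4.0mpps', framesize='64',
        traffic_type='trex-sl-3n-ethip4-ip4src254',
        unidirection=True, tx_port=0, rx_port=1)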

Change-Id: Ie25bcdf7d4525a6d88d8ecd11a1b6d98275fa4d6
Signed-off-by: Yulong Pei <yulong.pei@intel.com>
resources/libraries/python/TrafficGenerator.py
resources/libraries/robot/performance/performance_utils.robot
resources/tools/trex/trex_stateless_profile.py

index 36a8385..b5558e6 100644 (file)
@@ -457,6 +457,85 @@ class TrafficGenerator(AbstractMeasurer):
             self._latency.append(self._result.split(', ')[4].split('=')[1])
             self._latency.append(self._result.split(', ')[5].split('=')[1])
 
+    def trex_stl_start_unidirection(self, duration, rate, framesize,
+                                    traffic_type, tx_port=0, rx_port=1,
+                                    async_call=False, latency=False,
+                                    warmup_time=5.0):
+        """Execute script on remote node over ssh to start unidirectional
+        traffic. The purpose of this function is to support performance
+        tests that need to measure unidirectional traffic, e.g. Load
+        balancer maglev mode and l3dsr mode tests.
+
+        :param duration: Time expressed in seconds for how long to send traffic.
+        :param rate: Traffic rate expressed with units (pps, %).
+        :param framesize: L2 frame size to send (without padding and IPG).
+        :param traffic_type: Module name as a traffic type identifier.
+            See resources/traffic_profiles/trex for implemented modules.
+        :param tx_port: Traffic generator transmit port.
+        :param rx_port: Traffic generator receive port.
+        :param latency: With latency measurement.
+        :param async_call: If enabled then don't wait for all incoming traffic.
+        :param warmup_time: Warmup time period.
+        :type duration: float
+        :type rate: str
+        :type framesize: str
+        :type traffic_type: str
+        :type tx_port: integer
+        :type rx_port: integer
+        :type latency: bool
+        :type async_call: bool
+        :type warmup_time: float
+        :returns: Nothing
+        :raises RuntimeError: In case of TG driver issue.
+        """
+        ssh = SSH()
+        ssh.connect(self._node)
+
+        _latency = "--latency" if latency else ""
+        _async = "--async" if async_call else ""
+
+        profile_path = ("{0}/resources/traffic_profiles/trex/"
+                        "{1}.py".format(Constants.REMOTE_FW_DIR,
+                                        traffic_type))
+        (ret, stdout, _) = ssh.exec_command(
+            "sh -c "
+            "'{0}/resources/tools/trex/trex_stateless_profile.py "
+            "--profile {1} "
+            "--duration {2} "
+            "--frame_size {3} "
+            "--rate {4} "
+            "--warmup_time {5} "
+            "--port_0 {6} "
+            "--port_1 {7} "
+            "{8} "  # --async
+            "{9} "  # --latency
+            "{10}'".  # --unidirection
+            format(Constants.REMOTE_FW_DIR, profile_path, duration, framesize,
+                   rate, warmup_time, tx_port, rx_port, _async, _latency,
+                   "--unidirection"),
+            timeout=float(duration) + 60)
+
+        if int(ret) != 0:
+            raise RuntimeError('TRex unidirectional runtime error')
+        elif async_call:
+            # No result.
+            self._received = None
+            self._sent = None
+            self._loss = None
+            self._latency = None
+        else:
+            # last line from console output
+            line = stdout.splitlines()[-1]
+
+            self._result = line
+            logger.info('TrafficGen result: {0}'.format(self._result))
+
+            self._received = self._result.split(', ')[1].split('=')[1]
+            self._sent = self._result.split(', ')[2].split('=')[1]
+            self._loss = self._result.split(', ')[3].split('=')[1]
+            self._latency = []
+            self._latency.append(self._result.split(', ')[4].split('=')[1])
+
     def stop_traffic_on_tg(self):
         """Stop all traffic on TG.
 
@@ -468,9 +547,9 @@ class TrafficGenerator(AbstractMeasurer):
         if self._node['subtype'] == NodeSubTypeTG.TREX:
             self.trex_stl_stop_remote_exec(self._node)
 
-    def send_traffic_on_tg(self, duration, rate, framesize,
-                           traffic_type, warmup_time=5, async_call=False,
-                           latency=True):
+    def send_traffic_on_tg(self, duration, rate, framesize, traffic_type,
+                           unidirection=False, tx_port=0, rx_port=1,
+                           warmup_time=5, async_call=False, latency=True):
         """Send traffic from all configured interfaces on TG.
 
         :param duration: Duration of test traffic generation in seconds.
@@ -478,6 +557,9 @@ class TrafficGenerator(AbstractMeasurer):
         :param framesize: Frame size (L2) in Bytes.
         :param traffic_type: Module name as a traffic type identifier.
             See resources/traffic_profiles/trex for implemented modules.
+        :param unidirection: Traffic is unidirectional.
+        :param tx_port: Traffic generator transmit port.
+        :param rx_port: Traffic generator receive port.
         :param warmup_time: Warmup phase in seconds.
         :param async_call: Async mode.
         :param latency: With latency measurement.
@@ -485,6 +567,9 @@ class TrafficGenerator(AbstractMeasurer):
         :type rate: str
         :type framesize: str
         :type traffic_type: str
+        :type unidirection: bool
+        :type tx_port: integer
+        :type rx_port: integer
         :type warmup_time: float
         :type async_call: bool
         :type latency: bool
@@ -505,9 +590,15 @@ class TrafficGenerator(AbstractMeasurer):
         if node['subtype'] is None:
             raise RuntimeError('TG subtype not defined')
         elif node['subtype'] == NodeSubTypeTG.TREX:
-            self.trex_stl_start_remote_exec(duration, rate, framesize,
-                                            traffic_type, async_call, latency,
-                                            warmup_time=warmup_time)
+            if unidirection:
+                self.trex_stl_start_unidirection(duration, rate, framesize,
+                                                 traffic_type, tx_port,
+                                                 rx_port, async_call, latency,
+                                                 warmup_time)
+            else:
+                self.trex_stl_start_remote_exec(duration, rate, framesize,
+                                                traffic_type, async_call,
+                                                latency, warmup_time)
         else:
             raise NotImplementedError("TG subtype not supported")
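
For reference, with the values used in this change the command assembled by
trex_stl_start_unidirection() and executed on the TG node looks roughly like
the sketch below (illustrative values only; REMOTE_FW_DIR stands for
Constants.REMOTE_FW_DIR and the profile name is assumed):

    # Sketch of the remote invocation; --async is omitted because async_call
    # is False in this example.
    cmd = ("sh -c '{fw}/resources/tools/trex/trex_stateless_profile.py "
           "--profile {fw}/resources/traffic_profiles/trex/{profile}.py "
           "--duration 10.0 --frame_size 64 --rate 4.0mpps --warmup_time 5.0 "
           "--port_0 0 --port_1 1 --latency --unidirection'".format(
               fw="<REMOTE_FW_DIR>", profile="<traffic_type>"))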
 
index 4033442..66fd307 100644 (file)
 | | ... | - rate - Rate for sending packets. Type: string
 | | ... | - framesize - L2 Frame Size [B] or IMIX_v4_1. Type: integer/string
 | | ... | - topology_type - Topology type. Type: string
+| | ... | - unidirection - True if traffic is unidirectional. Type: boolean
+| | ... | - tx_port - TX port of TG, default 0. Type: integer
+| | ... | - rx_port - RX port of TG, default 1. Type: integer
 | | ... | - subsamples - How many trials in this measurement. Type:int
 | | ... | - trial_duration - Duration of single trial [s]. Type: float
 | | ... | - fail_no_traffic - Whether to fail on zero receive count. Type: boolean
 | | ... | *Example:*
 | | ...
 | | ... | \| Traffic should pass with maximum rate \| 4.0mpps \| 64 \
-| | ... | \| 3-node-IPv4 \| ${1} \| ${10.0} | ${False} \|
+| | ... | \| 3-node-IPv4 \| ${False} \| ${0} \| ${1} \|
+| | ... | \| ${1} \| ${10.0} \| ${False} \|
 | | ...
 | | [Arguments] | ${rate} | ${framesize} | ${topology_type}
+| | ... | ${unidirection}=${False} | ${tx_port}=${0} | ${rx_port}=${1}
 | | ... | ${trial_duration}=${perf_trial_duration} | ${fail_no_traffic}=${True}
 | | ... | ${subsamples}=${perf_trial_multiplicity}
 | | ...
 | | ${results} = | Send traffic at specified rate | ${trial_duration} | ${rate}
-| | ... | ${framesize} | ${topology_type} | ${subsamples}
+| | ... | ${framesize} | ${topology_type} | ${unidirection}
+| | ... | ${tx_port} | ${rx_port} | ${subsamples}
 | | Set Test Message | ${\n}Maximum Receive Rate trial results
 | | Set Test Message | in packets per second: ${results}
 | | ... | append=yes
 | | ... | - rate - Rate for sending packets. Type: string
 | | ... | - framesize - L2 Frame Size [B]. Type: integer/string
 | | ... | - topology_type - Topology type. Type: string
+| | ... | - unidirection - True if traffic is unidirectional. Type: boolean
+| | ... | - tx_port - TX port of TG, default 0. Type: integer
+| | ... | - rx_port - RX port of TG, default 1. Type: integer
 | | ... | - subsamples - How many trials in this measurement. Type: int
 | | ...
 | | ... | *Example:*
 | | ...
 | | ... | \| Send traffic at specified rate \| ${1.0} \| 4.0mpps \| 64 \
-| | ... | \| 3-node-IPv4 \| ${10} \|
+| | ... | \| 3-node-IPv4 \| ${False} \| ${0} \| ${1} \| ${10} \|
 | | ...
 | | [Arguments] | ${trial_duration} | ${rate} | ${framesize}
-| | ... | ${topology_type} | ${subsamples}=${1}
+| | ... | ${topology_type} | ${unidirection}=${False} | ${tx_port}=${0}
+| | ... | ${rx_port}=${1} | ${subsamples}=${1}
 | | ...
 | | Clear and show runtime counters with running traffic | ${trial_duration}
 | | ... | ${rate} | ${framesize} | ${topology_type}
+| | ... | ${unidirection} | ${tx_port} | ${rx_port}
 | | Run Keyword If | ${dut_stats}==${True} | Clear all counters on all DUTs
 | | Run Keyword If | ${dut_stats}==${True} and ${pkt_trace}==${True}
 | | ... | VPP Enable Traces On All DUTs | ${nodes}
 | | ${results} = | Create List
 | | :FOR | ${i} | IN RANGE | ${subsamples}
 | | | Send traffic on tg | ${trial_duration} | ${rate} | ${framesize}
-| | | ... | ${topology_type} | warmup_time=0
+| | | ... | ${topology_type} | ${unidirection} | ${tx_port}
+| | | ... | ${rx_port} | warmup_time=0
 | | | ${rx} = | Get Received
 | | | ${rr} = | Evaluate | ${rx} / ${trial_duration}
 | | | Append To List | ${results} | ${rr}
 | | ... | - rate - Rate for sending packets. Type: string
 | | ... | - framesize - L2 Frame Size [B] or IMIX_v4_1. Type: integer/string
 | | ... | - topology_type - Topology type. Type: string
+| | ... | - unidirection - True if traffic is unidirectional. Type: boolean
+| | ... | - tx_port - TX port of TG, default 0. Type: integer
+| | ... | - rx_port - RX port of TG, default 1. Type: integer
 | | ...
 | | ... | *Example:*
 | | ...
-| | ... | \| Traffic should pass with partial loss \| 10 \| 4.0mpps \| 64 \
-| | ... | \| 3-node-IPv4 \| 0.5 \| percentage \|
+| | ... | \| Clear and show runtime counters with running traffic \| 10 \
+| | ... | \| 4.0mpps \| 64 \| 3-node-IPv4 \| ${False} \| ${0} \| ${1} \|
 | | ...
 | | [Arguments] | ${duration} | ${rate} | ${framesize} | ${topology_type}
+| | ... | ${unidirection}=${False} | ${tx_port}=${0} | ${rx_port}=${1}
 | | ...
 | | Send traffic on tg | -1 | ${rate} | ${framesize} | ${topology_type}
+| | ... | ${unidirection} | ${tx_port} | ${rx_port}
 | | ... | warmup_time=0 | async_call=${True} | latency=${False}
 | | Run Keyword If | ${dut_stats}==${True}
 | | ... | Clear runtime counters on all DUTs | ${nodes}
index e1e56d9..de29ff5 100755 (executable)
@@ -58,8 +58,8 @@ def fmt_latency(lat_min, lat_avg, lat_max):
 
 
 def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
-                 port_1, latency, async_start=False):
-    """Send the traffic and measure packet loss and latency.
+                 port_1, latency, async_start=False, unidirection=False):
+    """Send traffic and measure packet loss and latency.
 
     Procedure:
      - reads the given traffic profile with streams,
@@ -85,6 +85,7 @@ def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
     :param port_1: Port 1 on the traffic generator.
     :param latency: With latency stats.
     :param async_start: Start the traffic and exit.
+    :param unidirection: Traffic is unidirectional.
     :type profile_file: str
     :type framesize: int or str
     :type duration: float
@@ -92,7 +93,45 @@ def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
     :type warmup_time: float
     :type port_0: int
     :type port_1: int
-    :type latency: boo;
+    :type latency: bool
+    :type async_start: bool
+    :type unidirection: bool
+    """
+
+    # Unidirectional traffic.
+    if unidirection:
+        send_traffic_unidirection(profile_file, duration, framesize, rate,
+                                  warmup_time, port_0, port_1, latency,
+                                  async_start)
+    # Bidirectional traffic.
+    else:
+        send_traffic_bidirection(profile_file, duration, framesize, rate,
+                                 warmup_time, port_0, port_1, latency,
+                                 async_start)
+
+
+def send_traffic_bidirection(profile_file, duration, framesize, rate,
+                             warmup_time, port_0, port_1, latency,
+                             async_start=False):
+    """Send bidirectional traffic and measure packet loss and latency.
+
+    :param profile_file: A python module with T-rex traffic profile.
+    :param framesize: Frame size.
+    :param duration: Duration of traffic run in seconds (-1=infinite).
+    :param rate: Traffic rate [percentage, pps, bps].
+    :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
+    :param port_0: Port 0 on the traffic generator.
+    :param port_1: Port 1 on the traffic generator.
+    :param latency: With latency stats.
+    :param async_start: Start the traffic and exit.
+    :type profile_file: str
+    :type framesize: int or str
+    :type duration: float
+    :type rate: str
+    :type warmup_time: float
+    :type port_0: int
+    :type port_1: int
+    :type latency: bool
     :type async_start: bool
     """
 
@@ -233,6 +272,161 @@ def simple_burst(profile_file, duration, framesize, rate, warmup_time, port_0,
                          lat_a, lat_b))
 
 
+def send_traffic_unidirection(profile_file, duration, framesize, rate,
+                              warmup_time, port_0, port_1, latency,
+                              async_start=False):
+    """Send unidirectional traffic and measure packet loss and latency.
+
+    :param profile_file: A python module with T-rex traffic profile.
+    :param framesize: Frame size.
+    :param duration: Duration of traffic run in seconds (-1=infinite).
+    :param rate: Traffic rate [percentage, pps, bps].
+    :param warmup_time: Traffic warm-up time in seconds, 0 = disable.
+    :param port_0: Port 0 on the traffic generator.
+    :param port_1: Port 1 on the traffic generator.
+    :param latency: With latency stats.
+    :param async_start: Start the traffic and exit.
+    :type profile_file: str
+    :type framesize: int or str
+    :type duration: float
+    :type rate: str
+    :type warmup_time: float
+    :type port_0: int
+    :type port_1: int
+    :type latency: bool
+    :type async_start: bool
+    """
+
+    client = None
+    total_rcvd = 0
+    total_sent = 0
+    lost_a = 0
+    lat_a = "-1/-1/-1"
+
+    # Read the profile:
+    try:
+        print("### Profile file:\n{}".format(profile_file))
+        profile = STLProfile.load(profile_file, direction=0, port_id=0,
+                                  framesize=framesize)
+        streams = profile.get_streams()
+    except STLError as err:
+        print("Error while loading profile '{0}' {1}".format(profile_file, err))
+        sys.exit(1)
+
+    try:
+        # Create the client:
+        client = STLClient(verbose_level=LoggerApi.VERBOSE_QUIET)
+        # Connect to server:
+        client.connect()
+        # Prepare our ports:
+        if port_0 == port_1:
+            client.reset(ports=[port_0])
+            client.remove_all_streams(ports=[port_0])
+
+            if "macsrc" in profile_file:
+                client.set_port_attr(ports=[port_0], promiscuous=True,
+                                     resolve=False)
+        else:
+            client.reset(ports=[port_0, port_1])
+            client.remove_all_streams(ports=[port_0, port_1])
+
+            if "macsrc" in profile_file:
+                client.set_port_attr(ports=[port_0, port_1], promiscuous=True,
+                                     resolve=False)
+
+        if isinstance(framesize, int):
+            client.add_streams(streams[0], ports=[port_0])
+        elif isinstance(framesize, str):
+            client.add_streams(streams[0:3], ports=[port_0])
+        if latency:
+            try:
+                if isinstance(framesize, int):
+                    client.add_streams(streams[2], ports=[port_0])
+                elif isinstance(framesize, str):
+                    latency = False
+            except STLError:
+                # Disable latency if NIC does not support requested stream type
+                print("##### FAILED to add latency streams #####")
+                latency = False
+
+        # Warm-up phase:
+        if warmup_time > 0:
+            # Clear the stats before injecting:
+            client.clear_stats()
+
+            # Choose rate and start traffic:
+            client.start(ports=[port_0], mult=rate,
+                         duration=warmup_time)
+
+            # Block until done:
+            client.wait_on_traffic(ports=[port_0],
+                                   timeout=warmup_time+30)
+
+            if client.get_warnings():
+                for warning in client.get_warnings():
+                    print(warning)
+
+            # Read the stats after the test:
+            stats = client.get_stats()
+
+            print("##### Warmup statistics #####")
+            print(json.dumps(stats, indent=4, separators=(',', ': '),
+                             sort_keys=True))
+
+            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
+            print("\npackets lost : {0} pkts".format(lost_a))
+
+        # Clear the stats before injecting:
+        client.clear_stats()
+        lost_a = 0
+
+        # Choose rate and start traffic:
+        client.start(ports=[port_0], mult=rate, duration=duration)
+
+        if not async_start:
+            # Block until done:
+            client.wait_on_traffic(ports=[port_0], timeout=duration+30)
+
+            if client.get_warnings():
+                for warning in client.get_warnings():
+                    print(warning)
+
+            # Read the stats after the test
+            stats = client.get_stats()
+
+            print("##### Statistics #####")
+            print(json.dumps(stats, indent=4, separators=(',', ': '),
+                             sort_keys=True))
+
+            lost_a = stats[port_0]["opackets"] - stats[port_1]["ipackets"]
+
+            if latency:
+                lat_a = fmt_latency(
+                    str(stats["latency"][0]["latency"]["total_min"]),
+                    str(stats["latency"][0]["latency"]["average"]),
+                    str(stats["latency"][0]["latency"]["total_max"]))
+
+            total_sent = stats[port_0]["opackets"]
+            total_rcvd = stats[port_1]["ipackets"]
+
+            print("\npackets lost : {0} pkts".format(lost_a))
+
+    except STLError as err:
+        sys.stderr.write("{0}\n".format(err))
+        sys.exit(1)
+
+    finally:
+        if async_start:
+            if client:
+                client.disconnect(stop_traffic=False, release_ports=True)
+        else:
+            if client:
+                client.disconnect()
+            print("rate={0}, totalReceived={1}, totalSent={2}, "
+                  "frameLoss={3}, latencyStream0(usec)={4}".
+                  format(rate, total_rcvd, total_sent, lost_a, lat_a))
+
+
 def main():
     """Main function for the traffic generator using T-rex.
 
@@ -274,7 +468,12 @@ def main():
     parser.add_argument("--latency",
                         action="store_true",
                         default=False,
-                        help="Add latency stream")
+                        help="Add latency stream.")
+    parser.add_argument("--unidirection",
+                        action="store_true",
+                        default=False,
+                        help="Send unidirectional traffic.")
+
     args = parser.parse_args()
 
     try:
@@ -290,7 +489,8 @@ def main():
                  port_0=args.port_0,
                  port_1=args.port_1,
                  latency=args.latency,
-                 async_start=args.async)
+                 async_start=args.async,
+                 unidirection=args.unidirection)
 
 
 if __name__ == '__main__':
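
The contract between trex_stateless_profile.py and TrafficGenerator.py is the
summary line printed in the finally block of send_traffic_unidirection();
trex_stl_start_unidirection() recovers the counters by splitting it on ', '
and '='. A minimal sketch of that parsing against an illustrative line (the
numbers are made up):

    # Illustrative result line in the format printed above; values are made up.
    line = ("rate=4.0mpps, totalReceived=119996000, totalSent=120000000, "
            "frameLoss=4000, latencyStream0(usec)=5/12/120")
    received = line.split(', ')[1].split('=')[1]   # '119996000'
    sent = line.split(', ')[2].split('=')[1]       # '120000000'
    loss = line.split(', ')[3].split('=')[1]       # '4000'
    latency = [line.split(', ')[4].split('=')[1]]  # ['5/12/120']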