dpdk-hqos: don't hold up packets indefinitely under low load 46/3646/6
author    David Hotham <david.hotham@metaswitch.com>
          Tue, 1 Nov 2016 10:51:24 +0000 (10:51 +0000)
committer Damjan Marion <dmarion.lists@gmail.com>
          Tue, 1 Nov 2016 23:50:36 +0000 (23:50 +0000)
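
Under low load, packets buffered in pkts_enq could wait indefinitely for a
full burst before being handed to the scheduler. Count the passes through
the hqos thread loop for which buffered packets remain unsent, and force an
enqueue to the HQoS port once that count reaches HQOS_FLUSH_COUNT_THRESHOLD.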
Change-Id: If884637a6db0cb813a40920194795da2e98c8b23
Signed-off-by: David Hotham <david.hotham@metaswitch.com>
vnet/vnet/devices/dpdk/dpdk.h
vnet/vnet/devices/dpdk/hqos.c
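
For illustration only (not part of the patch), here is a minimal standalone
sketch of the flush-count pattern the diff below introduces. The names
iteration() and enqueue_burst(), and the tiny sizes, are hypothetical
stand-ins for the hqos thread loop and rte_sched_port_enqueue(); the real
threshold is 100000.

    #include <stdio.h>
    #include <stddef.h>

    #define BURST_ENQ              4  /* stand-in for hqos->hqos_burst_enq */
    #define FLUSH_COUNT_THRESHOLD  3  /* real code uses 100000 */

    static int pkts_enq[BURST_ENQ];
    static int pkts_enq_len = 0;
    static int flush_count = 0;

    static void
    enqueue_burst (void)
    {
      int i;
      /* stand-in for rte_sched_port_enqueue (hqos->hqos, pkts_enq,
         pkts_enq_len) */
      for (i = 0; i < pkts_enq_len; i++)
        printf ("enqueue pkt %d\n", pkts_enq[i]);
      pkts_enq_len = 0;
      flush_count = 0;
    }

    /* One pass of the polling loop; n_new packets arrived this pass. */
    static void
    iteration (int n_new, const int *pkts)
    {
      int i;
      for (i = 0; i < n_new; i++)
        {
          pkts_enq[pkts_enq_len++] = pkts[i];
          if (pkts_enq_len == BURST_ENQ)    /* full burst: flush at once */
            enqueue_burst ();
        }
      /* The fix: count passes with packets still buffered, and flush
         once the threshold is hit so nothing waits forever. */
      if (pkts_enq_len)
        {
          flush_count++;
          if (flush_count == FLUSH_COUNT_THRESHOLD)
            enqueue_burst ();
        }
    }

    int
    main (void)
    {
      int pkt = 42;
      int i;
      iteration (1, &pkt);      /* one packet: buffered, not yet sent */
      for (i = 0; i < FLUSH_COUNT_THRESHOLD; i++)
        iteration (0, NULL);    /* idle passes eventually flush it */
      return 0;
    }

A single buffered packet is no longer stranded waiting for a full burst: the
idle passes alone are enough to push it through.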

diff --git a/vnet/vnet/devices/dpdk/dpdk.h b/vnet/vnet/devices/dpdk/dpdk.h
index e34d4b9..dfbfce5 100644
--- a/vnet/vnet/devices/dpdk/dpdk.h
+++ b/vnet/vnet/devices/dpdk/dpdk.h
@@ -184,6 +184,7 @@ typedef struct
   u32 hqos_burst_deq;
   u32 pkts_enq_len;
   u32 swq_pos;
+  u32 flush_count;
 } dpdk_device_hqos_per_hqos_thread_t;
 
 typedef struct
@@ -304,6 +305,10 @@ typedef struct dpdk_efd_t
 #define DPDK_HQOS_DBG_BYPASS 0
 #endif
 
+#ifndef HQOS_FLUSH_COUNT_THRESHOLD
+#define HQOS_FLUSH_COUNT_THRESHOLD              100000
+#endif
+
 typedef struct dpdk_device_config_hqos_t
 {
   u32 hqos_thread;
diff --git a/vnet/vnet/devices/dpdk/hqos.c b/vnet/vnet/devices/dpdk/hqos.c
index d05ae09..12bf3fa 100644
--- a/vnet/vnet/devices/dpdk/hqos.c
+++ b/vnet/vnet/devices/dpdk/hqos.c
@@ -351,6 +351,7 @@ dpdk_port_setup_hqos (dpdk_device_t * xd, dpdk_device_config_hqos_t * hqos)
   vec_validate (xd->hqos_ht->pkts_deq, hqos->burst_deq - 1);
   xd->hqos_ht->pkts_enq_len = 0;
   xd->hqos_ht->swq_pos = 0;
+  xd->hqos_ht->flush_count = 0;
 
   /* Set up per-thread device data for each worker thread */
   for (i = 0; i < worker_thread_count; i++)
@@ -416,6 +417,7 @@ dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm)
       u32 pkts_enq_len = hqos->pkts_enq_len;
       u32 swq_pos = hqos->swq_pos;
       u32 n_swq = vec_len (hqos->swq), i;
+      u32 flush_count = hqos->flush_count;
 
       for (i = 0; i < n_swq; i++)
        {
@@ -446,10 +448,23 @@ dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm)
                rte_pktmbuf_free (pkts_enq[n_pkts]);
 
              pkts_enq_len = 0;
+             flush_count = 0;
              break;
            }
        }
+      if (pkts_enq_len)
+       {
+         flush_count++;
+         if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
+           {
+             rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
+
+             pkts_enq_len = 0;
+             flush_count = 0;
+           }
+       }
       hqos->pkts_enq_len = pkts_enq_len;
+      hqos->flush_count = flush_count;
 
       /* Advance to next device */
       dev_pos++;
@@ -490,6 +505,7 @@ dpdk_hqos_thread_internal (vlib_main_t * vm)
       u32 pkts_enq_len = hqos->pkts_enq_len;
       u32 swq_pos = hqos->swq_pos;
       u32 n_swq = vec_len (hqos->swq), i;
+      u32 flush_count = hqos->flush_count;
 
       /*
        * SWQ dequeue and HQoS enqueue for current device
@@ -517,10 +533,23 @@ dpdk_hqos_thread_internal (vlib_main_t * vm)
              rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
 
              pkts_enq_len = 0;
+             flush_count = 0;
              break;
            }
        }
+      if (pkts_enq_len)
+       {
+         flush_count++;
+         if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
+           {
+             rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
+
+             pkts_enq_len = 0;
+             flush_count = 0;
+           }
+       }
       hqos->pkts_enq_len = pkts_enq_len;
+      hqos->flush_count = flush_count;
 
       /*
        * HQoS dequeue and HWQ TX enqueue for current device