Move dpdk (un)format functions to separate c file
vpp.git: vnet/vnet/devices/dpdk/node.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include <vnet/vnet.h>
16 #include <vppinfra/vec.h>
17 #include <vppinfra/error.h>
18 #include <vppinfra/format.h>
19 #include <vppinfra/xxhash.h>
20
21 #include <vnet/ethernet/ethernet.h>
22 #include <vnet/devices/dpdk/dpdk.h>
23 #include <vnet/classify/vnet_classify.h>
24 #include <vnet/mpls-gre/packet.h>
25
26 #include "dpdk_priv.h"
27
28 #ifndef MAX
29 #define MAX(a,b) ((a) < (b) ? (b) : (a))
30 #endif
31
32 #ifndef MIN
33 #define MIN(a,b) ((a) < (b) ? (a) : (b))
34 #endif
35
36 /*
37  * At least in certain versions of ESXi, vmware e1000's don't honor the
38  * "strip rx CRC" bit. Set this flag to work around that bug FOR UNIT TEST ONLY.
39  *
40  * If wireshark complains like so:
41  *
42  * "Frame check sequence: 0x00000000 [incorrect, should be <hex-num>]"
43  * and you're using ESXi emulated e1000's, set this flag FOR UNIT TEST ONLY.
44  *
45  * Note: do NOT check in this file with this workaround enabled! You'll lose
46  * actual data from e.g. 10xGE interfaces. The extra 4 bytes annoy
47  * wireshark, but they're harmless...
48  */
49 #define VMWARE_LENGTH_BUG_WORKAROUND 0
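/*
 * For reference, the workaround is applied in the per-packet rx loops below
 * simply by trimming the trailing 4-byte FCS, e.g.:
 *
 *   if (VMWARE_LENGTH_BUG_WORKAROUND)
 *     b0->current_length -= 4;
 */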
50
51 typedef struct {
52   u32 cached_next_index;
53
54   /* convenience variables */
55   vlib_main_t * vlib_main;
56   vnet_main_t * vnet_main;
57 } handoff_dispatch_main_t;
58
59 typedef struct {
60   u32 buffer_index;
61   u32 next_index;
62   u32 sw_if_index;
63 } handoff_dispatch_trace_t;
64
65 /* packet trace format function */
66 static u8 * format_handoff_dispatch_trace (u8 * s, va_list * args)
67 {
68   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
69   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
70   handoff_dispatch_trace_t * t = va_arg (*args, handoff_dispatch_trace_t *);
71
72   s = format (s, "HANDOFF_DISPATCH: sw_if_index %d next_index %d buffer 0x%x",
73       t->sw_if_index,
74       t->next_index,
75       t->buffer_index);
76   return s;
77 }
78
79 handoff_dispatch_main_t handoff_dispatch_main;
80
81 vlib_node_registration_t handoff_dispatch_node;
82
83 #define foreach_handoff_dispatch_error \
84 _(EXAMPLE, "example packets")
85
86 typedef enum {
87 #define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
88   foreach_handoff_dispatch_error
89 #undef _
90   HANDOFF_DISPATCH_N_ERROR,
91 } handoff_dispatch_error_t;
92
93 static char * handoff_dispatch_error_strings[] = {
94 #define _(sym,string) string,
95   foreach_handoff_dispatch_error
96 #undef _
97 };
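/*
 * The foreach_/X-macro pair above expands into matching enum values and
 * counter-name strings; e.g. _(EXAMPLE, "example packets") produces
 * HANDOFF_DISPATCH_ERROR_EXAMPLE and the string "example packets".
 */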
98
99 static inline
100 void vlib_put_handoff_queue_elt (vlib_frame_queue_elt_t * hf)
101 {
102   CLIB_MEMORY_BARRIER();
103   hf->valid = 1;
104 }
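/*
 * Producer-side handshake for the handoff ring: the element is fully
 * populated first, then the barrier above guarantees that a worker polling
 * elt->valid sees consistent contents.  A minimal producer sketch (the same
 * pattern used by dpdk_io_thread further down; simplified, not verbatim):
 *
 *   hf = dpdk_get_handoff_queue_elt (worker_index, elts_by_worker_index);
 *   hf->buffer_index[hf->n_vectors++] = bi0;
 *   if (hf->n_vectors == VLIB_FRAME_SIZE)
 *     vlib_put_handoff_queue_elt (hf);   // barrier, then valid = 1
 */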
105
106 static uword
107 handoff_dispatch_node_fn (vlib_main_t * vm,
108                   vlib_node_runtime_t * node,
109                   vlib_frame_t * frame)
110 {
111   u32 n_left_from, * from, * to_next;
112   dpdk_rx_next_t next_index;
113
114   from = vlib_frame_vector_args (frame);
115   n_left_from = frame->n_vectors;
116   next_index = node->cached_next_index;
117
118   while (n_left_from > 0)
119     {
120       u32 n_left_to_next;
121
122       vlib_get_next_frame (vm, node, next_index,
123                            to_next, n_left_to_next);
124
125       while (n_left_from >= 4 && n_left_to_next >= 2)
126         {
127           u32 bi0, bi1;
128           vlib_buffer_t * b0, * b1;
129           u32 next0, next1;
130           u32 sw_if_index0, sw_if_index1;
131           
132           /* Prefetch next iteration. */
133           {
134             vlib_buffer_t * p2, * p3;
135             
136             p2 = vlib_get_buffer (vm, from[2]);
137             p3 = vlib_get_buffer (vm, from[3]);
138             
139             vlib_prefetch_buffer_header (p2, LOAD);
140             vlib_prefetch_buffer_header (p3, LOAD);
141           }
142
143           /* speculatively enqueue b0 and b1 to the current next frame */
144           to_next[0] = bi0 = from[0];
145           to_next[1] = bi1 = from[1];
146           from += 2;
147           to_next += 2;
148           n_left_from -= 2;
149           n_left_to_next -= 2;
150
151           b0 = vlib_get_buffer (vm, bi0);
152           b1 = vlib_get_buffer (vm, bi1);
153
154           next0 = vnet_buffer(b0)->io_handoff.next_index;
155           next1 = vnet_buffer(b1)->io_handoff.next_index;
156
157           if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
158             {
159               vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
160               handoff_dispatch_trace_t *t =
161                 vlib_add_trace (vm, node, b0, sizeof (*t));
162               sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
163               t->sw_if_index = sw_if_index0;
164               t->next_index = next0;
165               t->buffer_index = bi0;
166             }
167           if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
168             {
169               vlib_trace_buffer (vm, node, next1, b1, /* follow_chain */ 0);
170               handoff_dispatch_trace_t *t =
171                 vlib_add_trace (vm, node, b1, sizeof (*t));
172               sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
173               t->sw_if_index = sw_if_index1;
174               t->next_index = next1;
175               t->buffer_index = bi1;
176             }
177             
178           /* verify speculative enqueues, maybe switch current next frame */
179           vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
180                                            to_next, n_left_to_next,
181                                            bi0, bi1, next0, next1);
182         }
183       
184       while (n_left_from > 0 && n_left_to_next > 0)
185         {
186           u32 bi0;
187           vlib_buffer_t * b0;
188           u32 next0;
189           u32 sw_if_index0;
190
191           /* speculatively enqueue b0 to the current next frame */
192           bi0 = from[0];
193           to_next[0] = bi0;
194           from += 1;
195           to_next += 1;
196           n_left_from -= 1;
197           n_left_to_next -= 1;
198
199           b0 = vlib_get_buffer (vm, bi0);
200
201           next0 = vnet_buffer(b0)->io_handoff.next_index;
202
203           if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
204             {
205               vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
206               handoff_dispatch_trace_t *t =
207                 vlib_add_trace (vm, node, b0, sizeof (*t));
208               sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
209               t->sw_if_index = sw_if_index0;
210               t->next_index = next0;
211               t->buffer_index = bi0;
212            }
213
214           /* verify speculative enqueue, maybe switch current next frame */
215           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
216                                            to_next, n_left_to_next,
217                                            bi0, next0);
218         }
219
220       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
221     }
222
223   return frame->n_vectors;
224 }
225
226 VLIB_REGISTER_NODE (handoff_dispatch_node) = {
227   .function = handoff_dispatch_node_fn,
228   .name = "handoff-dispatch",
229   .vector_size = sizeof (u32),
230   .format_trace = format_handoff_dispatch_trace,
231   .type = VLIB_NODE_TYPE_INTERNAL,
232   .flags = VLIB_NODE_FLAG_IS_HANDOFF,
233   
234   .n_errors = ARRAY_LEN(handoff_dispatch_error_strings),
235   .error_strings = handoff_dispatch_error_strings,
236
237   .n_next_nodes = DPDK_RX_N_NEXT,
238
239   .next_nodes = {
240         [DPDK_RX_NEXT_DROP] = "error-drop",
241         [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
242         [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input",
243         [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
244         [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
245   },
246 };
247
248 clib_error_t *handoff_dispatch_init (vlib_main_t *vm)
249 {
250   handoff_dispatch_main_t * mp = &handoff_dispatch_main;
251     
252   mp->vlib_main = vm;
253   mp->vnet_main = &vnet_main;
254
255   return 0;
256 }
257
258 VLIB_INIT_FUNCTION (handoff_dispatch_init);
259
260 u32 dpdk_get_handoff_node_index (void)
261 {
262   return handoff_dispatch_node.index;
263 }
264
265 static char * dpdk_error_strings[] = {
266 #define _(n,s) s,
267     foreach_dpdk_error
268 #undef _
269 };
270
271 always_inline void
272 dpdk_rx_next_and_error_from_mb_flags_x1 (dpdk_device_t *xd, struct rte_mbuf *mb,
273                                          vlib_buffer_t *b0,
274                                          u8 * next0, u8 * error0)
275 {
276   u8 is0_ip4, is0_ip6, is0_mpls, n0;
277   uint16_t mb_flags = mb->ol_flags;
278
279   if (PREDICT_FALSE(mb_flags & (
280 #ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
281        PKT_EXT_RX_PKT_ERROR | PKT_EXT_RX_BAD_FCS   |
282 #endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
283         PKT_RX_IP_CKSUM_BAD  | PKT_RX_L4_CKSUM_BAD
284     ))) 
285     {
286       /* some error was flagged. determine the drop reason */ 
287       n0 = DPDK_RX_NEXT_DROP;
288       *error0 = 
289 #ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
290         (mb_flags & PKT_EXT_RX_PKT_ERROR) ? DPDK_ERROR_RX_PACKET_ERROR : 
291         (mb_flags & PKT_EXT_RX_BAD_FCS) ? DPDK_ERROR_RX_BAD_FCS : 
292 #endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
293         (mb_flags & PKT_RX_IP_CKSUM_BAD) ? DPDK_ERROR_IP_CHECKSUM_ERROR : 
294         (mb_flags & PKT_RX_L4_CKSUM_BAD) ? DPDK_ERROR_L4_CHECKSUM_ERROR : 
295         DPDK_ERROR_NONE;
296     }
297   else
298     {
299       *error0 = DPDK_ERROR_NONE;
300       if (xd->per_interface_next_index != ~0)
301         n0 = xd->per_interface_next_index;
302       else if (mb_flags & PKT_RX_VLAN_PKT)
303         n0 = DPDK_RX_NEXT_ETHERNET_INPUT;
304       else
305         {
306           n0 = DPDK_RX_NEXT_ETHERNET_INPUT;
307 #if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
308           is0_ip4 = RTE_ETH_IS_IPV4_HDR(mb->packet_type) != 0;
309 #else
310           is0_ip4 = (mb_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV4_HDR_EXT)) != 0;
311 #endif
312
313           if (PREDICT_TRUE(is0_ip4))
314             n0 = DPDK_RX_NEXT_IP4_INPUT;
315           else
316             {
317 #if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
318               is0_ip6 = RTE_ETH_IS_IPV6_HDR(mb->packet_type) != 0;
319 #else
320               is0_ip6 = 
321                       (mb_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) != 0;
322 #endif
323               if (PREDICT_TRUE(is0_ip6))
324                 n0 = DPDK_RX_NEXT_IP6_INPUT;
325               else
326                 {
327                   ethernet_header_t *h0 = (ethernet_header_t *) b0->data;
328                   is0_mpls = (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST));
329                   n0 = is0_mpls ? DPDK_RX_NEXT_MPLS_INPUT : n0;
330                 }
331             }
332         }
333     }
334   *next0 = n0;
335 }
336
337 void dpdk_rx_trace (dpdk_main_t * dm,
338                     vlib_node_runtime_t * node,
339                     dpdk_device_t * xd,
340                     u16 queue_id,
341                     u32 * buffers,
342                     uword n_buffers)
343 {
344   vlib_main_t * vm = vlib_get_main();
345   u32 * b, n_left;
346   u8 next0;
347
348   n_left = n_buffers;
349   b = buffers;
350
351   while (n_left >= 1)
352     {
353       u32 bi0;
354       vlib_buffer_t * b0;
355       dpdk_rx_dma_trace_t * t0;
356       struct rte_mbuf *mb;
357       u8 error0;
358
359       bi0 = b[0];
360       n_left -= 1;
361
362       b0 = vlib_get_buffer (vm, bi0);
363       mb = ((struct rte_mbuf *)b0) - 1;
364       dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
365                                                &next0, &error0);
366       vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
367       t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
368       t0->queue_index = queue_id;
369       t0->device_index = xd->device_index;
370       t0->buffer_index = bi0;
371
372       memcpy (&t0->mb, mb, sizeof (t0->mb));
373       memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
374       memcpy (t0->buffer.pre_data, b0->data, sizeof (t0->buffer.pre_data));
375
376 #ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
377       /*
378        * Clear overloaded TX offload flags when a DPDK driver
379        * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
380        */
381       mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
382 #endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
383
384       b += 1;
385     }
386 }
387
388 /*
389  * dpdk_efd_update_counters()
390  * Update EFD (early-fast-discard) counters
391  */
392 void dpdk_efd_update_counters (dpdk_device_t *xd,
393                                u32 n_buffers,
394                                u16 enabled)
395 {
396   if (enabled & DPDK_EFD_MONITOR_ENABLED)
397     {
398       u64 now = clib_cpu_time_now();
399       if (xd->efd_agent.last_poll_time > 0)
400         {
401           u64 elapsed_time = (now - xd->efd_agent.last_poll_time);
402           if (elapsed_time > xd->efd_agent.max_poll_delay)
403             xd->efd_agent.max_poll_delay = elapsed_time;
404         }
405       xd->efd_agent.last_poll_time = now;
406     }
407   
408   xd->efd_agent.total_packet_cnt += n_buffers;
409   xd->efd_agent.last_burst_sz = n_buffers;
410
411   if (n_buffers > xd->efd_agent.max_burst_sz)
412     xd->efd_agent.max_burst_sz = n_buffers;
413
414   if (PREDICT_FALSE(n_buffers == VLIB_FRAME_SIZE))
415     {
416       xd->efd_agent.full_frames_cnt++;
417       xd->efd_agent.consec_full_frames_cnt++;
418     }
419   else
420     {
421       xd->efd_agent.consec_full_frames_cnt = 0;
422     }
423 }
424
425 /* is_efd_discardable()
426  *   returns a non-zero DPDK error code if the packet meets the early-fast-discard criteria,
427  *           zero otherwise
428  */
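/*
 * Worked example: if tm->efd.ip_prec_bitmap were 0x03, IPv4 packets with
 * precedence (tos >> 5) 0 or 1 would be reported as discardable
 * (DPDK_ERROR_IPV4_EFD_DROP_PKTS) while higher-precedence packets pass.
 */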
429 u32 is_efd_discardable (vlib_thread_main_t *tm,
430                         vlib_buffer_t * b0,
431                         struct rte_mbuf *mb)
432 {
433   ethernet_header_t *eh = (ethernet_header_t *) b0->data;
434
435   if (eh->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))
436     {
437       ip4_header_t *ipv4 =
438           (ip4_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
439       u8 pkt_prec = (ipv4->tos >> 5);
440           
441       return (tm->efd.ip_prec_bitmap & (1 << pkt_prec) ?
442                   DPDK_ERROR_IPV4_EFD_DROP_PKTS : DPDK_ERROR_NONE);
443     }
444   else if (eh->type == clib_net_to_host_u16(ETHERNET_TYPE_IP6))
445     {
446       ip6_header_t *ipv6 =
447           (ip6_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
448       u8 pkt_tclass =
449           ((ipv6->ip_version_traffic_class_and_flow_label >> 20) & 0xff);
450           
451       return (tm->efd.ip_prec_bitmap & (1 << pkt_tclass) ?
452                   DPDK_ERROR_IPV6_EFD_DROP_PKTS : DPDK_ERROR_NONE);
453     }
454   else if (eh->type == clib_net_to_host_u16(ETHERNET_TYPE_MPLS_UNICAST))
455     {
456       mpls_unicast_header_t *mpls =
457           (mpls_unicast_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
458       u8 pkt_exp = ((mpls->label_exp_s_ttl >> 9) & 0x07);
459
460       return (tm->efd.mpls_exp_bitmap & (1 << pkt_exp) ?
461                   DPDK_ERROR_MPLS_EFD_DROP_PKTS : DPDK_ERROR_NONE);
462     }
463   else if ((eh->type == clib_net_to_host_u16(ETHERNET_TYPE_VLAN)) ||
464            (eh->type == clib_net_to_host_u16(ETHERNET_TYPE_DOT1AD)))
465     {
466       ethernet_vlan_header_t *vlan =
467           (ethernet_vlan_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
468       u8 pkt_cos = ((vlan->priority_cfi_and_id >> 13) & 0x07);
469
470       return (tm->efd.vlan_cos_bitmap & (1 << pkt_cos) ?
471                   DPDK_ERROR_VLAN_EFD_DROP_PKTS : DPDK_ERROR_NONE);
472     }
473
474   return DPDK_ERROR_NONE;
475 }
476
477 /*
478  * This function is used when there are no worker threads.
479  * The main thread performs IO and forwards the packets. 
480  */
481 static inline u32 dpdk_device_input ( dpdk_main_t * dm, 
482                                       dpdk_device_t * xd,
483                                       vlib_node_runtime_t * node,
484                                       u32 cpu_index,
485                                       u16 queue_id)
486 {
487   u32 n_buffers;
488   u32 next_index = DPDK_RX_NEXT_ETHERNET_INPUT;
489   u32 n_left_to_next, * to_next;
490   u32 mb_index;
491   vlib_main_t * vm = vlib_get_main();
492   uword n_rx_bytes = 0;
493   u32 n_trace, trace_cnt __attribute__((unused));
494   vlib_buffer_free_list_t * fl;
495   u8 efd_discard_burst = 0;
496   u16 ip_align_offset = 0;
497   u32 buffer_flags_template;
498   
499   if (xd->admin_up == 0)
500     return 0;
501
502   n_buffers = dpdk_rx_burst(dm, xd, queue_id);
503
504   if (n_buffers == 0)
505     {
506       /* check if EFD (dpdk) is enabled */
507       if (PREDICT_FALSE(dm->efd.enabled))
508         {
509           /* reset a few stats */
510           xd->efd_agent.last_poll_time = 0;
511           xd->efd_agent.last_burst_sz = 0;
512         }
513       return 0;
514     }
515
516   if (xd->pmd == VNET_DPDK_PMD_THUNDERX)
517       ip_align_offset = 6;
518
519   buffer_flags_template = dm->buffer_flags_template;
520
521   vec_reset_length (xd->d_trace_buffers);
522   trace_cnt = n_trace = vlib_get_trace_count (vm, node);
523
524   fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
525
526   /*
527    * DAW-FIXME: VMXNET3 device stop/start doesn't work, 
528    * therefore fake the stop in the dpdk driver by
529    * silently dropping all of the incoming pkts instead of 
530    * stopping the driver / hardware.
531    */
532   if (PREDICT_FALSE(xd->admin_up != 1))
533     {
534       for (mb_index = 0; mb_index < n_buffers; mb_index++)
535         rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
536       
537       return 0;
538     }
539
540   /* Check for congestion if EFD (Early-Fast-Discard) is enabled
541    * in any mode (e.g. dpdk, monitor, or drop_all)
542    */
543   if (PREDICT_FALSE(dm->efd.enabled))
544     {
545       /* update EFD counters */
546       dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);
547
548       if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
549         {
550           /* discard all received packets */
551           for (mb_index = 0; mb_index < n_buffers; mb_index++)
552             rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);
553
554           xd->efd_agent.discard_cnt += n_buffers;
555           increment_efd_drop_counter(vm, 
556                                      DPDK_ERROR_VLAN_EFD_DROP_PKTS,
557                                      n_buffers);
558
559           return 0;
560         }
561       
562       if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
563                         dm->efd.consec_full_frames_hi_thresh))
564         {
565           u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
566                                                        queue_id);
567           if (device_queue_sz >= dm->efd.queue_hi_thresh)
568             {
569               /* dpdk device queue has reached the critical threshold */
570               xd->efd_agent.congestion_cnt++;
571
572               /* apply EFD to packets from the burst */
573               efd_discard_burst = 1;
574             }
575         }
576     }
577   
578   mb_index = 0;
579
580   while (n_buffers > 0)
581     {
582       u32 bi0;
583       u8 next0, error0;
584       u32 l3_offset0;
585       vlib_buffer_t * b0, * b_seg, * b_chain = 0;
586       u32 cntr_type;
587
588       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
589
590       while (n_buffers > 0 && n_left_to_next > 0)
591         {
592           u8 nb_seg = 1;
593           struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
594           struct rte_mbuf *mb_seg = mb->next;
595
596           if (PREDICT_TRUE(n_buffers > 2))
597           {
598               struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
599               vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
600               CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, STORE);
601               CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
602           }
603
604           ASSERT(mb);
605
606           b0 = (vlib_buffer_t *)(mb+1);
607
608           /* check whether EFD is looking for packets to discard */
609           if (PREDICT_FALSE(efd_discard_burst))
610             {
611               vlib_thread_main_t * tm = vlib_get_thread_main();
612               
613               if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
614                 {
615                   rte_pktmbuf_free(mb);
616                   xd->efd_agent.discard_cnt++;
617                   increment_efd_drop_counter(vm, 
618                                              cntr_type,
619                                              1);
620                   n_buffers--;
621                   mb_index++;
622                   continue;
623                 }
624             }
625
626           /* Prefetch one next segment if it exists. */
627           if (PREDICT_FALSE(mb->nb_segs > 1))
628             {
629               struct rte_mbuf *pfmb = mb->next;
630               vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
631               CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
632               CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
633               b_chain = b0;
634             }
635
636           vlib_buffer_init_for_free_list (b0, fl);
637           b0->clone_count = 0;
638           
639           bi0 = vlib_get_buffer_index (vm, b0);
640
641           to_next[0] = bi0;
642           to_next++;
643           n_left_to_next--;
644           
645           dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
646                                                    &next0, &error0);
647 #ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
648           /*
649            * Clear overloaded TX offload flags when a DPDK driver
650            * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
651            */
652
653           if (PREDICT_TRUE(trace_cnt == 0))
654             mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
655           else
656             trace_cnt--;
657 #endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
658
659           b0->error = node->errors[error0];
660
661           l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
662                          next0 == DPDK_RX_NEXT_IP6_INPUT ||
663                          next0 == DPDK_RX_NEXT_MPLS_INPUT) ? 
664                         sizeof (ethernet_header_t) : 0);
665
666           b0->current_data = l3_offset0;
667           b0->current_length = mb->data_len - l3_offset0;
668
669           if (PREDICT_FALSE (ip_align_offset != 0))
670             {
671               if (next0 == DPDK_RX_NEXT_IP4_INPUT ||
672                   next0 == DPDK_RX_NEXT_IP6_INPUT)
673                 b0->current_data += ip_align_offset;
674             }
675              
676           b0->flags = buffer_flags_template;
677
678           if (VMWARE_LENGTH_BUG_WORKAROUND)
679               b0->current_length -= 4;
680
681           vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
682           vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
683           n_rx_bytes += mb->pkt_len;
684
685           /* Process subsequent segments of multi-segment packets */
686           while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
687             {
688               ASSERT(mb_seg != 0);
689
690               b_seg = (vlib_buffer_t *)(mb_seg+1);
691               vlib_buffer_init_for_free_list (b_seg, fl);
692               b_seg->clone_count = 0;
693
694               ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
695               ASSERT(b_seg->current_data == 0);
696
697               /*
698                * The driver (e.g. virtio) may not put the packet data at the start
699                * of the segment, so don't assume b_seg->current_data == 0 is correct.
700                */
701               b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;
702
703               b_seg->current_length = mb_seg->data_len;
704               b0->total_length_not_including_first_buffer +=
705                 mb_seg->data_len;
706
707               b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
708               b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
709
710               b_chain = b_seg;
711               mb_seg = mb_seg->next;
712               nb_seg++;
713             } 
714
715           /*
716            * Turn this on if you run into
717            * "bad monkey" contexts, and you want to know exactly
718            * which nodes they've visited... See main.c...
719            */
720           VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
721
722           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
723                                            to_next, n_left_to_next,
724                                            bi0, next0);
725           if (PREDICT_FALSE (n_trace > mb_index))
726             vec_add1 (xd->d_trace_buffers, bi0);
727           n_buffers--;
728           mb_index++;
729         }
730       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
731     }
732
733   if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
734     {
735       dpdk_rx_trace (dm, node, xd, queue_id, xd->d_trace_buffers,
736                      vec_len (xd->d_trace_buffers));
737       vlib_set_trace_count (vm, node, n_trace - vec_len (xd->d_trace_buffers));
738     }
739   
740   vlib_increment_combined_counter 
741     (vnet_get_main()->interface_main.combined_sw_if_counters
742      + VNET_INTERFACE_COUNTER_RX,
743      cpu_index, 
744      xd->vlib_sw_if_index,
745      mb_index, n_rx_bytes);
746
747   dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
748   dw->aggregate_rx_packets += mb_index;
749
750   return mb_index;
751 }
752
753 #if VIRL > 0
754 #define VIRL_SPEED_LIMIT()                         \
755   /* Limit the input rate to 1000 vectors / sec */ \
756   {                                                \
757     struct timespec ts, tsrem;                     \
758                                                    \
759     ts.tv_sec = 0;                                 \
760     ts.tv_nsec = 1000*1000; /* 1ms */              \
761                                                    \
762     while (nanosleep(&ts, &tsrem) < 0)             \
763       {                                            \
764         ts = tsrem;                                \
765       }                                            \
766   }
767 #else
768 #define VIRL_SPEED_LIMIT()
769 #endif
770
771
772 static uword
773 dpdk_input (vlib_main_t * vm,
774             vlib_node_runtime_t * node,
775             vlib_frame_t * f)
776 {
777   dpdk_main_t * dm = &dpdk_main;
778   dpdk_device_t * xd;
779   uword n_rx_packets = 0;
780   dpdk_device_and_queue_t * dq;
781   u32 cpu_index = os_get_cpu_number();
782
783   /*
784    * Poll all devices on this cpu for input/interrupts.
785    */
786   vec_foreach (dq, dm->devices_by_cpu[cpu_index])
787     {
788       xd = vec_elt_at_index(dm->devices, dq->device);
789       ASSERT(dq->queue_id == 0);
790       n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, 0);
791     }
792
793   VIRL_SPEED_LIMIT()
794
795   return n_rx_packets;
796 }
797
798 uword
799 dpdk_input_rss (vlib_main_t * vm,
800       vlib_node_runtime_t * node,
801       vlib_frame_t * f)
802 {
803   dpdk_main_t * dm = &dpdk_main;
804   dpdk_device_t * xd;
805   uword n_rx_packets = 0;
806   dpdk_device_and_queue_t * dq;
807   u32 cpu_index = os_get_cpu_number();
808
809   /*
810    * Poll all devices on this cpu for input/interrupts.
811    */
812   vec_foreach (dq, dm->devices_by_cpu[cpu_index])
813     {
814       xd = vec_elt_at_index(dm->devices, dq->device);
815       n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id);
816     }
817
818   VIRL_SPEED_LIMIT()
819
820   return n_rx_packets;
821 }
822
823 VLIB_REGISTER_NODE (dpdk_input_node) = {
824   .function = dpdk_input,
825   .type = VLIB_NODE_TYPE_INPUT,
826   .name = "dpdk-input",
827
828   /* Will be enabled if/when hardware is detected. */
829   .state = VLIB_NODE_STATE_DISABLED,
830
831   .format_buffer = format_ethernet_header_with_length,
832   .format_trace = format_dpdk_rx_dma_trace,
833
834   .n_errors = DPDK_N_ERROR,
835   .error_strings = dpdk_error_strings,
836
837   .n_next_nodes = DPDK_RX_N_NEXT,
838   .next_nodes = {
839     [DPDK_RX_NEXT_DROP] = "error-drop",
840     [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
841     [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
842     [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
843     [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
844   },
845 };
846
847 /*
848  * Override the next nodes for the dpdk input nodes.
849  * Must be invoked prior to VLIB_INIT_FUNCTION calls.
850  */
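/*
 * Example usage (hypothetical node name), redirecting IPv4 traffic to a
 * different next node:
 *
 *   dpdk_set_next_node (DPDK_RX_NEXT_IP4_INPUT, "my-ip4-feature");
 */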
851 void dpdk_set_next_node (dpdk_rx_next_t next, char *name)
852 {
853   vlib_node_registration_t *r = &dpdk_input_node;
854   vlib_node_registration_t *r_io = &dpdk_io_input_node;
855   vlib_node_registration_t *r_handoff = &handoff_dispatch_node;
856
857   switch (next)
858     {
859     case DPDK_RX_NEXT_IP4_INPUT:
860     case DPDK_RX_NEXT_IP6_INPUT:
861     case DPDK_RX_NEXT_MPLS_INPUT:
862     case DPDK_RX_NEXT_ETHERNET_INPUT:
863       r->next_nodes[next] = name;
864       r_io->next_nodes[next] = name;
865       r_handoff->next_nodes[next] = name;
866       break;
867
868     default:
869       clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
870       break;
871     }
872 }
873
874 inline vlib_frame_queue_elt_t * 
875 vlib_get_handoff_queue_elt (u32 vlib_worker_index) 
876 {
877   vlib_frame_queue_t *fq;
878   vlib_frame_queue_elt_t *elt;
879   u64 new_tail;
880   
881   fq = vlib_frame_queues[vlib_worker_index];
882   ASSERT (fq);
883
884   new_tail = __sync_add_and_fetch (&fq->tail, 1);
885
886   /* Wait until a ring slot is available */
887   while (new_tail >= fq->head_hint + fq->nelts)
888       vlib_worker_thread_barrier_check ();
889
890   elt = fq->elts + (new_tail & (fq->nelts-1));
891
892   /* this would be very bad... */
893   while (elt->valid) 
894     ;
895
896   elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
897   elt->last_n_vectors = elt->n_vectors = 0;
898
899   return elt;
900 }
901
902 static inline vlib_frame_queue_elt_t *
903 dpdk_get_handoff_queue_elt ( 
904     u32 vlib_worker_index, 
905     vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index)
906 {
907   vlib_frame_queue_elt_t *elt;
908
909   if (handoff_queue_elt_by_worker_index [vlib_worker_index])
910       return handoff_queue_elt_by_worker_index [vlib_worker_index];
911
912   elt = vlib_get_handoff_queue_elt (vlib_worker_index);
913
914   handoff_queue_elt_by_worker_index [vlib_worker_index] = elt;
915
916   return elt;
917 }
918
919 static inline vlib_frame_queue_t *
920 is_vlib_handoff_queue_congested (
921     u32 vlib_worker_index,
922     u32 queue_hi_thresh,
923     vlib_frame_queue_t ** handoff_queue_by_worker_index)
924 {
925   vlib_frame_queue_t *fq;
926
927   fq = handoff_queue_by_worker_index [vlib_worker_index];
928   if (fq != (vlib_frame_queue_t *)(~0)) 
929       return fq;
930   
931   fq = vlib_frame_queues[vlib_worker_index];
932   ASSERT (fq);
933
934   if (PREDICT_FALSE(fq->tail >= (fq->head_hint + queue_hi_thresh))) {
935     /* a valid entry in the array will indicate the queue has reached
936      * the specified threshold and is congested
937      */
938     handoff_queue_by_worker_index [vlib_worker_index] = fq;
939     fq->enqueue_full_events++;
940     return fq;
941   }
942
943   return NULL;
944 }
945
946 static inline u64 ipv4_get_key (ip4_header_t *ip)
947 {
948    u64  hash_key;
949
950    hash_key = *((u64*)(&ip->address_pair)) ^ ip->protocol;
951
952    return hash_key;
953 }
954
955 static inline u64 ipv6_get_key (ip6_header_t *ip)
956 {
957    u64  hash_key;
958
959    hash_key = ip->src_address.as_u64[0] ^
960               ip->src_address.as_u64[1] ^
961               ip->dst_address.as_u64[0] ^
962               ip->dst_address.as_u64[1] ^
963               ip->protocol;
964
965    return hash_key;
966 }
967
968
969 #define MPLS_BOTTOM_OF_STACK_BIT_MASK   0x00000100U
970 #define MPLS_LABEL_MASK                 0xFFFFF000U
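/*
 * MPLS label stack entry layout, viewed as a host-order u32:
 *   label (20 bits) | EXP (3 bits) | S (1 bit) | TTL (8 bits)
 * so MPLS_LABEL_MASK selects the label bits and
 * MPLS_BOTTOM_OF_STACK_BIT_MASK selects the S ("bottom of stack") bit.
 * The masks are byte-swapped below because label_exp_s_ttl is used
 * directly in network byte order.
 */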
971
972 static inline u64 mpls_get_key (mpls_unicast_header_t *m)
973 {
974    u64                     hash_key;
975    u8                      ip_ver;
976
977
978    /* find the bottom of the MPLS label stack. */
979    if (PREDICT_TRUE(m->label_exp_s_ttl & 
980                     clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
981        goto bottom_lbl_found;
982    }
983    m++;
984
985    if (PREDICT_TRUE(m->label_exp_s_ttl & 
986                     clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
987        goto bottom_lbl_found;
988    }
989    m++;
990
991    if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
992        goto bottom_lbl_found;
993    }
994    m++;
995
996    if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
997        goto bottom_lbl_found;
998    }
999    m++;
1000
1001    if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
1002        goto bottom_lbl_found;
1003    }
1004    
1005    /* the bottom label was not found - use the last label */
1006    hash_key = m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);
1007
1008    return hash_key;
1009    
1010
1011 bottom_lbl_found:
1012    m++;
1013    ip_ver = (*((u8 *)m) >> 4);
1014
1015    /* find out if it is IPV4 or IPV6 header */
1016    if (PREDICT_TRUE(ip_ver == 4)) {
1017        hash_key = ipv4_get_key((ip4_header_t *)m);
1018    } else if (PREDICT_TRUE(ip_ver == 6)) {
1019        hash_key = ipv6_get_key((ip6_header_t *)m);
1020    } else {
1021        /* use the bottom label */
1022        hash_key = (m-1)->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);
1023    }
1024
1025    return hash_key;
1026
1027 }
1028
1029 static inline u64 eth_get_key (ethernet_header_t *h0)
1030 {
1031    u64 hash_key;
1032
1033
1034    if (PREDICT_TRUE(h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))) {
1035        hash_key = ipv4_get_key((ip4_header_t *)(h0+1));
1036    } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6)) {
1037        hash_key = ipv6_get_key((ip6_header_t *)(h0+1));
1038    } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
1039        hash_key = mpls_get_key((mpls_unicast_header_t *)(h0+1));
1040    } else if ((h0->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) || 
1041               (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_DOT1AD))) {
1042        ethernet_vlan_header_t * outer = (ethernet_vlan_header_t *)(h0 + 1);
1043        
1044        outer = (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ? 
1045                                   outer+1 : outer;
1046        if (PREDICT_TRUE(outer->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))) {
1047            hash_key = ipv4_get_key((ip4_header_t *)(outer+1));
1048        } else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)) {
1049            hash_key = ipv6_get_key((ip6_header_t *)(outer+1));
1050        } else if (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
1051            hash_key = mpls_get_key((mpls_unicast_header_t *)(outer+1));
1052        }  else {
1053            hash_key = outer->type; 
1054        }
1055    } else {
1056        hash_key  = 0;
1057    }
1058
1059    return hash_key;
1060 }
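/*
 * The key computed above is only used for flow distribution: it is fed
 * through clib_xxhash() and reduced to a worker offset, as done in
 * dpdk_io_thread() below:
 *
 *   hash = (u32) clib_xxhash (eth_get_key (h0));
 *   if (is_pow2 (num_workers))
 *     next_worker_index = first_worker_index + (hash & (num_workers - 1));
 *   else
 *     next_worker_index = first_worker_index + (hash % num_workers);
 */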
1061
1062 /*
1063  * This function is used when dedicated IO threads feed the worker threads.
1064  *
1065  * Devices are allocated to this thread based on instances and instance_id.
1066  * If instances==0 then the function automatically determines the number
1067  * of instances of this thread, and allocates devices between them. 
1068  * If instances != 0, then instance_id must be in the range 0..instances-1.
1069  * The function allocates devices among the specified number of instances,
1070  * with this thread having the given instance id. This option is used for 
1071  * splitting devices among differently named "io"-type threads.
1072  */
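/*
 * Example: with 4 devices and instances == 2, devices are dealt out by
 * (device_index % instances), so instance_id 0 polls devices 0 and 2 and
 * instance_id 1 polls devices 1 and 3.
 */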
1073 void dpdk_io_thread (vlib_worker_thread_t * w,
1074                      u32 instances,
1075                      u32 instance_id,
1076                      char *worker_name,
1077                      dpdk_io_thread_callback_t callback)
1078 {
1079   vlib_main_t * vm = vlib_get_main();
1080   vlib_thread_main_t * tm = vlib_get_thread_main();
1081   vlib_thread_registration_t * tr;
1082   dpdk_main_t * dm = &dpdk_main;
1083   char *io_name = w->registration->name;
1084   dpdk_device_t * xd;
1085   dpdk_device_t ** my_devices = 0;
1086   vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index = 0;
1087   vlib_frame_queue_t ** congested_handoff_queue_by_worker_index = 0;
1088   vlib_frame_queue_elt_t * hf = 0;
1089   int i;
1090   u32 n_left_to_next_worker = 0, * to_next_worker = 0;
1091   u32 next_worker_index = 0;
1092   u32 current_worker_index = ~0;
1093   u32 cpu_index = os_get_cpu_number();
1094   u32 num_workers = 0;
1095   u32 num_devices = 0;
1096   uword * p;
1097   u16 queue_id = 0;
1098   vlib_node_runtime_t * node_trace;
1099   u32 first_worker_index = 0;
1100   u32 buffer_flags_template;
1101   
1102   /* Wait until the dpdk init sequence is complete */
1103   while (dm->io_thread_release == 0)
1104     vlib_worker_thread_barrier_check();
1105
1106   clib_time_init (&vm->clib_time);
1107
1108   p = hash_get_mem (tm->thread_registrations_by_name, worker_name);
1109   ASSERT (p);
1110   tr = (vlib_thread_registration_t *) p[0];
1111   if (tr) 
1112     {
1113       num_workers = tr->count;
1114       first_worker_index = tr->first_index;
1115     }
1116
1117   /* Allocate devices to this thread */
1118   if (instances == 0) 
1119     {
1120       /* auto-assign */
1121       instance_id = w->instance_id;
1122
1123       p = hash_get_mem (tm->thread_registrations_by_name, io_name);
1124       tr = (vlib_thread_registration_t *) p[0];
1125       /* Otherwise, how did we get here */
1126       ASSERT (tr && tr->count);
1127       instances = tr->count;
1128     }
1129   else
1130     {
1131       /* manually assign */
1132       ASSERT (instance_id < instances);
1133     }
1134
1135   vec_validate (handoff_queue_elt_by_worker_index,
1136                 first_worker_index + num_workers - 1);
1137
1138   vec_validate_init_empty (congested_handoff_queue_by_worker_index,
1139                            first_worker_index + num_workers - 1,
1140                            (vlib_frame_queue_t *)(~0));
1141
1142   /* packet tracing is triggered on the dpdk-input node for ease-of-use */
1143   node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);
1144
1145   buffer_flags_template = dm->buffer_flags_template;
1146
1147   /* And handle them... */
1148   while (1)
1149     {
1150       u32 n_buffers;
1151       u32 mb_index;
1152       uword n_rx_bytes = 0;
1153       u32 n_trace, trace_cnt __attribute__((unused));
1154       vlib_buffer_free_list_t * fl;
1155       u32 hash;
1156       u64 hash_key;
1157       u8 efd_discard_burst;
1158
1159       vlib_worker_thread_barrier_check ();
1160
1161       /* Invoke callback if supplied */
1162       if (PREDICT_FALSE(callback != NULL))
1163           callback(vm);
1164
1165       if (PREDICT_FALSE(vec_len(dm->devices) != num_devices))
1166       {
1167         vec_reset_length(my_devices);
1168         vec_foreach (xd, dm->devices)
1169           {
1170             if (((xd - dm->devices) % instances) == instance_id)
1171               {
1172                 fprintf(stderr, "i/o thread %d (cpu %d) takes port %d\n",
1173                         instance_id, (int) os_get_cpu_number(), (int) (xd - dm->devices));
1174                 vec_add1 (my_devices, xd);
1175               }
1176           }
1177         num_devices = vec_len(dm->devices);
1178       }
1179
1180       for (i = 0; i < vec_len (my_devices); i++)
1181       {
1182           xd = my_devices[i];
1183
1184           if (!xd->admin_up)
1185             continue;
1186
1187           n_buffers = dpdk_rx_burst(dm, xd, 0 /* queue_id */);
1188
1189           if (n_buffers == 0)
1190             {
1191               /* check if EFD (dpdk) is enabled */
1192               if (PREDICT_FALSE(dm->efd.enabled))
1193                 {
1194                   /* reset a few stats */
1195                   xd->efd_agent.last_poll_time = 0;
1196                   xd->efd_agent.last_burst_sz = 0;
1197                 }
1198               continue;
1199             }
1200
1201           vec_reset_length (xd->d_trace_buffers);
1202           trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);
1203         
1204           /*
1205            * DAW-FIXME: VMXNET3 device stop/start doesn't work, 
1206            * therefore fake the stop in the dpdk driver by
1207            * silently dropping all of the incoming pkts instead of 
1208            * stopping the driver / hardware.
1209            */
1210           if (PREDICT_FALSE(xd->admin_up != 1))
1211             {
1212               for (mb_index = 0; mb_index < n_buffers; mb_index++)
1213                 rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
1214               continue;
1215             }
1216
1217           /* reset EFD action for the burst */
1218           efd_discard_burst = 0;
1219           
1220           /* Check for congestion if EFD (Early-Fast-Discard) is enabled
1221            * in any mode (e.g. dpdk, monitor, or drop_all)
1222            */
1223           if (PREDICT_FALSE(dm->efd.enabled))
1224             {
1225               /* update EFD counters */
1226               dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);
1227
1228               if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
1229                 {
1230                   /* drop all received packets */
1231                   for (mb_index = 0; mb_index < n_buffers; mb_index++)
1232                     rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);
1233
1234                   xd->efd_agent.discard_cnt += n_buffers;
1235                   increment_efd_drop_counter(vm, 
1236                                              DPDK_ERROR_VLAN_EFD_DROP_PKTS,
1237                                              n_buffers);
1238
1239                   continue;
1240                 }
1241
1242               if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
1243                                 dm->efd.consec_full_frames_hi_thresh))
1244                 {
1245                   u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
1246                                                                queue_id);
1247                   if (device_queue_sz >= dm->efd.queue_hi_thresh)
1248                     {
1249                       /* dpdk device queue has reached the critical threshold */
1250                       xd->efd_agent.congestion_cnt++;
1251
1252                       /* apply EFD to packets from the burst */
1253                       efd_discard_burst = 1;
1254                     }
1255                 }
1256             }
1257
1258           fl = vlib_buffer_get_free_list 
1259             (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
1260         
1261           mb_index = 0;
1262
1263           while (n_buffers > 0)
1264             {
1265               u32 bi0;
1266               u8 next0, error0;
1267               u32 l3_offset0;
1268               vlib_buffer_t * b0, * b_seg, * b_chain = 0;
1269               ethernet_header_t * h0;
1270               u8 nb_seg = 1;
1271               struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
1272               struct rte_mbuf *mb_seg = mb->next;
1273                 
1274               if (PREDICT_TRUE(n_buffers > 2)) /* we prefetch two mbufs ahead */
1275                 {
1276                   struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
1277                   vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1278                   CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1279                   CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1280                   CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
1281                 }
1282                 
1283               b0 = (vlib_buffer_t *)(mb+1);
1284
1285               /* check whether EFD is looking for packets to discard */
1286               if (PREDICT_FALSE(efd_discard_burst))
1287                 {
1288                   u32 cntr_type;
1289                   if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
1290                     {
1291                       rte_pktmbuf_free(mb);
1292                       xd->efd_agent.discard_cnt++;
1293                       increment_efd_drop_counter(vm, 
1294                                                  cntr_type,
1295                                                  1);
1296
1297                       n_buffers--;
1298                       mb_index++;
1299                       continue;
1300                     }
1301                 }
1302               
1303               /* Prefetch one next segment if it exists */
1304               if (PREDICT_FALSE(mb->nb_segs > 1))
1305                 {
1306                   struct rte_mbuf *pfmb = mb->next;
1307                   vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1308                   CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1309                   CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1310                   b_chain = b0;
1311                 }
1312
1313               bi0 = vlib_get_buffer_index (vm, b0);
1314               vlib_buffer_init_for_free_list (b0, fl);
1315               b0->clone_count = 0;
1316
1317               dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
1318                                                        &next0, &error0);
1319 #ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
1320               /*
1321                * Clear overloaded TX offload flags when a DPDK driver
1322                * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
1323                */
1324               if (PREDICT_TRUE(trace_cnt == 0))
1325                 mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
1326               else
1327                 trace_cnt--;
1328 #endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
1329
1330               if (error0)
1331                   clib_warning ("bi %d error %d", bi0, error0);
1332
1333               b0->error = 0;
1334
1335               l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
1336                              next0 == DPDK_RX_NEXT_IP6_INPUT || 
1337                              next0 == DPDK_RX_NEXT_MPLS_INPUT) ? 
1338                             sizeof (ethernet_header_t) : 0);
1339
1340               b0->current_data = l3_offset0;
1341               b0->current_length = mb->data_len - l3_offset0;
1342
1343               b0->flags = buffer_flags_template;
1344
1345               if (VMWARE_LENGTH_BUG_WORKAROUND)
1346                   b0->current_length -= 4;
1347                 
1348               vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1349               vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
1350               vnet_buffer(b0)->io_handoff.next_index = next0;
1351               n_rx_bytes += mb->pkt_len;
1352
1353               /* Process subsequent segments of multi-segment packets */
1354               while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
1355                 {
1356                   ASSERT(mb_seg != 0);
1357  
1358                   b_seg = (vlib_buffer_t *)(mb_seg+1);
1359                   vlib_buffer_init_for_free_list (b_seg, fl);
1360                   b_seg->clone_count = 0;
1361  
1362                   ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
1363                   ASSERT(b_seg->current_data == 0);
1364  
1365                   /*
1366                    * The driver (e.g. virtio) may not put the packet data at the start
1367                    * of the segment, so don't assume b_seg->current_data == 0 is correct.
1368                    */
1369                   b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;
1370
1371                   b_seg->current_length = mb_seg->data_len;
1372                   b0->total_length_not_including_first_buffer +=
1373                     mb_seg->data_len;
1374  
1375                   b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
1376                   b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
1377  
1378                   b_chain = b_seg;
1379                   mb_seg = mb_seg->next;
1380                   nb_seg++;
1381                 }
1382
1383               /*
1384                * Turn this on if you run into
1385                * "bad monkey" contexts, and you want to know exactly
1386                * which nodes they've visited... See main.c...
1387                */
1388               VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
1389  
1390               if (PREDICT_FALSE (n_trace > mb_index))
1391                 vec_add1 (xd->d_trace_buffers, bi0);
1392
1393               next_worker_index = first_worker_index;
1394
1395               /* 
1396                * Force unknown traffic onto worker 0, 
1397                * and into ethernet-input. $$$$ add more hashes.
1398                */
1399               h0 = (ethernet_header_t *) b0->data;
1400
1401               /* Compute ingress LB hash */
1402               hash_key = eth_get_key(h0);
1403               hash = (u32)clib_xxhash(hash_key);
1404
1405               if (PREDICT_TRUE (is_pow2(num_workers)))
1406                 next_worker_index += hash & (num_workers - 1);
1407               else
1408                 next_worker_index += hash % num_workers;
1409
1410               /* if EFD is enabled and not already discarding from dpdk,
1411                * check the worker ring/queue for congestion
1412                */
1413               if (PREDICT_FALSE(tm->efd.enabled && !efd_discard_burst))
1414                 {
1415                   vlib_frame_queue_t *fq;
1416
1417                   /* fq will be valid if the ring is congested */
1418                   fq = is_vlib_handoff_queue_congested(
1419                       next_worker_index, tm->efd.queue_hi_thresh,
1420                       congested_handoff_queue_by_worker_index);
1421                   
1422                   if (PREDICT_FALSE(fq != NULL))
1423                     {
1424                       u32 cntr_type;
1425                       if (PREDICT_TRUE(cntr_type =
1426                                        is_efd_discardable(tm, b0, mb)))
1427                         {
1428                           /* discard the packet */
1429                           fq->enqueue_efd_discards++;
1430                           increment_efd_drop_counter(vm, cntr_type, 1);
1431                           rte_pktmbuf_free(mb);
1432                           n_buffers--;
1433                           mb_index++;
1434                           continue;
1435                         }
1436                     }
1437                 }
1438               
1439               if (next_worker_index != current_worker_index)
1440                 {
1441                   if (hf)
1442                     hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1443
1444                   hf = dpdk_get_handoff_queue_elt(
1445                            next_worker_index,
1446                            handoff_queue_elt_by_worker_index);
1447                       
1448                   n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
1449                   to_next_worker = &hf->buffer_index[hf->n_vectors];
1450                   current_worker_index = next_worker_index;
1451                 }
1452               
1453               /* enqueue to correct worker thread */
1454               to_next_worker[0] = bi0;
1455               to_next_worker++;
1456               n_left_to_next_worker--;
1457
1458               if (n_left_to_next_worker == 0)
1459                 {
1460                   hf->n_vectors = VLIB_FRAME_SIZE;
1461                   vlib_put_handoff_queue_elt(hf);
1462                   current_worker_index = ~0;
1463                   handoff_queue_elt_by_worker_index[next_worker_index] = 0;
1464                   hf = 0;
1465                 }
1466                   
1467               n_buffers--;
1468               mb_index++;
1469             }
1470
1471           if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
1472             {
1473               /* credit the trace to the trace node */
1474               dpdk_rx_trace (dm, node_trace, xd, queue_id, xd->d_trace_buffers,
1475                              vec_len (xd->d_trace_buffers));
1476               vlib_set_trace_count (vm, node_trace, n_trace - vec_len (xd->d_trace_buffers));
1477             }
1478
1479           vlib_increment_combined_counter 
1480             (vnet_get_main()->interface_main.combined_sw_if_counters
1481              + VNET_INTERFACE_COUNTER_RX,
1482              cpu_index, 
1483              xd->vlib_sw_if_index,
1484              mb_index, n_rx_bytes);
1485
1486           dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
1487           dw->aggregate_rx_packets += mb_index;
1488         }
1489
1490       if (hf)
1491         hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1492
1493       /* Ship frames to the worker nodes */
1494       for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
1495         {
1496           if (handoff_queue_elt_by_worker_index[i])
1497             {
1498               hf = handoff_queue_elt_by_worker_index[i];
1499               /* 
1500                * It works better to let the handoff node
1501                * rate-adapt, always ship the handoff queue element.
1502                */
1503               if (1 || hf->n_vectors == hf->last_n_vectors)
1504                 {
1505                   vlib_put_handoff_queue_elt(hf);
1506                   handoff_queue_elt_by_worker_index[i] = 0;
1507                 }
1508               else
1509                 hf->last_n_vectors = hf->n_vectors;
1510             }
1511           congested_handoff_queue_by_worker_index[i] = (vlib_frame_queue_t *)(~0);
1512         }
1513       hf = 0;
1514       current_worker_index = ~0;
1515
1516       vlib_increment_main_loop_counter (vm);
1517     }
1518 }
1519
1520 /*
1521  * This function is used when the main thread performs IO and feeds the
1522  * worker threads.
1523  */
1524 static uword
1525 dpdk_io_input (vlib_main_t * vm,
1526                vlib_node_runtime_t * node,
1527                vlib_frame_t * f)
1528 {
1529   dpdk_main_t * dm = &dpdk_main;
1530   dpdk_device_t * xd;
1531   vlib_thread_main_t * tm = vlib_get_thread_main();
1532   uword n_rx_packets = 0;
1533   static vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index;
1534   static vlib_frame_queue_t ** congested_handoff_queue_by_worker_index = 0;
1535   vlib_frame_queue_elt_t * hf = 0;
1536   int i;
1537   u32 n_left_to_next_worker = 0, * to_next_worker = 0;
1538   u32 next_worker_index = 0;
1539   u32 current_worker_index = ~0;
1540   u32 cpu_index = os_get_cpu_number();
1541   static int num_workers_set;
1542   static u32 num_workers;
1543   u16 queue_id = 0;
1544   vlib_node_runtime_t * node_trace;
1545   static u32 first_worker_index;
1546   u32 buffer_flags_template;
1547
1548   if (PREDICT_FALSE(num_workers_set == 0))
1549     {
1550       uword * p;
1551       vlib_thread_registration_t * tr;
1552       /* Only the standard vnet worker threads are supported */
1553       p = hash_get_mem (tm->thread_registrations_by_name, "workers");
1554       tr = p ? (vlib_thread_registration_t *) p[0] : 0;
1555       if (tr) 
1556         {
1557           num_workers = tr->count;
1558           first_worker_index = tr->first_index;
1559         }
1560       num_workers_set = 1;
1561     }
1562
1563   if (PREDICT_FALSE(handoff_queue_elt_by_worker_index == 0))
1564     {
1565       vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);
1566       
1567       vec_validate_init_empty (congested_handoff_queue_by_worker_index,
1568                                first_worker_index + num_workers - 1,
1569                                (vlib_frame_queue_t *)(~0));
1570     }
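  /*
   * Note: the worker-count lookup and the two handoff vectors above are
   * function-local statics, so they are initialized on the first call to
   * this node function only and reused on every subsequent dispatch.
   */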
1571
1572   /* packet tracing is triggered on the dpdk-input node for ease-of-use */
1573   node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);
1574
1575   buffer_flags_template = dm->buffer_flags_template;
1576
1577   vec_foreach (xd, dm->devices)
1578     {
1579       u32 n_buffers;
1580       u32 mb_index;
1581       uword n_rx_bytes = 0;
1582       u32 n_trace, trace_cnt __attribute__((unused));
1583       vlib_buffer_free_list_t * fl;
1584       u32 hash;
1585       u64 hash_key;
1586       u8 efd_discard_burst = 0;
1587
1588       if (!xd->admin_up)
1589         continue;
1590
1591       n_buffers = dpdk_rx_burst(dm, xd, queue_id );
1592
1593       if (n_buffers == 0)
1594         {
1595           /* check if EFD (dpdk) is enabled */
1596           if (PREDICT_FALSE(dm->efd.enabled))
1597             {
1598               /* reset a few stats */
1599               xd->efd_agent.last_poll_time = 0;
1600               xd->efd_agent.last_burst_sz = 0;
1601             }
1602           continue;
1603         }
1604
1605       vec_reset_length (xd->d_trace_buffers);
1606       trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);
1607         
1608       /*
1609        * DAW-FIXME: VMXNET3 device stop/start doesn't work, so
1610        * fake the stop in the dpdk driver by silently dropping
1611        * all of the incoming packets instead of stopping the
1612        * driver / hardware.
1613        */
1614       if (PREDICT_FALSE(xd->admin_up != 1))
1615         {
1616           for (mb_index = 0; mb_index < n_buffers; mb_index++)
1617             rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
1618           continue;
1619         }
1620
1621       /* Check for congestion if EFD (Early-Fast-Discard) is enabled
1622        * in any mode (e.g. dpdk, monitor, or drop_all)
1623        */
1624       if (PREDICT_FALSE(dm->efd.enabled))
1625         {
1626           /* update EFD counters */
1627           dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);
1628
1629           if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
1630             {
1631               /* discard all received packets */
1632               for (mb_index = 0; mb_index < n_buffers; mb_index++)
1633                 rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);
1634
1635               xd->efd_agent.discard_cnt += n_buffers;
1636               increment_efd_drop_counter(vm, 
1637                                          DPDK_ERROR_VLAN_EFD_DROP_PKTS,
1638                                          n_buffers);
1639             
1640               continue;
1641             }
1642           
1643           if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
1644                             dm->efd.consec_full_frames_hi_thresh))
1645             {
1646               u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
1647                                                            queue_id);
1648               if (device_queue_sz >= dm->efd.queue_hi_thresh)
1649                 {
1650                   /* dpdk device queue has reached the critical threshold */
1651                   xd->efd_agent.congestion_cnt++;
1652
1653                   /* apply EFD to packets from the burst */
1654                   efd_discard_burst = 1;
1655                 }
1656             }
1657         }
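      /*
       * At this point efd_discard_burst is set only if both the consecutive
       * full-frame count and the device RX queue depth crossed their
       * thresholds; each packet in the burst is then tested individually
       * with is_efd_discardable() in the loop below.
       */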
1658       
1659       fl = vlib_buffer_get_free_list 
1660         (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
1661           
1662       mb_index = 0;
1663
1664       while (n_buffers > 0)
1665         {
1666           u32 bi0;
1667           u8 next0, error0;
1668           u32 l3_offset0;
1669           vlib_buffer_t * b0, * b_seg, * b_chain = 0;
1670           ethernet_header_t * h0;
1671           u8 nb_seg = 1;
1672           struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
1673           struct rte_mbuf *mb_seg = mb->next;
1674
1675           if (PREDICT_TRUE(n_buffers > 1))
1676             {
1677               struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
1678               vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1679               CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1680               CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1681               CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
1682             }
1683                 
1684           b0 = (vlib_buffer_t *)(mb+1);
1685                 
1686           /* check whether EFD is looking for packets to discard */
1687           if (PREDICT_FALSE(efd_discard_burst))
1688             {
1689               u32 cntr_type;
1690               if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
1691                 {
1692                   rte_pktmbuf_free(mb);
1693                   xd->efd_agent.discard_cnt++;
1694                   increment_efd_drop_counter(vm, 
1695                                              cntr_type,
1696                                              1);
1697
1698                   n_buffers--;
1699                   mb_index++;
1700                   continue;
1701                 }
1702             }
1703
1704           /* Prefetch one next segment if it exists */
1705           if (PREDICT_FALSE(mb->nb_segs > 1))
1706             {
1707               struct rte_mbuf *pfmb = mb->next;
1708               vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1709               CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1710               CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1711               b_chain = b0;
1712             }
1713
1714           bi0 = vlib_get_buffer_index (vm, b0);
1715           vlib_buffer_init_for_free_list (b0, fl);
1716           b0->clone_count = 0;
1717
1718           dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
1719                                                    &next0, &error0);
1720 #ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
1721           /*
1722            * Clear overloaded TX offload flags when a DPDK driver
1723            * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
1724            */
1725           if (PREDICT_TRUE(trace_cnt == 0))
1726             mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
1727           else
1728             trace_cnt--;
1729 #endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
1730
1731           if (error0)
1732             clib_warning ("bi %d error %d", bi0, error0);
1733
1734           b0->error = 0;
1735
1736           l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
1737                          next0 == DPDK_RX_NEXT_IP6_INPUT || 
1738                          next0 == DPDK_RX_NEXT_MPLS_INPUT) ? 
1739                         sizeof (ethernet_header_t) : 0);
1740
1741           b0->current_data = l3_offset0;
1742           b0->current_length = mb->data_len - l3_offset0;
1743
1744           b0->flags = buffer_flags_template;
1745                 
1746           if (VMWARE_LENGTH_BUG_WORKAROUND)
1747               b0->current_length -= 4;
1748
1749           vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1750           vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
1751           vnet_buffer(b0)->io_handoff.next_index = next0;
1752           n_rx_bytes += mb->pkt_len;
1753
1754           /* Process subsequent segments of multi-segment packets */
1755           while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
1756             {
1757               ASSERT(mb_seg != 0);
1758  
1759               b_seg = (vlib_buffer_t *)(mb_seg+1);
1760               vlib_buffer_init_for_free_list (b_seg, fl);
1761               b_seg->clone_count = 0;
1762  
1763               ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
1764               ASSERT(b_seg->current_data == 0);
1765  
1766               /*
1767                * The driver (e.g. virtio) may not place packet data at the start of the
1768                * segment, so derive current_data from the mbuf instead of assuming zero.
1769                */
1770               b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;
1771
1772               b_seg->current_length = mb_seg->data_len;
1773               b0->total_length_not_including_first_buffer +=
1774                 mb_seg->data_len;
1775  
1776               b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
1777               b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
1778  
1779               b_chain = b_seg;
1780               mb_seg = mb_seg->next;
1781               nb_seg++;
1782             }
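          /*
           * b0 now heads a vlib buffer chain that mirrors the rte_mbuf chain;
           * total_length_not_including_first_buffer holds the sum of the
           * trailing segment lengths.
           */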
1783  
1784           /*
1785            * Turn this on if you run into
1786            * "bad monkey" contexts, and you want to know exactly
1787            * which nodes they've visited... See main.c...
1788            */
1789           VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
1790  
1791           if (PREDICT_FALSE (n_trace > mb_index))
1792             vec_add1 (xd->d_trace_buffers, bi0);
1793
1794           next_worker_index = first_worker_index;
1795
1796           /* 
1797            * Force unknown traffic onto worker 0, 
1798            * and into ethernet-input. $$$$ add more hashes.
1799            */
1800           h0 = (ethernet_header_t *) b0->data;
1801
1802           /* Compute ingress LB hash */
1803           hash_key = eth_get_key(h0);
1804           hash = (u32)clib_xxhash(hash_key);
1805
1806           if (PREDICT_TRUE (is_pow2(num_workers)))
1807             next_worker_index += hash & (num_workers - 1);
1808           else
1809             next_worker_index += hash % num_workers;
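          /*
           * When num_workers is a power of two, (hash & (num_workers - 1))
           * selects the same worker as (hash % num_workers); the bitwise AND
           * is simply cheaper in this per-packet path.
           */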
1810
1811           /* if EFD is enabled and not already discarding from dpdk,
1812            * check the worker ring/queue for congestion
1813            */
1814           if (PREDICT_FALSE(tm->efd.enabled && !efd_discard_burst))
1815             {
1816               vlib_frame_queue_t *fq;
1817
1818               /* fq will be valid if the ring is congested */
1819               fq = is_vlib_handoff_queue_congested(
1820                   next_worker_index, tm->efd.queue_hi_thresh,
1821                   congested_handoff_queue_by_worker_index);
1822               
1823               if (PREDICT_FALSE(fq != NULL))
1824                 {
1825                   u32 cntr_type;
1826                   if (PREDICT_TRUE(cntr_type =
1827                                    is_efd_discardable(tm, b0, mb)))
1828                     {
1829                       /* discard the packet */
1830                       fq->enqueue_efd_discards++;
1831                       increment_efd_drop_counter(vm, cntr_type, 1);
1832                       rte_pktmbuf_free(mb);
1833                       n_buffers--;
1834                       mb_index++;
1835                       continue;
1836                     }
1837                 }
1838             }
1839           
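          /*
           * Packets are batched per destination worker: when the hash picks a
           * different worker than the previous packet, record how much of the
           * current frame has been filled and switch to (or allocate) the
           * handoff queue element of the new worker.
           */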
1840           if (next_worker_index != current_worker_index)
1841             {
1842               if (hf)
1843                 hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1844
1845               hf = dpdk_get_handoff_queue_elt(
1846                      next_worker_index,
1847                      handoff_queue_elt_by_worker_index);
1848
1849               n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
1850               to_next_worker = &hf->buffer_index[hf->n_vectors];
1851               current_worker_index = next_worker_index;
1852             }
1853           
1854           /* enqueue to correct worker thread */
1855           to_next_worker[0] = bi0;
1856           to_next_worker++;
1857           n_left_to_next_worker--;
1858
1859           if (n_left_to_next_worker == 0)
1860             {
1861               hf->n_vectors = VLIB_FRAME_SIZE;
1862               vlib_put_handoff_queue_elt(hf);
1863               current_worker_index = ~0;
1864               handoff_queue_elt_by_worker_index[next_worker_index] = 0;
1865               hf = 0;
1866             }
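          /*
           * A completely filled frame is shipped to its worker immediately;
           * partially filled frames are flushed after the device loop in the
           * "Ship frames to the worker nodes" pass below.
           */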
1867           
1868           n_buffers--;
1869           mb_index++;
1870         }
1871
1872       if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
1873         {
1874           /* credit the trace to the trace node */
1875           dpdk_rx_trace (dm, node_trace, xd, queue_id, xd->d_trace_buffers,
1876                          vec_len (xd->d_trace_buffers));
1877           vlib_set_trace_count (vm, node_trace, n_trace - vec_len (xd->d_trace_buffers));
1878         }
1879
1880       vlib_increment_combined_counter 
1881         (vnet_get_main()->interface_main.combined_sw_if_counters
1882          + VNET_INTERFACE_COUNTER_RX,
1883          cpu_index, 
1884          xd->vlib_sw_if_index,
1885          mb_index, n_rx_bytes);
1886
1887       dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
1888       dw->aggregate_rx_packets += mb_index;
1889       n_rx_packets += mb_index;
1890     }
1891
1892   if (hf)
1893     hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1894   
1895   /* Ship frames to the worker nodes */
1896   for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
1897     {
1898       if (handoff_queue_elt_by_worker_index[i])
1899         {
1900           hf = handoff_queue_elt_by_worker_index[i];
1901           /* 
1902            * It works better to let the handoff node rate-adapt, so always
1903            * ship the handoff queue element (hence the "1 ||" below).
1904            */
1905           if (1 || hf->n_vectors == hf->last_n_vectors)
1906             {
1907               vlib_put_handoff_queue_elt(hf);
1908               handoff_queue_elt_by_worker_index[i] = 0;
1909             }
1910           else
1911             hf->last_n_vectors = hf->n_vectors;
1912         }
1913       congested_handoff_queue_by_worker_index[i] = (vlib_frame_queue_t *)(~0);
1914     }
1915   hf = 0;
1916   current_worker_index = ~0;
1917   return n_rx_packets;
1918 }
1919
1920 VLIB_REGISTER_NODE (dpdk_io_input_node) = {
1921   .function = dpdk_io_input,
1922   .type = VLIB_NODE_TYPE_INPUT,
1923   .name = "dpdk-io-input",
1924
1925   /* Will be enabled if/when hardware is detected. */
1926   .state = VLIB_NODE_STATE_DISABLED,
1927
1928   .format_buffer = format_ethernet_header_with_length,
1929   .format_trace = format_dpdk_rx_dma_trace,
1930
1931   .n_errors = DPDK_N_ERROR,
1932   .error_strings = dpdk_error_strings,
1933
1934   .n_next_nodes = DPDK_RX_N_NEXT,
1935   .next_nodes = {
1936     [DPDK_RX_NEXT_DROP] = "error-drop",
1937     [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
1938     [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1939     [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
1940     [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
1941   },
1942 };
1943
1944 /*
1945  * set_efd_bitmap()
1946  * Set the bits of *bitmap below (LESS_THAN) or at/above (GREATER_OR_EQUAL) the given value.
1947  */
1948 void
1949 set_efd_bitmap (u8 *bitmap, u32 value, u32 op)
1950 {
1951     int ix;
1952
1953     *bitmap = 0;
1954     for (ix = 0; ix < 8; ix++) {
1955         if (((op == EFD_OPERATION_LESS_THAN) && (ix < value)) ||
1956             ((op == EFD_OPERATION_GREATER_OR_EQUAL) && (ix >= value))){
1957             (*bitmap) |= (1 << ix);
1958         }
1959     }
1960 }
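/*
 * Examples of the mapping implemented above (for illustration only):
 *   set_efd_bitmap (&bm, 3, EFD_OPERATION_LESS_THAN)        => bm == 0x07
 *   set_efd_bitmap (&bm, 6, EFD_OPERATION_GREATER_OR_EQUAL) => bm == 0xc0
 * Bits set in the resulting bitmap mark the precedence / EXP / CoS values
 * that the EFD checks treat as discard-eligible under congestion.
 */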
1961
1962 void
1963 efd_config (u32 enabled, 
1964             u32 ip_prec,  u32 ip_op,
1965             u32 mpls_exp, u32 mpls_op,
1966             u32 vlan_cos, u32 vlan_op)
1967 {
1968    vlib_thread_main_t * tm = vlib_get_thread_main();
1969    dpdk_main_t * dm = &dpdk_main;
1970
1971    if (enabled) {
1972        tm->efd.enabled |= VLIB_EFD_DISCARD_ENABLED;
1973        dm->efd.enabled |= DPDK_EFD_DISCARD_ENABLED;
1974    } else {
1975        tm->efd.enabled &= ~VLIB_EFD_DISCARD_ENABLED;
1976        dm->efd.enabled &= ~DPDK_EFD_DISCARD_ENABLED;
1977    }
1978
1979    set_efd_bitmap(&tm->efd.ip_prec_bitmap, ip_prec, ip_op);
1980    set_efd_bitmap(&tm->efd.mpls_exp_bitmap, mpls_exp, mpls_op);
1981    set_efd_bitmap(&tm->efd.vlan_cos_bitmap, vlan_cos, vlan_op);
1982
1983 }
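
/*
 * Illustrative sketch only -- example_efd_setup() is a hypothetical helper,
 * not part of this file: enable EFD and mark IP precedence, MPLS EXP and
 * VLAN CoS values below 6 as discard-eligible. The threshold of 6 is an
 * arbitrary example value, not a recommendation.
 */
#if 0
static void
example_efd_setup (void)
{
  efd_config (1 /* enabled */,
              6, EFD_OPERATION_LESS_THAN,   /* ip_prec,  ip_op   */
              6, EFD_OPERATION_LESS_THAN,   /* mpls_exp, mpls_op */
              6, EFD_OPERATION_LESS_THAN);  /* vlan_cos, vlan_op */
}
#endif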