Add support for multiple microarchitectures in a single binary
vpp.git: vnet/vnet/devices/dpdk/node.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/xxhash.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/dpdk/dpdk.h>
#include <vnet/classify/vnet_classify.h>
#include <vnet/mpls-gre/packet.h>

#include "dpdk_priv.h"

#ifndef MAX
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif

#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif

/*
 * At least in certain versions of ESXi, vmware e1000's don't honor the
 * "strip rx CRC" bit. Set this flag to work around that bug FOR UNIT TEST ONLY.
 *
 * If wireshark complains like so:
 *
 * "Frame check sequence: 0x00000000 [incorrect, should be <hex-num>]"
 * and you're using ESXi emulated e1000's, set this flag FOR UNIT TEST ONLY.
 *
 * Note: do NOT check in this file with this workaround enabled! You'll lose
 * actual data from e.g. 10xGE interfaces. The extra 4 bytes annoy
 * wireshark, but they're harmless...
 */
#define VMWARE_LENGTH_BUG_WORKAROUND 0

typedef struct {
  u32 cached_next_index;

  /* convenience variables */
  vlib_main_t * vlib_main;
  vnet_main_t * vnet_main;
} handoff_dispatch_main_t;

typedef struct {
  u32 buffer_index;
  u32 next_index;
  u32 sw_if_index;
} handoff_dispatch_trace_t;

/* packet trace format function */
static u8 * format_handoff_dispatch_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  handoff_dispatch_trace_t * t = va_arg (*args, handoff_dispatch_trace_t *);

  s = format (s, "HANDOFF_DISPATCH: sw_if_index %d next_index %d buffer 0x%x",
      t->sw_if_index,
      t->next_index,
      t->buffer_index);
  return s;
}

handoff_dispatch_main_t handoff_dispatch_main;

vlib_node_registration_t handoff_dispatch_node;

#define foreach_handoff_dispatch_error \
_(EXAMPLE, "example packets")

typedef enum {
#define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
  foreach_handoff_dispatch_error
#undef _
  HANDOFF_DISPATCH_N_ERROR,
} handoff_dispatch_error_t;

static char * handoff_dispatch_error_strings[] = {
#define _(sym,string) string,
  foreach_handoff_dispatch_error
#undef _
};

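/*
 * Mark a handoff queue element as ready to consume. The memory barrier
 * ensures the frame contents written by this thread are globally
 * visible before the consumer sees valid = 1.
 */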
static inline
void vlib_put_handoff_queue_elt (vlib_frame_queue_elt_t * hf)
{
  CLIB_MEMORY_BARRIER();
  hf->valid = 1;
}

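/*
 * Handoff dispatch node: drains frames queued by the IO threads and
 * sends each buffer to the next node previously recorded in
 * vnet_buffer(b)->io_handoff.next_index.
 */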
static uword
handoff_dispatch_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  dpdk_rx_next_t next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          next0 = vnet_buffer(b0)->io_handoff.next_index;
          next1 = vnet_buffer(b1)->io_handoff.next_index;

          if (PREDICT_FALSE(vm->trace_main.trace_active_hint))
            {
              if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  t->buffer_index = bi0;
                }
              if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next1, b1, /* follow_chain */ 0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index1;
                  t->next_index = next1;
                  t->buffer_index = bi1;
                }
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          u32 sw_if_index0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          next0 = vnet_buffer(b0)->io_handoff.next_index;

          if (PREDICT_FALSE(vm->trace_main.trace_active_hint))
            {
              if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  t->buffer_index = bi0;
                }
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_REGISTER_NODE (handoff_dispatch_node) = {
  .function = handoff_dispatch_node_fn,
  .name = "handoff-dispatch",
  .vector_size = sizeof (u32),
  .format_trace = format_handoff_dispatch_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .flags = VLIB_NODE_FLAG_IS_HANDOFF,

  .n_errors = ARRAY_LEN(handoff_dispatch_error_strings),
  .error_strings = handoff_dispatch_error_strings,

  .n_next_nodes = DPDK_RX_N_NEXT,

  .next_nodes = {
        [DPDK_RX_NEXT_DROP] = "error-drop",
        [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
        [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input",
        [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
        [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (handoff_dispatch_node, handoff_dispatch_node_fn)

clib_error_t *handoff_dispatch_init (vlib_main_t *vm)
{
  handoff_dispatch_main_t * mp = &handoff_dispatch_main;

  mp->vlib_main = vm;
  mp->vnet_main = &vnet_main;

  return 0;
}

VLIB_INIT_FUNCTION (handoff_dispatch_init);

u32 dpdk_get_handoff_node_index (void)
{
  return handoff_dispatch_node.index;
}

static char * dpdk_error_strings[] = {
#define _(n,s) s,
    foreach_dpdk_error
#undef _
};

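/*
 * Map one mbuf's offload flags to a next-node index and a DPDK error
 * code. Checksum failures and driver-flagged errors go to the drop
 * node; otherwise the next node is chosen from the packet type.
 */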
always_inline void
dpdk_rx_next_and_error_from_mb_flags_x1 (dpdk_device_t *xd, struct rte_mbuf *mb,
                                         vlib_buffer_t *b0,
                                         u8 * next0, u8 * error0)
{
  u8 is0_ip4, is0_ip6, is0_mpls, n0;
  uint16_t mb_flags = mb->ol_flags;

  if (PREDICT_FALSE(mb_flags & (
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
       PKT_EXT_RX_PKT_ERROR | PKT_EXT_RX_BAD_FCS   |
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
        PKT_RX_IP_CKSUM_BAD  | PKT_RX_L4_CKSUM_BAD
    )))
    {
      /* some error was flagged. determine the drop reason */
      n0 = DPDK_RX_NEXT_DROP;
      *error0 =
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
        (mb_flags & PKT_EXT_RX_PKT_ERROR) ? DPDK_ERROR_RX_PACKET_ERROR :
        (mb_flags & PKT_EXT_RX_BAD_FCS) ? DPDK_ERROR_RX_BAD_FCS :
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
        (mb_flags & PKT_RX_IP_CKSUM_BAD) ? DPDK_ERROR_IP_CHECKSUM_ERROR :
        (mb_flags & PKT_RX_L4_CKSUM_BAD) ? DPDK_ERROR_L4_CHECKSUM_ERROR :
        DPDK_ERROR_NONE;
    }
  else
    {
      *error0 = DPDK_ERROR_NONE;
      if (xd->per_interface_next_index != ~0)
        n0 = xd->per_interface_next_index;
      else if (mb_flags & PKT_RX_VLAN_PKT)
        n0 = DPDK_RX_NEXT_ETHERNET_INPUT;
      else
        {
          n0 = DPDK_RX_NEXT_ETHERNET_INPUT;
#if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
          is0_ip4 = RTE_ETH_IS_IPV4_HDR(mb->packet_type) != 0;
#else
          is0_ip4 = (mb_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV4_HDR_EXT)) != 0;
#endif

          if (PREDICT_TRUE(is0_ip4))
            n0 = DPDK_RX_NEXT_IP4_INPUT;
          else
            {
#if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
              is0_ip6 = RTE_ETH_IS_IPV6_HDR(mb->packet_type) != 0;
#else
              is0_ip6 =
                      (mb_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) != 0;
#endif
              if (PREDICT_TRUE(is0_ip6))
                n0 = DPDK_RX_NEXT_IP6_INPUT;
              else
                {
                  ethernet_header_t *h0 = (ethernet_header_t *) b0->data;
                  is0_mpls = (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST));
                  n0 = is0_mpls ? DPDK_RX_NEXT_MPLS_INPUT : n0;
                }
            }
        }
    }
  *next0 = n0;
}

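/*
 * Record trace entries for a vector of received buffers: each entry
 * captures the mbuf header, the vlib buffer metadata and the initial
 * payload bytes so the packet trace can reconstruct the packet.
 */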
void dpdk_rx_trace (dpdk_main_t * dm,
                    vlib_node_runtime_t * node,
                    dpdk_device_t * xd,
                    u16 queue_id,
                    u32 * buffers,
                    uword n_buffers)
{
  vlib_main_t * vm = vlib_get_main();
  u32 * b, n_left;
  u8 next0;

  n_left = n_buffers;
  b = buffers;

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t * b0;
      dpdk_rx_dma_trace_t * t0;
      struct rte_mbuf *mb;
      u8 error0;

      bi0 = b[0];
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      mb = rte_mbuf_from_vlib_buffer(b0);
      dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
                                               &next0, &error0);
      vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->queue_index = queue_id;
      t0->device_index = xd->device_index;
      t0->buffer_index = bi0;

      clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
      clib_memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      clib_memcpy (t0->buffer.pre_data, b0->data, sizeof (t0->buffer.pre_data));

#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
      /*
       * Clear overloaded TX offload flags when a DPDK driver
       * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
       */
      mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */

      b += 1;
    }
}

/*
 * dpdk_efd_update_counters()
 * Update EFD (early-fast-discard) counters
 */
void dpdk_efd_update_counters (dpdk_device_t *xd,
                               u32 n_buffers,
                               u16 enabled)
{
  if (enabled & DPDK_EFD_MONITOR_ENABLED)
    {
      u64 now = clib_cpu_time_now();
      if (xd->efd_agent.last_poll_time > 0)
        {
          u64 elapsed_time = (now - xd->efd_agent.last_poll_time);
          if (elapsed_time > xd->efd_agent.max_poll_delay)
            xd->efd_agent.max_poll_delay = elapsed_time;
        }
      xd->efd_agent.last_poll_time = now;
    }

  xd->efd_agent.total_packet_cnt += n_buffers;
  xd->efd_agent.last_burst_sz = n_buffers;

  if (n_buffers > xd->efd_agent.max_burst_sz)
    xd->efd_agent.max_burst_sz = n_buffers;

  if (PREDICT_FALSE(n_buffers == VLIB_FRAME_SIZE))
    {
      xd->efd_agent.full_frames_cnt++;
      xd->efd_agent.consec_full_frames_cnt++;
    }
  else
    {
      xd->efd_agent.consec_full_frames_cnt = 0;
    }
}

/* is_efd_discardable()
 *   returns a non-zero DPDK error if the packet meets early-fast-discard
 *   criteria, zero otherwise
 */
u32 is_efd_discardable (vlib_thread_main_t *tm,
                        vlib_buffer_t * b0,
                        struct rte_mbuf *mb)
{
  ethernet_header_t *eh = (ethernet_header_t *) b0->data;

  if (eh->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))
    {
      ip4_header_t *ipv4 =
          (ip4_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_prec = (ipv4->tos >> 5);

      return (tm->efd.ip_prec_bitmap & (1 << pkt_prec) ?
                  DPDK_ERROR_IPV4_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }
  else if (eh->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6))
    {
      ip6_header_t *ipv6 =
          (ip6_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_tclass =
          ((ipv6->ip_version_traffic_class_and_flow_label >> 20) & 0xff);

      return (tm->efd.ip_prec_bitmap & (1 << pkt_tclass) ?
                  DPDK_ERROR_IPV6_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }
  else if (eh->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST))
    {
      mpls_unicast_header_t *mpls =
          (mpls_unicast_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_exp = ((mpls->label_exp_s_ttl >> 9) & 0x07);

      return (tm->efd.mpls_exp_bitmap & (1 << pkt_exp) ?
                  DPDK_ERROR_MPLS_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }
  else if ((eh->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ||
           (eh->type == clib_host_to_net_u16(ETHERNET_TYPE_DOT1AD)))
    {
      ethernet_vlan_header_t *vlan =
          (ethernet_vlan_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_cos = ((vlan->priority_cfi_and_id >> 13) & 0x07);

      return (tm->efd.vlan_cos_bitmap & (1 << pkt_cos) ?
                  DPDK_ERROR_VLAN_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }

  return DPDK_ERROR_NONE;
}

/*
 * This function is used when there are no worker threads.
 * The main thread performs IO and forwards the packets.
 */
static inline u32 dpdk_device_input ( dpdk_main_t * dm,
                                      dpdk_device_t * xd,
                                      vlib_node_runtime_t * node,
                                      u32 cpu_index,
                                      u16 queue_id,
                                      int use_efd)
{
  u32 n_buffers;
  u32 next_index = DPDK_RX_NEXT_ETHERNET_INPUT;
  u32 n_left_to_next, * to_next;
  u32 mb_index;
  vlib_main_t * vm = vlib_get_main();
  uword n_rx_bytes = 0;
  u32 n_trace, trace_cnt __attribute__((unused));
  vlib_buffer_free_list_t * fl;
  u8 efd_discard_burst = 0;
  u16 ip_align_offset = 0;
  u32 buffer_flags_template;

  if (xd->admin_up == 0)
    return 0;

  n_buffers = dpdk_rx_burst(dm, xd, queue_id);

  if (n_buffers == 0)
    {
      /* check if EFD (dpdk) is enabled */
      if (PREDICT_FALSE(use_efd && dm->efd.enabled))
        {
          /* reset a few stats */
          xd->efd_agent.last_poll_time = 0;
          xd->efd_agent.last_burst_sz = 0;
        }
      return 0;
    }

  if (xd->pmd == VNET_DPDK_PMD_THUNDERX)
      ip_align_offset = 6;

  buffer_flags_template = dm->buffer_flags_template;

  vec_reset_length (xd->d_trace_buffers);
  trace_cnt = n_trace = vlib_get_trace_count (vm, node);

  fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  /*
   * DAW-FIXME: VMXNET3 device stop/start doesn't work,
   * therefore fake the stop in the dpdk driver by
   * silently dropping all of the incoming pkts instead of
   * stopping the driver / hardware.
   */
  if (PREDICT_FALSE(xd->admin_up != 1))
    {
      for (mb_index = 0; mb_index < n_buffers; mb_index++)
        rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);

      return 0;
    }

  /* Check for congestion if EFD (Early-Fast-Discard) is enabled
   * in any mode (e.g. dpdk, monitor, or drop_all)
   */
  if (PREDICT_FALSE(use_efd && dm->efd.enabled))
    {
      /* update EFD counters */
      dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);

      if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
        {
          /* discard all received packets */
          for (mb_index = 0; mb_index < n_buffers; mb_index++)
            rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);

          xd->efd_agent.discard_cnt += n_buffers;
          increment_efd_drop_counter(vm,
                                     DPDK_ERROR_VLAN_EFD_DROP_PKTS,
                                     n_buffers);

          return 0;
        }

      if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
                        dm->efd.consec_full_frames_hi_thresh))
        {
          u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
                                                       queue_id);
          if (device_queue_sz >= dm->efd.queue_hi_thresh)
            {
              /* dpdk device queue has reached the critical threshold */
              xd->efd_agent.congestion_cnt++;

              /* apply EFD to packets from the burst */
              efd_discard_burst = 1;
            }
        }
    }

  mb_index = 0;

  while (n_buffers > 0)
    {
      u32 bi0;
      u8 next0, error0;
      u32 l3_offset0;
      vlib_buffer_t * b0, * b_seg, * b_chain = 0;
      u32 cntr_type;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_buffers > 0 && n_left_to_next > 0)
        {
          u8 nb_seg = 1;
          struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
          struct rte_mbuf *mb_seg = mb->next;

          if (PREDICT_TRUE(n_buffers > 2))
            {
              struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
              vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
              CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, STORE);
              CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
            }

          ASSERT(mb);

          b0 = vlib_buffer_from_rte_mbuf(mb);

          /* check whether EFD is looking for packets to discard */
          if (PREDICT_FALSE(efd_discard_burst))
            {
              vlib_thread_main_t * tm = vlib_get_thread_main();

              if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
                {
                  rte_pktmbuf_free(mb);
                  xd->efd_agent.discard_cnt++;
                  increment_efd_drop_counter(vm,
                                             cntr_type,
                                             1);
                  n_buffers--;
                  mb_index++;
                  continue;
                }
            }

          /* Prefetch one next segment if it exists. */
          if (PREDICT_FALSE(mb->nb_segs > 1))
            {
              struct rte_mbuf *pfmb = mb->next;
              vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
              CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
              b_chain = b0;
            }

          vlib_buffer_init_for_free_list (b0, fl);
          b0->clone_count = 0;

          bi0 = vlib_get_buffer_index (vm, b0);

          to_next[0] = bi0;
          to_next++;
          n_left_to_next--;

          dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
                                                   &next0, &error0);
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
          /*
           * Clear overloaded TX offload flags when a DPDK driver
           * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
           */

          if (PREDICT_TRUE(trace_cnt == 0))
            mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
          else
            trace_cnt--;
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */

          b0->error = node->errors[error0];

          l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
                         next0 == DPDK_RX_NEXT_IP6_INPUT ||
                         next0 == DPDK_RX_NEXT_MPLS_INPUT) ?
                        sizeof (ethernet_header_t) : 0);

          b0->current_data = l3_offset0;
          b0->current_length = mb->data_len - l3_offset0;

          if (PREDICT_FALSE (ip_align_offset != 0))
            {
              if (next0 == DPDK_RX_NEXT_IP4_INPUT ||
                  next0 == DPDK_RX_NEXT_IP6_INPUT)
                b0->current_data += ip_align_offset;
            }

          b0->flags = buffer_flags_template;

          if (VMWARE_LENGTH_BUG_WORKAROUND)
              b0->current_length -= 4;

          vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
          n_rx_bytes += mb->pkt_len;

          /* Process subsequent segments of multi-segment packets */
          while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
            {
              ASSERT(mb_seg != 0);

              b_seg = vlib_buffer_from_rte_mbuf(mb_seg);
              vlib_buffer_init_for_free_list (b_seg, fl);
              b_seg->clone_count = 0;

              ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
              ASSERT(b_seg->current_data == 0);

              /*
               * The driver (e.g. virtio) may not put the packet data at the start
               * of the segment, so don't assume b_seg->current_data == 0 is correct.
               */
              b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;

              b_seg->current_length = mb_seg->data_len;
              b0->total_length_not_including_first_buffer +=
                mb_seg->data_len;

              b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
              b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);

              b_chain = b_seg;
              mb_seg = mb_seg->next;
              nb_seg++;
            }

          /*
           * Turn this on if you run into
           * "bad monkey" contexts, and you want to know exactly
           * which nodes they've visited... See main.c...
           */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
          if (PREDICT_FALSE (n_trace > mb_index))
            vec_add1 (xd->d_trace_buffers, bi0);
          n_buffers--;
          mb_index++;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
    {
      dpdk_rx_trace (dm, node, xd, queue_id, xd->d_trace_buffers,
                     vec_len (xd->d_trace_buffers));
      vlib_set_trace_count (vm, node, n_trace - vec_len (xd->d_trace_buffers));
    }

  vlib_increment_combined_counter
    (vnet_get_main()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX,
     cpu_index,
     xd->vlib_sw_if_index,
     mb_index, n_rx_bytes);

  dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
  dw->aggregate_rx_packets += mb_index;

  return mb_index;
}

#if VIRL > 0
#define VIRL_SPEED_LIMIT()                         \
  /* Limit the input rate to 1000 vectors / sec */ \
  {                                                \
    struct timespec ts, tsrem;                     \
                                                   \
    ts.tv_sec = 0;                                 \
    ts.tv_nsec = 1000*1000; /* 1ms */              \
                                                   \
    while (nanosleep(&ts, &tsrem) < 0)             \
      {                                            \
        ts = tsrem;                                \
      }                                            \
  }
#else
#define VIRL_SPEED_LIMIT()
#endif


static uword
dpdk_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * f)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  uword n_rx_packets = 0;
  dpdk_device_and_queue_t * dq;
  u32 cpu_index = os_get_cpu_number();

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  vec_foreach (dq, dm->devices_by_cpu[cpu_index])
    {
      xd = vec_elt_at_index(dm->devices, dq->device);
      ASSERT(dq->queue_id == 0);
      n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, 0, 0);
    }

  VIRL_SPEED_LIMIT()

  return n_rx_packets;
}

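/*
 * RSS variant: polls every (device, queue) pair assigned to this cpu
 * instead of assuming a single queue 0 per device.
 */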
uword
dpdk_input_rss (vlib_main_t * vm,
      vlib_node_runtime_t * node,
      vlib_frame_t * f)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  uword n_rx_packets = 0;
  dpdk_device_and_queue_t * dq;
  u32 cpu_index = os_get_cpu_number();

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  vec_foreach (dq, dm->devices_by_cpu[cpu_index])
    {
      xd = vec_elt_at_index(dm->devices, dq->device);
      n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, 0);
    }

  VIRL_SPEED_LIMIT()

  return n_rx_packets;
}

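/*
 * EFD variant: same polling loop, but passes use_efd = 1 so that
 * dpdk_device_input applies early-fast-discard under congestion.
 */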
uword
dpdk_input_efd (vlib_main_t * vm,
      vlib_node_runtime_t * node,
      vlib_frame_t * f)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  uword n_rx_packets = 0;
  dpdk_device_and_queue_t * dq;
  u32 cpu_index = os_get_cpu_number();

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  vec_foreach (dq, dm->devices_by_cpu[cpu_index])
    {
      xd = vec_elt_at_index(dm->devices, dq->device);
      n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, 1);
    }

  VIRL_SPEED_LIMIT()

  return n_rx_packets;
}


VLIB_REGISTER_NODE (dpdk_input_node) = {
  .function = dpdk_input,
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "dpdk-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_dpdk_rx_dma_trace,

  .n_errors = DPDK_N_ERROR,
  .error_strings = dpdk_error_strings,

  .n_next_nodes = DPDK_RX_N_NEXT,
  .next_nodes = {
    [DPDK_RX_NEXT_DROP] = "error-drop",
    [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
    [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
    [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
  },
};


/* emit per-microarchitecture clones of the three dpdk input functions */
VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input)
VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input_rss)
VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input_efd)

/* these macros define dpdk_input_multiarch_select(),
 * dpdk_input_rss_multiarch_select() and dpdk_input_efd_multiarch_select() */
CLIB_MULTIARCH_SELECT_FN(dpdk_input);
CLIB_MULTIARCH_SELECT_FN(dpdk_input_rss);
CLIB_MULTIARCH_SELECT_FN(dpdk_input_efd);
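/*
 * Each generated *_multiarch_select() returns the clone best suited to
 * the CPU the binary is running on, e.g. (sketch of caller-side use):
 *
 *   dpdk_input_node.function = dpdk_input_multiarch_select ();
 */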

/*
 * Override the next nodes for the dpdk input nodes.
 * Must be invoked prior to VLIB_INIT_FUNCTION calls.
 */
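/* Example (hypothetical feature node name):
 *   dpdk_set_next_node (DPDK_RX_NEXT_IP4_INPUT, "my-ip4-feature");
 */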
void dpdk_set_next_node (dpdk_rx_next_t next, char *name)
{
  vlib_node_registration_t *r = &dpdk_input_node;
  vlib_node_registration_t *r_io = &dpdk_io_input_node;
  vlib_node_registration_t *r_handoff = &handoff_dispatch_node;

  switch (next)
    {
    case DPDK_RX_NEXT_IP4_INPUT:
    case DPDK_RX_NEXT_IP6_INPUT:
    case DPDK_RX_NEXT_MPLS_INPUT:
    case DPDK_RX_NEXT_ETHERNET_INPUT:
      r->next_nodes[next] = name;
      r_io->next_nodes[next] = name;
      r_handoff->next_nodes[next] = name;
      break;

    default:
      clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
      break;
    }
}

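/*
 * Claim a slot in a worker's handoff frame queue: atomically advance
 * the tail, wait until the ring has room, then spin until the slot's
 * previous contents have been consumed before reusing it.
 */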
inline vlib_frame_queue_elt_t *
vlib_get_handoff_queue_elt (u32 vlib_worker_index)
{
  vlib_frame_queue_t *fq;
  vlib_frame_queue_elt_t *elt;
  u64 new_tail;

  fq = vlib_frame_queues[vlib_worker_index];
  ASSERT (fq);

  new_tail = __sync_add_and_fetch (&fq->tail, 1);

  /* Wait until a ring slot is available */
  while (new_tail >= fq->head_hint + fq->nelts)
      vlib_worker_thread_barrier_check ();

  elt = fq->elts + (new_tail & (fq->nelts-1));

  /* this would be very bad... */
  while (elt->valid)
    ;

  elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
  elt->last_n_vectors = elt->n_vectors = 0;

  return elt;
}

static inline vlib_frame_queue_elt_t *
dpdk_get_handoff_queue_elt (
    u32 vlib_worker_index,
    vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index)
{
  vlib_frame_queue_elt_t *elt;

  if (handoff_queue_elt_by_worker_index [vlib_worker_index])
      return handoff_queue_elt_by_worker_index [vlib_worker_index];

  elt = vlib_get_handoff_queue_elt (vlib_worker_index);

  handoff_queue_elt_by_worker_index [vlib_worker_index] = elt;

  return elt;
}

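/*
 * Return the worker's frame queue if its depth has reached
 * queue_hi_thresh, caching that verdict for the rest of the burst;
 * return NULL if the queue is not congested.
 */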
static inline vlib_frame_queue_t *
is_vlib_handoff_queue_congested (
    u32 vlib_worker_index,
    u32 queue_hi_thresh,
    vlib_frame_queue_t ** handoff_queue_by_worker_index)
{
  vlib_frame_queue_t *fq;

  fq = handoff_queue_by_worker_index [vlib_worker_index];
  if (fq != (vlib_frame_queue_t *)(~0))
      return fq;

  fq = vlib_frame_queues[vlib_worker_index];
  ASSERT (fq);

  if (PREDICT_FALSE(fq->tail >= (fq->head_hint + queue_hi_thresh))) {
    /* a valid entry in the array will indicate the queue has reached
     * the specified threshold and is congested
     */
    handoff_queue_by_worker_index [vlib_worker_index] = fq;
    fq->enqueue_full_events++;
    return fq;
  }

  return NULL;
}

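/*
 * Flow-hash key extractors: fold the innermost L3 addresses and
 * protocol into a 64-bit key. The IO thread hashes this key with
 * clib_xxhash() to pick a worker, so packets of a given flow always
 * land on the same worker thread.
 */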
static inline u64 ipv4_get_key (ip4_header_t *ip)
{
   u64  hash_key;

   hash_key = *((u64*)(&ip->address_pair)) ^ ip->protocol;

   return hash_key;
}

static inline u64 ipv6_get_key (ip6_header_t *ip)
{
   u64  hash_key;

   hash_key = ip->src_address.as_u64[0] ^
              rotate_left(ip->src_address.as_u64[1],13) ^
              rotate_left(ip->dst_address.as_u64[0],26) ^
              rotate_left(ip->dst_address.as_u64[1],39) ^
              ip->protocol;

   return hash_key;
}

#define MPLS_BOTTOM_OF_STACK_BIT_MASK   0x00000100U
#define MPLS_LABEL_MASK                 0xFFFFF000U
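/*
 * label_exp_s_ttl is read as a u32 straight from the wire, so the
 * host-order masks above are byte-swapped at each use to match the
 * network byte order of the field.
 */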

static inline u64 mpls_get_key (mpls_unicast_header_t *m)
{
   u64                     hash_key;
   u8                      ip_ver;

   /* find the bottom of the MPLS label stack. */
   if (PREDICT_TRUE(m->label_exp_s_ttl &
                    clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
       goto bottom_lbl_found;
   }
   m++;

   if (PREDICT_TRUE(m->label_exp_s_ttl &
                    clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
       goto bottom_lbl_found;
   }
   m++;

   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
       goto bottom_lbl_found;
   }
   m++;

   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
       goto bottom_lbl_found;
   }
   m++;

   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
       goto bottom_lbl_found;
   }

   /* the bottom label was not found - use the last label */
   hash_key = m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);

   return hash_key;

bottom_lbl_found:
   m++;
   ip_ver = (*((u8 *)m) >> 4);

   /* find out if it is IPV4 or IPV6 header */
   if (PREDICT_TRUE(ip_ver == 4)) {
       hash_key = ipv4_get_key((ip4_header_t *)m);
   } else if (PREDICT_TRUE(ip_ver == 6)) {
       hash_key = ipv6_get_key((ip6_header_t *)m);
   } else {
       /* use the bottom label */
       hash_key = (m-1)->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);
   }

   return hash_key;
}

static inline u64 eth_get_key (ethernet_header_t *h0)
{
   u64 hash_key;

   if (PREDICT_TRUE(h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))) {
       hash_key = ipv4_get_key((ip4_header_t *)(h0+1));
   } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6)) {
       hash_key = ipv6_get_key((ip6_header_t *)(h0+1));
   } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
       hash_key = mpls_get_key((mpls_unicast_header_t *)(h0+1));
   } else if ((h0->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ||
              (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_DOT1AD))) {
       ethernet_vlan_header_t * outer = (ethernet_vlan_header_t *)(h0 + 1);

       outer = (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ?
                                  outer+1 : outer;
       if (PREDICT_TRUE(outer->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))) {
           hash_key = ipv4_get_key((ip4_header_t *)(outer+1));
       } else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)) {
           hash_key = ipv6_get_key((ip6_header_t *)(outer+1));
       } else if (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
           hash_key = mpls_get_key((mpls_unicast_header_t *)(outer+1));
       } else {
           hash_key = outer->type;
       }
   } else {
       hash_key = 0;
   }

   return hash_key;
}

/*
 * This function is used when dedicated IO threads feed the worker threads.
 *
 * Devices are allocated to this thread based on instances and instance_id.
 * If instances==0 then the function automatically determines the number
 * of instances of this thread, and allocates devices between them.
 * If instances != 0, then instance_id must be in the range 0..instances-1.
 * The function allocates devices among the specified number of instances,
 * with this thread having the given instance id. This option is used for
 * splitting devices among differently named "io"-type threads.
 */
void dpdk_io_thread (vlib_worker_thread_t * w,
                     u32 instances,
                     u32 instance_id,
                     char *worker_name,
                     dpdk_io_thread_callback_t callback)
{
  vlib_main_t * vm = vlib_get_main();
  vlib_thread_main_t * tm = vlib_get_thread_main();
  vlib_thread_registration_t * tr;
  dpdk_main_t * dm = &dpdk_main;
  char *io_name = w->registration->name;
  dpdk_device_t * xd;
  dpdk_device_t ** my_devices = 0;
  vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index = 0;
  vlib_frame_queue_t ** congested_handoff_queue_by_worker_index = 0;
  vlib_frame_queue_elt_t * hf = 0;
  int i;
  u32 n_left_to_next_worker = 0, * to_next_worker = 0;
  u32 next_worker_index = 0;
  u32 current_worker_index = ~0;
  u32 cpu_index = os_get_cpu_number();
  u32 num_workers = 0;
  u32 num_devices = 0;
  uword * p;
  u16 queue_id = 0;
  vlib_node_runtime_t * node_trace = 0;
  u32 first_worker_index = 0;
  u32 buffer_flags_template;

  /* Wait until the dpdk init sequence is complete */
  while (dm->io_thread_release == 0)
    vlib_worker_thread_barrier_check();

  clib_time_init (&vm->clib_time);

  p = hash_get_mem (tm->thread_registrations_by_name, worker_name);
  ASSERT (p);
  tr = (vlib_thread_registration_t *) p[0];
  if (tr)
    {
      num_workers = tr->count;
      first_worker_index = tr->first_index;
    }

  /* Allocate devices to this thread */
  if (instances == 0)
    {
      /* auto-assign */
      instance_id = w->instance_id;

      p = hash_get_mem (tm->thread_registrations_by_name, io_name);
      tr = (vlib_thread_registration_t *) p[0];
      /* Otherwise, how did we get here */
      ASSERT (tr && tr->count);
      instances = tr->count;
    }
  else
    {
      /* manually assign */
      ASSERT (instance_id < instances);
    }

  vec_validate (handoff_queue_elt_by_worker_index,
                first_worker_index + num_workers - 1);

  vec_validate_init_empty (congested_handoff_queue_by_worker_index,
                           first_worker_index + num_workers - 1,
                           (vlib_frame_queue_t *)(~0));

  buffer_flags_template = dm->buffer_flags_template;

  /* And handle them... */
  while (1)
    {
      u32 n_buffers;
      u32 mb_index;
      uword n_rx_bytes = 0;
      u32 n_trace, trace_cnt __attribute__((unused));
      vlib_buffer_free_list_t * fl;
      u32 hash;
      u64 hash_key;
      u8 efd_discard_burst;

      vlib_worker_thread_barrier_check ();

      /* Invoke callback if supplied */
      if (PREDICT_FALSE(callback != NULL))
          callback(vm);

      if (PREDICT_FALSE(vec_len(dm->devices) != num_devices))
      {
        vec_reset_length(my_devices);
        vec_foreach (xd, dm->devices)
          {
            if (((xd - dm->devices) % tr->count) == instance_id)
              {
                fprintf(stderr, "i/o thread %d (cpu %d) takes port %d\n",
                        instance_id, (int) os_get_cpu_number(), (int) (xd - dm->devices));
                vec_add1 (my_devices, xd);
              }
          }
        num_devices = vec_len(dm->devices);
      }

      for (i = 0; i < vec_len (my_devices); i++)
      {
          xd = my_devices[i];

          if (!xd->admin_up)
            continue;

          n_buffers = dpdk_rx_burst(dm, xd, 0 /* queue_id */);

          if (n_buffers == 0)
            {
              /* check if EFD (dpdk) is enabled */
              if (PREDICT_FALSE(dm->efd.enabled))
                {
                  /* reset a few stats */
                  xd->efd_agent.last_poll_time = 0;
                  xd->efd_agent.last_burst_sz = 0;
                }
              continue;
            }

          trace_cnt = n_trace = 0;
          if (PREDICT_FALSE(vm->trace_main.trace_active_hint))
            {
              /*
               * packet tracing is triggered on the dpdk-input node for
               * ease-of-use. Re-fetch the node_runtime for dpdk-input
               * in case it has changed.
               */
              node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);

              vec_reset_length (xd->d_trace_buffers);
              trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);
            }

          /*
           * DAW-FIXME: VMXNET3 device stop/start doesn't work,
           * therefore fake the stop in the dpdk driver by
           * silently dropping all of the incoming pkts instead of
           * stopping the driver / hardware.
           */
          if (PREDICT_FALSE(xd->admin_up != 1))
            {
              for (mb_index = 0; mb_index < n_buffers; mb_index++)
                rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
              continue;
            }

          /* reset EFD action for the burst */
          efd_discard_burst = 0;

          /* Check for congestion if EFD (Early-Fast-Discard) is enabled
           * in any mode (e.g. dpdk, monitor, or drop_all)
           */
          if (PREDICT_FALSE(dm->efd.enabled))
            {
              /* update EFD counters */
              dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);

              if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
                {
                  /* drop all received packets */
                  for (mb_index = 0; mb_index < n_buffers; mb_index++)
                    rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);

                  xd->efd_agent.discard_cnt += n_buffers;
                  increment_efd_drop_counter(vm,
                                             DPDK_ERROR_VLAN_EFD_DROP_PKTS,
                                             n_buffers);

                  continue;
                }

              if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
                                dm->efd.consec_full_frames_hi_thresh))
                {
                  u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
                                                               queue_id);
                  if (device_queue_sz >= dm->efd.queue_hi_thresh)
                    {
                      /* dpdk device queue has reached the critical threshold */
                      xd->efd_agent.congestion_cnt++;

                      /* apply EFD to packets from the burst */
                      efd_discard_burst = 1;
                    }
                }
            }

          fl = vlib_buffer_get_free_list
            (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

          mb_index = 0;

          while (n_buffers > 0)
            {
              u32 bi0;
              u8 next0, error0;
              u32 l3_offset0;
              vlib_buffer_t * b0, * b_seg, * b_chain = 0;
              ethernet_header_t * h0;
              u8 nb_seg = 1;
              struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
              struct rte_mbuf *mb_seg = mb->next;

              if (PREDICT_TRUE(n_buffers > 2))
                {
                  struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
                  vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
                  CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
                  CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
                  CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
                }

1337               b0 = vlib_buffer_from_rte_mbuf(mb);
1338
1339               /* check whether EFD is looking for packets to discard */
1340               if (PREDICT_FALSE(efd_discard_burst))
1341                 {
1342                   u32 cntr_type;
1343                   if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
1344                     {
1345                       rte_pktmbuf_free(mb);
1346                       xd->efd_agent.discard_cnt++;
1347                       increment_efd_drop_counter(vm, 
1348                                                  cntr_type,
1349                                                  1);
1350
1351                       n_buffers--;
1352                       mb_index++;
1353                       continue;
1354                     }
1355                 }
1356               
1357               /* Prefetch one next segment if it exists */
1358               if (PREDICT_FALSE(mb->nb_segs > 1))
1359                 {
1360                   struct rte_mbuf *pfmb = mb->next;
1361                   vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
1362                   CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1363                   CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1364                   b_chain = b0;
1365                 }
1366
1367               bi0 = vlib_get_buffer_index (vm, b0);
1368               vlib_buffer_init_for_free_list (b0, fl);
1369               b0->clone_count = 0;
1370
1371               dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
1372                                                        &next0, &error0);
1373 #ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
1374               /*
1375                * Clear overloaded TX offload flags when a DPDK driver
1376                * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
1377                */
1378               if (PREDICT_TRUE(trace_cnt == 0))
1379                 mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
1380               else
1381                 trace_cnt--;
1382 #endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
1383
1384               if (error0)
1385                   clib_warning ("bi %d error %d", bi0, error0);
1386
1387               b0->error = 0;
1388
1389               l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
1390                              next0 == DPDK_RX_NEXT_IP6_INPUT || 
1391                              next0 == DPDK_RX_NEXT_MPLS_INPUT) ? 
1392                             sizeof (ethernet_header_t) : 0);
1393
1394               b0->current_data = l3_offset0;
1395               b0->current_length = mb->data_len - l3_offset0;
1396
1397               b0->flags = buffer_flags_template;
1398
1399               if (VMWARE_LENGTH_BUG_WORKAROUND)
1400                   b0->current_length -= 4;
1401                 
1402               vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1403               vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
1404               vnet_buffer(b0)->io_handoff.next_index = next0;
1405               n_rx_bytes += mb->pkt_len;
1406
1407               /* Process subsequent segments of multi-segment packets */
1408               while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
1409                 {
1410                   ASSERT(mb_seg != 0);
1411  
1412                   b_seg = vlib_buffer_from_rte_mbuf(mb_seg);
1413                   vlib_buffer_init_for_free_list (b_seg, fl);
1414                   b_seg->clone_count = 0;
1415  
1416                   ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
1417                   ASSERT(b_seg->current_data == 0);
1418  
1419                   /*
1420                    * The driver (e.g. virtio) may not put the packet data at the start
1421                    * of the segment, so don't assume b_seg->current_data == 0 is correct.
1422                    */
1423                   b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;
1424
1425                   b_seg->current_length = mb_seg->data_len;
1426                   b0->total_length_not_including_first_buffer +=
1427                     mb_seg->data_len;
1428  
1429                   b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
1430                   b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
1431  
1432                   b_chain = b_seg;
1433                   mb_seg = mb_seg->next;
1434                   nb_seg++;
1435                 }
1436
1437               /*
1438                * Turn this on if you run into
1439                * "bad monkey" contexts, and you want to know exactly
1440                * which nodes they've visited... See main.c...
1441                */
1442               VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
1443  
1444               if (PREDICT_FALSE (n_trace > mb_index))
1445                 vec_add1 (xd->d_trace_buffers, bi0);
1446
1447               next_worker_index = first_worker_index;
1448
1449               /* 
1450                * Force unknown traffic onto worker 0, 
1451                * and into ethernet-input. $$$$ add more hashes.
1452                */
1453               h0 = (ethernet_header_t *) b0->data;
1454
1455               /* Compute ingress LB hash */
1456               hash_key = eth_get_key(h0);
1457               hash = (u32)clib_xxhash(hash_key);
1458
1459               if (PREDICT_TRUE (is_pow2(num_workers)))
1460                 next_worker_index += hash & (num_workers - 1);
1461               else
1462                 next_worker_index += hash % num_workers;
1463
1464               /* if EFD is enabled and not already discarding from dpdk,
1465                * check the worker ring/queue for congestion
1466                */
1467               if (PREDICT_FALSE(tm->efd.enabled && !efd_discard_burst))
1468                 {
1469                   vlib_frame_queue_t *fq;
1470
1471                   /* fq will be valid if the ring is congested */
1472                   fq = is_vlib_handoff_queue_congested(
1473                       next_worker_index, tm->efd.queue_hi_thresh,
1474                       congested_handoff_queue_by_worker_index);
1475                   
1476                   if (PREDICT_FALSE(fq != NULL))
1477                     {
1478                       u32 cntr_type;
1479                       if (PREDICT_TRUE(cntr_type =
1480                                        is_efd_discardable(tm, b0, mb)))
1481                         {
1482                           /* discard the packet */
1483                           fq->enqueue_efd_discards++;
1484                           increment_efd_drop_counter(vm, cntr_type, 1);
1485                           rte_pktmbuf_free(mb);
1486                           n_buffers--;
1487                           mb_index++;
1488                           continue;
1489                         }
1490                     }
1491                 }
1492               
1493               if (next_worker_index != current_worker_index)
1494                 {
1495                   if (hf)
1496                     hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1497
1498                   hf = dpdk_get_handoff_queue_elt(
1499                            next_worker_index,
1500                            handoff_queue_elt_by_worker_index);
1501                       
1502                   n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
1503                   to_next_worker = &hf->buffer_index[hf->n_vectors];
1504                   current_worker_index = next_worker_index;
1505                 }
1506               
1507               /* enqueue to correct worker thread */
1508               to_next_worker[0] = bi0;
1509               to_next_worker++;
1510               n_left_to_next_worker--;
1511
1512               if (n_left_to_next_worker == 0)
1513                 {
1514                   hf->n_vectors = VLIB_FRAME_SIZE;
1515                   vlib_put_handoff_queue_elt(hf);
1516                   current_worker_index = ~0;
1517                   handoff_queue_elt_by_worker_index[next_worker_index] = 0;
1518                   hf = 0;
1519                 }
1520                   
1521               n_buffers--;
1522               mb_index++;
1523             }
1524
1525           if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
1526             {
1527               /* credit the trace to the trace node */
1528               dpdk_rx_trace (dm, node_trace, xd, queue_id, xd->d_trace_buffers,
1529                              vec_len (xd->d_trace_buffers));
1530               vlib_set_trace_count (vm, node_trace, n_trace - vec_len (xd->d_trace_buffers));
1531             }
1532
1533           vlib_increment_combined_counter 
1534             (vnet_get_main()->interface_main.combined_sw_if_counters
1535              + VNET_INTERFACE_COUNTER_RX,
1536              cpu_index, 
1537              xd->vlib_sw_if_index,
1538              mb_index, n_rx_bytes);
1539
1540           dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
1541           dw->aggregate_rx_packets += mb_index;
1542         }

      if (hf)
        hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;

      /* Ship frames to the worker nodes */
      for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
        {
          if (handoff_queue_elt_by_worker_index[i])
            {
              hf = handoff_queue_elt_by_worker_index[i];
              /*
               * It works better to let the handoff node
               * rate-adapt, always ship the handoff queue element.
               */
              if (1 || hf->n_vectors == hf->last_n_vectors)
                {
                  vlib_put_handoff_queue_elt(hf);
                  handoff_queue_elt_by_worker_index[i] = 0;
                }
              else
                hf->last_n_vectors = hf->n_vectors;
            }
          congested_handoff_queue_by_worker_index[i] =
            (vlib_frame_queue_t *)(~0);
        }
      hf = 0;
      current_worker_index = ~0;

      vlib_increment_main_loop_counter (vm);
    }
}
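
/*
 * Illustrative sketch only (not called anywhere): the enqueue loops above
 * fill a per-worker handoff element until it holds VLIB_FRAME_SIZE buffer
 * indices, then publish it to the worker. The publish step boils down to
 * the helper below; the helper name is hypothetical.
 */
static inline void
dpdk_publish_handoff_elt_sketch (vlib_frame_queue_elt_t * hf,
                                 vlib_frame_queue_elt_t ** elt_by_worker_index,
                                 u32 worker_index)
{
  hf->n_vectors = VLIB_FRAME_SIZE;
  /* memory barrier, then hf->valid = 1, making the element consumable */
  vlib_put_handoff_queue_elt (hf);
  /* a fresh element will be allocated on the next enqueue to this worker */
  elt_by_worker_index[worker_index] = 0;
}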

/*
 * This function is used when the main thread performs IO and feeds the
 * worker threads.
 */
static uword
dpdk_io_input (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_frame_t * f)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  vlib_thread_main_t * tm = vlib_get_thread_main();
  uword n_rx_packets = 0;
  static vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index;
  static vlib_frame_queue_t ** congested_handoff_queue_by_worker_index = 0;
  vlib_frame_queue_elt_t * hf = 0;
  int i;
  u32 n_left_to_next_worker = 0, * to_next_worker = 0;
  u32 next_worker_index = 0;
  u32 current_worker_index = ~0;
  u32 cpu_index = os_get_cpu_number();
  static int num_workers_set;
  static u32 num_workers;
  u16 queue_id = 0;
  vlib_node_runtime_t * node_trace;
  static u32 first_worker_index;
  u32 buffer_flags_template;

  if (PREDICT_FALSE(num_workers_set == 0))
    {
      uword * p;
      vlib_thread_registration_t * tr;
      /* Only the standard vnet worker threads are supported */
      p = hash_get_mem (tm->thread_registrations_by_name, "workers");
      /* hash_get_mem returns 0 when no "workers" registration exists */
      tr = p ? (vlib_thread_registration_t *) p[0] : 0;
      if (tr)
        {
          num_workers = tr->count;
          first_worker_index = tr->first_index;
        }
      num_workers_set = 1;
    }

  if (PREDICT_FALSE(handoff_queue_elt_by_worker_index == 0))
    {
      vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);

      vec_validate_init_empty (congested_handoff_queue_by_worker_index,
                               first_worker_index + num_workers - 1,
                               (vlib_frame_queue_t *)(~0));
    }

  /* packet tracing is triggered on the dpdk-input node for ease-of-use */
  node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);

  buffer_flags_template = dm->buffer_flags_template;

  vec_foreach (xd, dm->devices)
    {
      u32 n_buffers;
      u32 mb_index;
      uword n_rx_bytes = 0;
      u32 n_trace, trace_cnt __attribute__((unused));
      vlib_buffer_free_list_t * fl;
      u32 hash;
      u64 hash_key;
      u8 efd_discard_burst = 0;

      if (!xd->admin_up)
        continue;

      n_buffers = dpdk_rx_burst(dm, xd, queue_id);

      if (n_buffers == 0)
        {
          /* check if EFD (dpdk) is enabled */
          if (PREDICT_FALSE(dm->efd.enabled))
            {
              /* reset a few stats */
              xd->efd_agent.last_poll_time = 0;
              xd->efd_agent.last_burst_sz = 0;
            }
          continue;
        }

      vec_reset_length (xd->d_trace_buffers);
      trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);

      /*
       * DAW-FIXME: VMXNET3 device stop/start doesn't work,
       * therefore fake the stop in the dpdk driver by
       * silently dropping all of the incoming pkts instead of
       * stopping the driver / hardware.
       */
      if (PREDICT_FALSE(xd->admin_up != 1))
        {
          for (mb_index = 0; mb_index < n_buffers; mb_index++)
            rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
          continue;
        }

      /* Check for congestion if EFD (Early-Fast-Discard) is enabled
       * in any mode (e.g. dpdk, monitor, or drop_all)
       */
      if (PREDICT_FALSE(dm->efd.enabled))
        {
          /* update EFD counters */
          dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);

          if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
            {
              /* discard all received packets */
              for (mb_index = 0; mb_index < n_buffers; mb_index++)
                rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);

              xd->efd_agent.discard_cnt += n_buffers;
              increment_efd_drop_counter(vm,
                                         DPDK_ERROR_VLAN_EFD_DROP_PKTS,
                                         n_buffers);

              continue;
            }

          if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
                            dm->efd.consec_full_frames_hi_thresh))
            {
              u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
                                                           queue_id);
              if (device_queue_sz >= dm->efd.queue_hi_thresh)
                {
                  /* dpdk device queue has reached the critical threshold */
                  xd->efd_agent.congestion_cnt++;

                  /* apply EFD to packets from the burst */
                  efd_discard_burst = 1;
                }
            }
        }

      fl = vlib_buffer_get_free_list
        (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

      mb_index = 0;

      while (n_buffers > 0)
        {
          u32 bi0;
          u8 next0, error0;
          u32 l3_offset0;
          vlib_buffer_t * b0, * b_seg, * b_chain = 0;
          ethernet_header_t * h0;
          u8 nb_seg = 1;
          struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
          struct rte_mbuf *mb_seg = mb->next;

          /* prefetch the mbuf two entries ahead to hide memory latency;
           * that entry is only valid when at least two more remain */
          if (PREDICT_TRUE(n_buffers > 2))
            {
              struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
              vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
              CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
              CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
            }

          b0 = vlib_buffer_from_rte_mbuf(mb);

          /* check whether EFD is looking for packets to discard */
          if (PREDICT_FALSE(efd_discard_burst))
            {
              u32 cntr_type;
              if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
                {
                  rte_pktmbuf_free(mb);
                  xd->efd_agent.discard_cnt++;
                  increment_efd_drop_counter(vm,
                                             cntr_type,
                                             1);

                  n_buffers--;
                  mb_index++;
                  continue;
                }
            }

          /* Prefetch one next segment if it exists */
          if (PREDICT_FALSE(mb->nb_segs > 1))
            {
              struct rte_mbuf *pfmb = mb->next;
              vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
              CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
              b_chain = b0;
            }

          bi0 = vlib_get_buffer_index (vm, b0);
          vlib_buffer_init_for_free_list (b0, fl);
          b0->clone_count = 0;

          dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
                                                   &next0, &error0);
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
          /*
           * Clear overloaded TX offload flags when a DPDK driver
           * is using them for RX flags (e.g. Cisco VIC Ethernet driver).
           * Packets about to be traced keep their flags so the trace
           * can record them; trace_cnt counts those packets down.
           */
          if (PREDICT_TRUE(trace_cnt == 0))
            mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
          else
            trace_cnt--;
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */

          if (error0)
            clib_warning ("bi %d error %d", bi0, error0);

          b0->error = 0;

          l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
                         next0 == DPDK_RX_NEXT_IP6_INPUT ||
                         next0 == DPDK_RX_NEXT_MPLS_INPUT) ?
                        sizeof (ethernet_header_t) : 0);

          b0->current_data = l3_offset0;
          b0->current_length = mb->data_len - l3_offset0;

          b0->flags = buffer_flags_template;

          if (VMWARE_LENGTH_BUG_WORKAROUND)
              b0->current_length -= 4;

          vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
          vnet_buffer(b0)->io_handoff.next_index = next0;
          n_rx_bytes += mb->pkt_len;

          /* Process subsequent segments of multi-segment packets */
          while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
            {
              ASSERT(mb_seg != 0);

              b_seg = vlib_buffer_from_rte_mbuf(mb_seg);
              vlib_buffer_init_for_free_list (b_seg, fl);
              b_seg->clone_count = 0;

              ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);

              /*
               * The driver (e.g. virtio) may not put the packet data at
               * the start of the segment, so derive current_data from the
               * mbuf rather than assuming it is zero.
               */
              b_seg->current_data =
                (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;

              b_seg->current_length = mb_seg->data_len;
              b0->total_length_not_including_first_buffer +=
                mb_seg->data_len;

              b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
              b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);

              b_chain = b_seg;
              mb_seg = mb_seg->next;
              nb_seg++;
            }
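
          /*
           * Example: a 3-segment jumbo frame arrives as mb -> seg1 -> seg2.
           * After this loop, b0 carries the first segment and chains to the
           * vlib buffers of seg1 and seg2 via VLIB_BUFFER_NEXT_PRESENT and
           * next_buffer, and total_length_not_including_first_buffer holds
           * data_len(seg1) + data_len(seg2).
           */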

          /*
           * Turn this on if you run into
           * "bad monkey" contexts, and you want to know exactly
           * which nodes they've visited... See main.c...
           */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);

          if (PREDICT_FALSE (n_trace > mb_index))
            vec_add1 (xd->d_trace_buffers, bi0);

          next_worker_index = first_worker_index;

          /*
           * Unknown traffic hashes with a fixed key, so it all lands on
           * one worker (nominally worker 0) and is sorted out by
           * ethernet-input there. $$$$ add more hashes.
           */
          h0 = (ethernet_header_t *) b0->data;

          /* Compute ingress LB hash */
          hash_key = eth_get_key(h0);
          hash = (u32)clib_xxhash(hash_key);

          if (PREDICT_TRUE (is_pow2(num_workers)))
            next_worker_index += hash & (num_workers - 1);
          else
            next_worker_index += hash % num_workers;
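
          /*
           * Example: with num_workers == 4 (a power of two) the mask form
           * applies, e.g. hash 45 & 3 == 1 picks the second worker; with
           * num_workers == 3 the slower modulo form is used instead.
           */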

          /* if EFD is enabled and not already discarding from dpdk,
           * check the worker ring/queue for congestion
           */
          if (PREDICT_FALSE(tm->efd.enabled && !efd_discard_burst))
            {
              vlib_frame_queue_t *fq;

              /* fq will be valid if the ring is congested */
              fq = is_vlib_handoff_queue_congested(
                  next_worker_index, tm->efd.queue_hi_thresh,
                  congested_handoff_queue_by_worker_index);

              if (PREDICT_FALSE(fq != NULL))
                {
                  u32 cntr_type;
                  if (PREDICT_TRUE(cntr_type =
                                   is_efd_discardable(tm, b0, mb)))
                    {
                      /* discard the packet */
                      fq->enqueue_efd_discards++;
                      increment_efd_drop_counter(vm, cntr_type, 1);
                      rte_pktmbuf_free(mb);
                      n_buffers--;
                      mb_index++;
                      continue;
                    }
                }
            }

          if (next_worker_index != current_worker_index)
            {
              if (hf)
                hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;

              hf = dpdk_get_handoff_queue_elt(
                     next_worker_index,
                     handoff_queue_elt_by_worker_index);

              n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
              to_next_worker = &hf->buffer_index[hf->n_vectors];
              current_worker_index = next_worker_index;
            }

          /* enqueue to correct worker thread */
          to_next_worker[0] = bi0;
          to_next_worker++;
          n_left_to_next_worker--;

          if (n_left_to_next_worker == 0)
            {
              hf->n_vectors = VLIB_FRAME_SIZE;
              vlib_put_handoff_queue_elt(hf);
              current_worker_index = ~0;
              handoff_queue_elt_by_worker_index[next_worker_index] = 0;
              hf = 0;
            }

          n_buffers--;
          mb_index++;
        }

      if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
        {
          /* credit the trace to the trace node */
          dpdk_rx_trace (dm, node_trace, xd, queue_id, xd->d_trace_buffers,
                         vec_len (xd->d_trace_buffers));
          vlib_set_trace_count (vm, node_trace,
                                n_trace - vec_len (xd->d_trace_buffers));
        }

      vlib_increment_combined_counter
        (vnet_get_main()->interface_main.combined_sw_if_counters
         + VNET_INTERFACE_COUNTER_RX,
         cpu_index,
         xd->vlib_sw_if_index,
         mb_index, n_rx_bytes);

      dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
      dw->aggregate_rx_packets += mb_index;
      n_rx_packets += mb_index;
    }

  if (hf)
    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;

  /* Ship frames to the worker nodes */
  for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
    {
      if (handoff_queue_elt_by_worker_index[i])
        {
          hf = handoff_queue_elt_by_worker_index[i];
          /*
           * It works better to let the handoff node
           * rate-adapt, always ship the handoff queue element.
           */
          if (1 || hf->n_vectors == hf->last_n_vectors)
            {
              vlib_put_handoff_queue_elt(hf);
              handoff_queue_elt_by_worker_index[i] = 0;
            }
          else
            hf->last_n_vectors = hf->n_vectors;
        }
      congested_handoff_queue_by_worker_index[i] = (vlib_frame_queue_t *)(~0);
    }
  hf = 0;
  current_worker_index = ~0;
  return n_rx_packets;
}

VLIB_REGISTER_NODE (dpdk_io_input_node) = {
  .function = dpdk_io_input,
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "dpdk-io-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_dpdk_rx_dma_trace,

  .n_errors = DPDK_N_ERROR,
  .error_strings = dpdk_error_strings,

  .n_next_nodes = DPDK_RX_N_NEXT,
  .next_nodes = {
    [DPDK_RX_NEXT_DROP] = "error-drop",
    [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
    [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
    [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
  },
};
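
/*
 * The node registers disabled; device-detection code elsewhere is expected
 * to switch it to polling. A hedged sketch of that step, using the stock
 * vlib node-state API:
 *
 *   vlib_node_set_state (vm, dpdk_io_input_node.index,
 *                        VLIB_NODE_STATE_POLLING);
 */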

/*
 * set_efd_bitmap()
 * Based on the operation type, set lower/upper bits for the given index value
 */
void
set_efd_bitmap (u8 *bitmap, u32 value, u32 op)
{
    int ix;

    *bitmap = 0;
    for (ix = 0; ix < 8; ix++) {
        if (((op == EFD_OPERATION_LESS_THAN) && (ix < value)) ||
            ((op == EFD_OPERATION_GREATER_OR_EQUAL) && (ix >= value))) {
            (*bitmap) |= (1 << ix);
        }
    }
}
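
/*
 * Example: set_efd_bitmap (&bm, 3, EFD_OPERATION_LESS_THAN) sets bits
 * 0..2, leaving bm == 0x07; the same value with
 * EFD_OPERATION_GREATER_OR_EQUAL sets bits 3..7, leaving bm == 0xf8.
 */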

void
efd_config (u32 enabled,
            u32 ip_prec,  u32 ip_op,
            u32 mpls_exp, u32 mpls_op,
            u32 vlan_cos, u32 vlan_op)
{
   vlib_thread_main_t * tm = vlib_get_thread_main();
   dpdk_main_t * dm = &dpdk_main;

   if (enabled) {
       tm->efd.enabled |= VLIB_EFD_DISCARD_ENABLED;
       dm->efd.enabled |= DPDK_EFD_DISCARD_ENABLED;
   } else {
       tm->efd.enabled &= ~VLIB_EFD_DISCARD_ENABLED;
       dm->efd.enabled &= ~DPDK_EFD_DISCARD_ENABLED;
   }

   set_efd_bitmap(&tm->efd.ip_prec_bitmap, ip_prec, ip_op);
   set_efd_bitmap(&tm->efd.mpls_exp_bitmap, mpls_exp, mpls_op);
   set_efd_bitmap(&tm->efd.vlan_cos_bitmap, vlan_cos, vlan_op);
}
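
/*
 * Usage sketch (hypothetical policy values): enable EFD discard and, under
 * congestion, drop packets whose IP precedence, MPLS EXP, or VLAN CoS is
 * below 4:
 *
 *   efd_config (1,
 *               4, EFD_OPERATION_LESS_THAN,
 *               4, EFD_OPERATION_LESS_THAN,
 *               4, EFD_OPERATION_LESS_THAN);
 */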