/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/xxhash.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/dpdk/dpdk.h>
#include <vnet/classify/vnet_classify.h>
#include <vnet/mpls-gre/packet.h>

#include "dpdk_priv.h"

#ifndef MAX
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif

#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif

/*
 * At least in certain versions of ESXi, VMware e1000s don't honor the
 * "strip rx CRC" bit. Set this flag to work around that bug FOR UNIT TEST ONLY.
 *
 * If wireshark complains like so:
 *
 * "Frame check sequence: 0x00000000 [incorrect, should be <hex-num>]"
 * and you're using ESXi emulated e1000s, set this flag FOR UNIT TEST ONLY.
 *
 * Note: do NOT check in this file with this workaround enabled! You'll lose
 * actual data from e.g. 10xGE interfaces. The extra 4 bytes annoy
 * wireshark, but they're harmless...
 */
#define VMWARE_LENGTH_BUG_WORKAROUND 0
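
/*
 * Illustrative note (added commentary, not part of the original driver
 * logic): when the flag above is enabled, the RX paths below simply
 * shorten each received packet by the 4-byte FCS, i.e.:
 *
 *   if (VMWARE_LENGTH_BUG_WORKAROUND)
 *     b0->current_length -= 4;
 *
 * which is exactly what dpdk_device_input() and dpdk_io_thread() do.
 */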

typedef struct {
  u32 cached_next_index;

  /* convenience variables */
  vlib_main_t * vlib_main;
  vnet_main_t * vnet_main;
} handoff_dispatch_main_t;

typedef struct {
  u32 buffer_index;
  u32 next_index;
  u32 sw_if_index;
} handoff_dispatch_trace_t;

/* packet trace format function */
static u8 * format_handoff_dispatch_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  handoff_dispatch_trace_t * t = va_arg (*args, handoff_dispatch_trace_t *);

  s = format (s, "HANDOFF_DISPATCH: sw_if_index %d next_index %d buffer 0x%x",
      t->sw_if_index,
      t->next_index,
      t->buffer_index);
  return s;
}

handoff_dispatch_main_t handoff_dispatch_main;

vlib_node_registration_t handoff_dispatch_node;

#define foreach_handoff_dispatch_error \
_(EXAMPLE, "example packets")

typedef enum {
#define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
  foreach_handoff_dispatch_error
#undef _
  HANDOFF_DISPATCH_N_ERROR,
} handoff_dispatch_error_t;

static char * handoff_dispatch_error_strings[] = {
#define _(sym,string) string,
  foreach_handoff_dispatch_error
#undef _
};

static inline void
vlib_put_handoff_queue_elt (vlib_frame_queue_elt_t * hf)
{
  CLIB_MEMORY_BARRIER();
  hf->valid = 1;
}
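
/*
 * Added commentary: the barrier above publishes the element. All writes
 * to hf (buffer indices, n_vectors) must be globally visible before a
 * consumer can observe valid == 1. A consumer-side sketch (assumed here
 * for illustration only; the real dequeue lives in the threading code)
 * would pair with it like this:
 *
 *   while (!elt->valid)
 *     ;                      // spin until the producer publishes
 *   process (elt);           // safe: the barrier ordered the writes
 *   elt->valid = 0;          // hand the slot back to the producer
 */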

static uword
handoff_dispatch_node_fn (vlib_main_t * vm,
                          vlib_node_runtime_t * node,
                          vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  dpdk_rx_next_t next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          next0 = vnet_buffer(b0)->io_handoff.next_index;
          next1 = vnet_buffer(b1)->io_handoff.next_index;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
              handoff_dispatch_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
              t->sw_if_index = sw_if_index0;
              t->next_index = next0;
              t->buffer_index = bi0;
            }
          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vlib_trace_buffer (vm, node, next1, b1, /* follow_chain */ 0);
              handoff_dispatch_trace_t *t =
                vlib_add_trace (vm, node, b1, sizeof (*t));
              sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
              t->sw_if_index = sw_if_index1;
              t->next_index = next1;
              t->buffer_index = bi1;
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          u32 sw_if_index0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          next0 = vnet_buffer(b0)->io_handoff.next_index;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
              handoff_dispatch_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
              t->sw_if_index = sw_if_index0;
              t->next_index = next0;
              t->buffer_index = bi0;
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_REGISTER_NODE (handoff_dispatch_node) = {
  .function = handoff_dispatch_node_fn,
  .name = "handoff-dispatch",
  .vector_size = sizeof (u32),
  .format_trace = format_handoff_dispatch_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .flags = VLIB_NODE_FLAG_IS_HANDOFF,

  .n_errors = ARRAY_LEN(handoff_dispatch_error_strings),
  .error_strings = handoff_dispatch_error_strings,

  .n_next_nodes = DPDK_RX_N_NEXT,

  .next_nodes = {
        [DPDK_RX_NEXT_DROP] = "error-drop",
        [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
        [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input",
        [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
        [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
  },
};

clib_error_t *handoff_dispatch_init (vlib_main_t *vm)
{
  handoff_dispatch_main_t * mp = &handoff_dispatch_main;

  mp->vlib_main = vm;
  mp->vnet_main = &vnet_main;

  return 0;
}

VLIB_INIT_FUNCTION (handoff_dispatch_init);

u32 dpdk_get_handoff_node_index (void)
{
  return handoff_dispatch_node.index;
}

static char * dpdk_error_strings[] = {
#define _(n,s) s,
    foreach_dpdk_error
#undef _
};

typedef struct {
  u32 buffer_index;
  u16 device_index;
  u16 queue_index;
  struct rte_mbuf mb;
  vlib_buffer_t buffer; /* Copy of VLIB buffer; pkt data stored in pre_data. */
} dpdk_rx_dma_trace_t;

static u8 * format_dpdk_rx_dma_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main();
  dpdk_rx_dma_trace_t * t = va_arg (*va, dpdk_rx_dma_trace_t *);
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd = vec_elt_at_index (dm->devices, t->device_index);
  format_function_t * f;
  uword indent = format_get_indent (s);
  vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);

  s = format (s, "%U rx queue %d",
              format_vnet_sw_interface_name, vnm, sw,
              t->queue_index);

  s = format (s, "\n%Ubuffer 0x%x: %U",
              format_white_space, indent,
              t->buffer_index,
              format_vlib_buffer, &t->buffer);

#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
  s = format (s, "\n%U%U",
              format_white_space, indent,
              format_dpdk_rx_rte_mbuf, &t->mb);
#else
  s = format (s, "\n%U%U",
              format_white_space, indent,
              format_dpdk_rte_mbuf, &t->mb);
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
  f = node->format_buffer;
  if (!f)
    f = format_hex_bytes;
  s = format (s, "\n%U%U", format_white_space, indent,
              f, t->buffer.pre_data, sizeof (t->buffer.pre_data));

  return s;
}

always_inline void
dpdk_rx_next_and_error_from_mb_flags_x1 (dpdk_device_t *xd, struct rte_mbuf *mb,
                                         vlib_buffer_t *b0,
                                         u8 * next0, u8 * error0)
{
  u8 is0_ip4, is0_ip6, is0_mpls, n0;
  u64 mb_flags = mb->ol_flags; /* ol_flags is 64-bit; don't truncate it */

  if (PREDICT_FALSE(mb_flags & (
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
       PKT_EXT_RX_PKT_ERROR | PKT_EXT_RX_BAD_FCS   |
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
        PKT_RX_IP_CKSUM_BAD  | PKT_RX_L4_CKSUM_BAD
    )))
    {
      /* some error was flagged. determine the drop reason */
      n0 = DPDK_RX_NEXT_DROP;
      *error0 =
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
        (mb_flags & PKT_EXT_RX_PKT_ERROR) ? DPDK_ERROR_RX_PACKET_ERROR :
        (mb_flags & PKT_EXT_RX_BAD_FCS) ? DPDK_ERROR_RX_BAD_FCS :
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
        (mb_flags & PKT_RX_IP_CKSUM_BAD) ? DPDK_ERROR_IP_CHECKSUM_ERROR :
        (mb_flags & PKT_RX_L4_CKSUM_BAD) ? DPDK_ERROR_L4_CHECKSUM_ERROR :
        DPDK_ERROR_NONE;
    }
  else
    {
      *error0 = DPDK_ERROR_NONE;
      if (xd->per_interface_next_index != ~0)
        n0 = xd->per_interface_next_index;
      else if (mb_flags & PKT_RX_VLAN_PKT)
        n0 = DPDK_RX_NEXT_ETHERNET_INPUT;
      else
        {
          n0 = DPDK_RX_NEXT_ETHERNET_INPUT;
#if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
          is0_ip4 = (mb->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4_EXT)) != 0;
#else
          is0_ip4 = (mb_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV4_HDR_EXT)) != 0;
#endif

          if (PREDICT_TRUE(is0_ip4))
            n0 = DPDK_RX_NEXT_IP4_INPUT;
          else
            {
#if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
              is0_ip6 =
              (mb->packet_type & (RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV6_EXT)) != 0;
#else
              is0_ip6 =
                      (mb_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) != 0;
#endif
              if (PREDICT_TRUE(is0_ip6))
                n0 = DPDK_RX_NEXT_IP6_INPUT;
              else
                {
                  ethernet_header_t *h0 = (ethernet_header_t *) b0->data;
                  is0_mpls = (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST));
                  n0 = is0_mpls ? DPDK_RX_NEXT_MPLS_INPUT : n0;
                }
            }
        }
    }
  *next0 = n0;
}

void dpdk_rx_trace (dpdk_main_t * dm,
                    vlib_node_runtime_t * node,
                    dpdk_device_t * xd,
                    u16 queue_id,
                    u32 * buffers,
                    uword n_buffers)
{
  vlib_main_t * vm = vlib_get_main();
  u32 * b, n_left;
  u8 next0;

  n_left = n_buffers;
  b = buffers;

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t * b0;
      dpdk_rx_dma_trace_t * t0;
      struct rte_mbuf *mb;
      u8 error0;

      bi0 = b[0];
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      mb = ((struct rte_mbuf *)b0) - 1;
      dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
                                               &next0, &error0);
      vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->queue_index = queue_id;
      t0->device_index = xd->device_index;
      t0->buffer_index = bi0;

      memcpy (&t0->mb, mb, sizeof (t0->mb));
      memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      memcpy (t0->buffer.pre_data, b0->data, sizeof (t0->buffer.pre_data));

#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
      /*
       * Clear overloaded TX offload flags when a DPDK driver
       * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
       */
      mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */

      b += 1;
    }
}

/*
 * dpdk_efd_update_counters()
 * Update EFD (early-fast-discard) counters
 */
void dpdk_efd_update_counters (dpdk_device_t *xd,
                               u32 n_buffers,
                               u16 enabled)
{
  if (enabled & DPDK_EFD_MONITOR_ENABLED)
    {
      u64 now = clib_cpu_time_now();
      if (xd->efd_agent.last_poll_time > 0)
        {
          u64 elapsed_time = (now - xd->efd_agent.last_poll_time);
          if (elapsed_time > xd->efd_agent.max_poll_delay)
            xd->efd_agent.max_poll_delay = elapsed_time;
        }
      xd->efd_agent.last_poll_time = now;
    }

  xd->efd_agent.total_packet_cnt += n_buffers;
  xd->efd_agent.last_burst_sz = n_buffers;

  if (n_buffers > xd->efd_agent.max_burst_sz)
    xd->efd_agent.max_burst_sz = n_buffers;

  if (PREDICT_FALSE(n_buffers == VLIB_FRAME_SIZE))
    {
      xd->efd_agent.full_frames_cnt++;
      xd->efd_agent.consec_full_frames_cnt++;
    }
  else
    {
      xd->efd_agent.consec_full_frames_cnt = 0;
    }
}

/* is_efd_discardable()
 *   returns a non-zero DPDK error if the packet meets early-fast-discard
 *   criteria, zero otherwise
 */
u32 is_efd_discardable (vlib_thread_main_t *tm,
                        vlib_buffer_t * b0,
                        struct rte_mbuf *mb)
{
  ethernet_header_t *eh = (ethernet_header_t *) b0->data;

  if (eh->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))
    {
      ip4_header_t *ipv4 =
          (ip4_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_prec = (ipv4->tos >> 5);

      return (tm->efd.ip_prec_bitmap & (1 << pkt_prec) ?
                  DPDK_ERROR_IPV4_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }
  else if (eh->type == clib_net_to_host_u16(ETHERNET_TYPE_IP6))
    {
      ip6_header_t *ipv6 =
          (ip6_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_tclass =
          ((ipv6->ip_version_traffic_class_and_flow_label >> 20) & 0xff);

      return (tm->efd.ip_prec_bitmap & (1 << pkt_tclass) ?
                  DPDK_ERROR_IPV6_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }
  else if (eh->type == clib_net_to_host_u16(ETHERNET_TYPE_MPLS_UNICAST))
    {
      mpls_unicast_header_t *mpls =
          (mpls_unicast_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_exp = ((mpls->label_exp_s_ttl >> 9) & 0x07);

      return (tm->efd.mpls_exp_bitmap & (1 << pkt_exp) ?
                  DPDK_ERROR_MPLS_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }
  else if ((eh->type == clib_net_to_host_u16(ETHERNET_TYPE_VLAN)) ||
           (eh->type == clib_net_to_host_u16(ETHERNET_TYPE_DOT1AD)))
    {
      ethernet_vlan_header_t *vlan =
          (ethernet_vlan_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_cos = ((vlan->priority_cfi_and_id >> 13) & 0x07);

      return (tm->efd.vlan_cos_bitmap & (1 << pkt_cos) ?
                  DPDK_ERROR_VLAN_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }

  return DPDK_ERROR_NONE;
}
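
/*
 * Worked example (added commentary, illustration only): an IPv4 packet
 * with TOS 0xB8 (DSCP 46, "EF") has precedence 0xB8 >> 5 = 5. With an
 * assumed tm->efd.ip_prec_bitmap of 0x3F (precedences 0..5 discardable),
 * 0x3F & (1 << 5) is non-zero, so the packet is dropped with
 * DPDK_ERROR_IPV4_EFD_DROP_PKTS; with a bitmap of 0xC0 it would pass.
 */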

/*
 * This function is used when there are no worker threads.
 * The main thread performs IO and forwards the packets.
 */
static inline u32 dpdk_device_input (dpdk_main_t * dm,
                                     dpdk_device_t * xd,
                                     vlib_node_runtime_t * node,
                                     u32 cpu_index,
                                     u16 queue_id)
{
  u32 n_buffers;
  u32 next_index = DPDK_RX_NEXT_ETHERNET_INPUT;
  u32 n_left_to_next, * to_next;
  u32 mb_index;
  vlib_main_t * vm = vlib_get_main();
  uword n_rx_bytes = 0;
  u32 n_trace, trace_cnt __attribute__((unused));
  vlib_buffer_free_list_t * fl;
  u8 efd_discard_burst = 0;
  u16 ip_align_offset = 0;

  if (xd->admin_up == 0)
    return 0;

  n_buffers = dpdk_rx_burst(dm, xd, queue_id);

  if (n_buffers == 0)
    {
      /* check if EFD (dpdk) is enabled */
      if (PREDICT_FALSE(dm->efd.enabled))
        {
          /* reset a few stats */
          xd->efd_agent.last_poll_time = 0;
          xd->efd_agent.last_burst_sz = 0;
        }
      return 0;
    }

  if (xd->pmd == VNET_DPDK_PMD_THUNDERX)
      ip_align_offset = 6;

  vec_reset_length (xd->d_trace_buffers);
  trace_cnt = n_trace = vlib_get_trace_count (vm, node);

  fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  /*
   * DAW-FIXME: VMXNET3 device stop/start doesn't work,
   * therefore fake the stop in the dpdk driver by
   * silently dropping all of the incoming pkts instead of
   * stopping the driver / hardware.
   */
  if (PREDICT_FALSE(xd->admin_up != 1))
    {
      for (mb_index = 0; mb_index < n_buffers; mb_index++)
        rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);

      return 0;
    }

  /* Check for congestion if EFD (Early-Fast-Discard) is enabled
   * in any mode (e.g. dpdk, monitor, or drop_all)
   */
  if (PREDICT_FALSE(dm->efd.enabled))
    {
      /* update EFD counters */
      dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);

      if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
        {
          /* discard all received packets */
          for (mb_index = 0; mb_index < n_buffers; mb_index++)
            rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);

          xd->efd_agent.discard_cnt += n_buffers;
          increment_efd_drop_counter(vm,
                                     DPDK_ERROR_VLAN_EFD_DROP_PKTS,
                                     n_buffers);

          return 0;
        }

      if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
                        dm->efd.consec_full_frames_hi_thresh))
        {
          u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
                                                       queue_id);
          if (device_queue_sz >= dm->efd.queue_hi_thresh)
            {
              /* dpdk device queue has reached the critical threshold */
              xd->efd_agent.congestion_cnt++;

              /* apply EFD to packets from the burst */
              efd_discard_burst = 1;
            }
        }
    }

  mb_index = 0;

  while (n_buffers > 0)
    {
      u32 bi0;
      u8 next0, error0;
      u32 l3_offset0;
      vlib_buffer_t * b0, * b_seg, * b_chain = 0;
      u32 cntr_type;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_buffers > 0 && n_left_to_next > 0)
        {
          u8 nb_seg = 1;
          struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
          struct rte_mbuf *mb_seg = mb->next;

          if (PREDICT_TRUE(n_buffers > 2))
          {
              struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
              vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
              CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, STORE);
              CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
          }

          ASSERT(mb);

          b0 = (vlib_buffer_t *)(mb+1);

          /* check whether EFD is looking for packets to discard */
          if (PREDICT_FALSE(efd_discard_burst))
            {
              vlib_thread_main_t * tm = vlib_get_thread_main();

              if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
                {
                  rte_pktmbuf_free(mb);
                  xd->efd_agent.discard_cnt++;
                  increment_efd_drop_counter(vm,
                                             cntr_type,
                                             1);
                  n_buffers--;
                  mb_index++;
                  continue;
                }
            }

          /* Prefetch one next segment if it exists. */
          if (PREDICT_FALSE(mb->nb_segs > 1))
            {
              struct rte_mbuf *pfmb = mb->next;
              vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
              CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
              b_chain = b0;
            }

          vlib_buffer_init_for_free_list (b0, fl);
          b0->clone_count = 0;

          bi0 = vlib_get_buffer_index (vm, b0);

          to_next[0] = bi0;
          to_next++;
          n_left_to_next--;

          dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
                                                   &next0, &error0);
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
          /*
           * Clear overloaded TX offload flags when a DPDK driver
           * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
           */

          if (PREDICT_TRUE(trace_cnt == 0))
            mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
          else
            trace_cnt--;
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */

          b0->error = node->errors[error0];

          l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
                         next0 == DPDK_RX_NEXT_IP6_INPUT ||
                         next0 == DPDK_RX_NEXT_MPLS_INPUT) ?
                        sizeof (ethernet_header_t) : 0);

          b0->current_data = l3_offset0;
          b0->current_length = mb->data_len - l3_offset0;

          if (PREDICT_FALSE (ip_align_offset != 0))
            {
              if (next0 == DPDK_RX_NEXT_IP4_INPUT ||
                  next0 == DPDK_RX_NEXT_IP6_INPUT)
                b0->current_data += ip_align_offset;
            }

          b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;

          if (VMWARE_LENGTH_BUG_WORKAROUND)
              b0->current_length -= 4;

          vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
          n_rx_bytes += mb->pkt_len;

          /* Process subsequent segments of multi-segment packets */
          while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
            {
              ASSERT(mb_seg != 0);

              b_seg = (vlib_buffer_t *)(mb_seg+1);
              vlib_buffer_init_for_free_list (b_seg, fl);
              b_seg->clone_count = 0;

              ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
              ASSERT(b_seg->current_data == 0);

              /*
               * The driver (e.g. virtio) may not put the packet data at the start
               * of the segment, so don't assume b_seg->current_data == 0 is correct.
               */
              b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;

              b_seg->current_length = mb_seg->data_len;
              b0->total_length_not_including_first_buffer +=
                mb_seg->data_len;

              b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
              b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);

              b_chain = b_seg;
              mb_seg = mb_seg->next;
              nb_seg++;
            }

          /*
           * Turn this on if you run into
           * "bad monkey" contexts, and you want to know exactly
           * which nodes they've visited... See main.c...
           */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
          if (PREDICT_FALSE (n_trace > mb_index))
            vec_add1 (xd->d_trace_buffers, bi0);
          n_buffers--;
          mb_index++;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
    {
      dpdk_rx_trace (dm, node, xd, queue_id, xd->d_trace_buffers,
                     vec_len (xd->d_trace_buffers));
      vlib_set_trace_count (vm, node, n_trace - vec_len (xd->d_trace_buffers));
    }

  vlib_increment_combined_counter
    (vnet_get_main()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX,
     cpu_index,
     xd->vlib_sw_if_index,
     mb_index, n_rx_bytes);

  dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
  dw->aggregate_rx_packets += mb_index;

  return mb_index;
}

#if VIRL > 0
#define VIRL_SPEED_LIMIT()                         \
  /* Limit the input rate to 1000 vectors / sec */ \
  {                                                \
    struct timespec ts, tsrem;                     \
                                                   \
    ts.tv_sec = 0;                                 \
    ts.tv_nsec = 1000*1000; /* 1ms */              \
                                                   \
    while (nanosleep(&ts, &tsrem) < 0)             \
      {                                            \
        ts = tsrem;                                \
      }                                            \
  }
#else
#define VIRL_SPEED_LIMIT()
#endif


static uword
dpdk_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * f)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  uword n_rx_packets = 0;
  dpdk_device_and_queue_t * dq;
  u32 cpu_index = os_get_cpu_number();

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  vec_foreach (dq, dm->devices_by_cpu[cpu_index])
    {
      xd = vec_elt_at_index(dm->devices, dq->device);
      ASSERT(dq->queue_id == 0);
      n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, 0);
    }

  VIRL_SPEED_LIMIT()

  return n_rx_packets;
}

uword
dpdk_input_rss (vlib_main_t * vm,
      vlib_node_runtime_t * node,
      vlib_frame_t * f)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  uword n_rx_packets = 0;
  dpdk_device_and_queue_t * dq;
  u32 cpu_index = os_get_cpu_number();

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  vec_foreach (dq, dm->devices_by_cpu[cpu_index])
    {
      xd = vec_elt_at_index(dm->devices, dq->device);
      n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id);
    }

  VIRL_SPEED_LIMIT()

  return n_rx_packets;
}

VLIB_REGISTER_NODE (dpdk_input_node) = {
  .function = dpdk_input,
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "dpdk-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_dpdk_rx_dma_trace,

  .n_errors = DPDK_N_ERROR,
  .error_strings = dpdk_error_strings,

  .n_next_nodes = DPDK_RX_N_NEXT,
  .next_nodes = {
    [DPDK_RX_NEXT_DROP] = "error-drop",
    [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
    [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
    [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
  },
};

/*
 * Override the next nodes for the dpdk input nodes.
 * Must be invoked prior to VLIB_INIT_FUNCTION calls.
 */
void dpdk_set_next_node (dpdk_rx_next_t next, char *name)
{
  vlib_node_registration_t *r = &dpdk_input_node;
  vlib_node_registration_t *r_io = &dpdk_io_input_node;
  vlib_node_registration_t *r_handoff = &handoff_dispatch_node;

  switch (next)
    {
    case DPDK_RX_NEXT_IP4_INPUT:
    case DPDK_RX_NEXT_IP6_INPUT:
    case DPDK_RX_NEXT_MPLS_INPUT:
    case DPDK_RX_NEXT_ETHERNET_INPUT:
      r->next_nodes[next] = name;
      r_io->next_nodes[next] = name;
      r_handoff->next_nodes[next] = name;
      break;

    default:
      clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
      break;
    }
}
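
/*
 * Hypothetical usage sketch (added commentary, not called anywhere in
 * this file): a feature that wants IPv4 packets delivered to its own
 * graph node instead of the default would call, before init functions
 * run:
 *
 *   dpdk_set_next_node (DPDK_RX_NEXT_IP4_INPUT, "my-ip4-feature");
 *
 * where "my-ip4-feature" names an assumed, already-registered vlib node.
 */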

inline vlib_frame_queue_elt_t *
vlib_get_handoff_queue_elt (u32 vlib_worker_index)
{
  vlib_frame_queue_t *fq;
  vlib_frame_queue_elt_t *elt;
  u64 new_tail;

  fq = vlib_frame_queues[vlib_worker_index];
  ASSERT (fq);

  new_tail = __sync_add_and_fetch (&fq->tail, 1);

  /* Wait until a ring slot is available */
  while (new_tail >= fq->head_hint + fq->nelts)
      vlib_worker_thread_barrier_check ();

  elt = fq->elts + (new_tail & (fq->nelts-1));

  /* this would be very bad... */
  while (elt->valid)
    ;

  elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
  elt->last_n_vectors = elt->n_vectors = 0;

  return elt;
}
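
/*
 * Added commentary: the slot index "new_tail & (fq->nelts-1)" above
 * assumes fq->nelts is a power of two, so the AND is equivalent to
 * new_tail % fq->nelts. For example, with nelts == 64 and tail == 130,
 * 130 & 63 == 2: the ring has wrapped twice and the element lands in
 * slot 2.
 */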

static inline vlib_frame_queue_elt_t *
dpdk_get_handoff_queue_elt (
    u32 vlib_worker_index,
    vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index)
{
  vlib_frame_queue_elt_t *elt;

  if (handoff_queue_elt_by_worker_index [vlib_worker_index])
      return handoff_queue_elt_by_worker_index [vlib_worker_index];

  elt = vlib_get_handoff_queue_elt (vlib_worker_index);

  handoff_queue_elt_by_worker_index [vlib_worker_index] = elt;

  return elt;
}

static inline vlib_frame_queue_t *
is_vlib_handoff_queue_congested (
    u32 vlib_worker_index,
    u32 queue_hi_thresh,
    vlib_frame_queue_t ** handoff_queue_by_worker_index)
{
  vlib_frame_queue_t *fq;

  fq = handoff_queue_by_worker_index [vlib_worker_index];
  if (fq != (vlib_frame_queue_t *)(~0))
      return fq;

  fq = vlib_frame_queues[vlib_worker_index];
  ASSERT (fq);

  if (PREDICT_FALSE(fq->tail >= (fq->head_hint + queue_hi_thresh))) {
    /* a valid entry in the array will indicate the queue has reached
     * the specified threshold and is congested
     */
    handoff_queue_by_worker_index [vlib_worker_index] = fq;
    fq->enqueue_full_events++;
    return fq;
  }

  return NULL;
}

static inline u64 ipv4_get_key (ip4_header_t *ip)
{
   u64 hash_key;

   hash_key = *((u64*)(&ip->address_pair)) ^ ip->protocol;

   return hash_key;
}

static inline u64 ipv6_get_key (ip6_header_t *ip)
{
   u64 hash_key;

   hash_key = ip->src_address.as_u64[0] ^
              ip->src_address.as_u64[1] ^
              ip->dst_address.as_u64[0] ^
              ip->dst_address.as_u64[1] ^
              ip->protocol;

   return hash_key;
}


#define MPLS_BOTTOM_OF_STACK_BIT_MASK   0x00000100U
#define MPLS_LABEL_MASK                 0xFFFFF000U
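
/*
 * Added commentary: an MPLS shim word is laid out as
 *
 *   label(20 bits) | exp(3) | S(1) | ttl(8)
 *
 * so in a host-order u32 the bottom-of-stack bit is bit 8 (0x00000100)
 * and the label occupies the top 20 bits (0xFFFFF000), which is what
 * the two masks above encode; the code below byte-swaps the masks so
 * they can be ANDed directly with the network-order header word.
 */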

static inline u64 mpls_get_key (mpls_unicast_header_t *m)
{
   u64 hash_key;
   u8  ip_ver;


   /* find the bottom of the MPLS label stack. */
   if (PREDICT_TRUE(m->label_exp_s_ttl &
                    clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
       goto bottom_lbl_found;
   }
   m++;

   if (PREDICT_TRUE(m->label_exp_s_ttl &
                    clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
       goto bottom_lbl_found;
   }
   m++;

   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
       goto bottom_lbl_found;
   }
   m++;

   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
       goto bottom_lbl_found;
   }
   m++;

   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
       goto bottom_lbl_found;
   }

   /* the bottom label was not found - use the last label */
   hash_key = m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);

   return hash_key;


bottom_lbl_found:
   m++;
   ip_ver = (*((u8 *)m) >> 4);

   /* find out if it is IPV4 or IPV6 header */
   if (PREDICT_TRUE(ip_ver == 4)) {
       hash_key = ipv4_get_key((ip4_header_t *)m);
   } else if (PREDICT_TRUE(ip_ver == 6)) {
       hash_key = ipv6_get_key((ip6_header_t *)m);
   } else {
       /* use the bottom label */
       hash_key = (m-1)->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);
   }

   return hash_key;

}

static inline u64 eth_get_key (ethernet_header_t *h0)
{
   u64 hash_key;


   if (PREDICT_TRUE(h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))) {
       hash_key = ipv4_get_key((ip4_header_t *)(h0+1));
   } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6)) {
       hash_key = ipv6_get_key((ip6_header_t *)(h0+1));
   } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
       hash_key = mpls_get_key((mpls_unicast_header_t *)(h0+1));
   } else if ((h0->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ||
              (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_DOT1AD))) {
       ethernet_vlan_header_t * outer = (ethernet_vlan_header_t *)(h0 + 1);

       outer = (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ?
                                  outer+1 : outer;
       if (PREDICT_TRUE(outer->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))) {
           hash_key = ipv4_get_key((ip4_header_t *)(outer+1));
       } else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)) {
           hash_key = ipv6_get_key((ip6_header_t *)(outer+1));
       } else if (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
           hash_key = mpls_get_key((mpls_unicast_header_t *)(outer+1));
       } else {
           hash_key = outer->type;
       }
   } else {
       hash_key = 0;
   }

   return hash_key;
}
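
/*
 * Illustrative sketch (added commentary; assumed helper, mirrors the
 * worker-selection code in dpdk_io_thread() below): the 64-bit key is
 * hashed with xxhash and folded onto a worker index.
 *
 *   static inline u32
 *   example_pick_worker (ethernet_header_t *h0, u32 num_workers)
 *   {
 *     u32 hash = (u32) clib_xxhash (eth_get_key (h0));
 *     return is_pow2 (num_workers) ?
 *       (hash & (num_workers - 1)) : (hash % num_workers);
 *   }
 */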

/*
 * This function is used when dedicated IO threads feed the worker threads.
 *
 * Devices are allocated to this thread based on instances and instance_id.
 * If instances==0 then the function automatically determines the number
 * of instances of this thread, and allocates devices between them.
 * If instances != 0, then instance_id must be in the range 0..instances-1.
 * The function allocates devices among the specified number of instances,
 * with this thread having the given instance id. This option is used for
 * splitting devices among differently named "io"-type threads.
 */
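/*
 * Example (added commentary, illustration only): with 8 devices,
 * instances == 2 and instance_id == 1, this thread takes devices
 * 1, 3, 5 and 7, i.e. every device whose index satisfies
 * (index % instances) == instance_id.
 */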
void dpdk_io_thread (vlib_worker_thread_t * w,
                     u32 instances,
                     u32 instance_id,
                     char *worker_name,
                     dpdk_io_thread_callback_t callback)
{
  vlib_main_t * vm = vlib_get_main();
  vlib_thread_main_t * tm = vlib_get_thread_main();
  vlib_thread_registration_t * tr;
  dpdk_main_t * dm = &dpdk_main;
  char *io_name = w->registration->name;
  dpdk_device_t * xd;
  dpdk_device_t ** my_devices = 0;
  vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index = 0;
  vlib_frame_queue_t ** congested_handoff_queue_by_worker_index = 0;
  vlib_frame_queue_elt_t * hf = 0;
  int i;
  u32 n_left_to_next_worker = 0, * to_next_worker = 0;
  u32 next_worker_index = 0;
  u32 current_worker_index = ~0;
  u32 cpu_index = os_get_cpu_number();
  u32 num_workers = 0;
  u32 num_devices = 0;
  uword * p;
  u16 queue_id = 0;
  vlib_node_runtime_t * node_trace;
  u32 first_worker_index = 0;

  /* Wait until the dpdk init sequence is complete */
  while (dm->io_thread_release == 0)
    vlib_worker_thread_barrier_check();

  clib_time_init (&vm->clib_time);

  p = hash_get_mem (tm->thread_registrations_by_name, worker_name);
  ASSERT (p);
  tr = (vlib_thread_registration_t *) p[0];
  if (tr)
    {
      num_workers = tr->count;
      first_worker_index = tr->first_index;
    }

  /* Allocate devices to this thread */
  if (instances == 0)
    {
      /* auto-assign */
      instance_id = w->instance_id;

      p = hash_get_mem (tm->thread_registrations_by_name, io_name);
      tr = (vlib_thread_registration_t *) p[0];
      /* Otherwise, how did we get here */
      ASSERT (tr && tr->count);
      instances = tr->count;
    }
  else
    {
      /* manually assign */
      ASSERT (instance_id < instances);
    }

  vec_validate (handoff_queue_elt_by_worker_index,
                first_worker_index + num_workers - 1);

  vec_validate_init_empty (congested_handoff_queue_by_worker_index,
                           first_worker_index + num_workers - 1,
                           (vlib_frame_queue_t *)(~0));

  /* packet tracing is triggered on the dpdk-input node for ease-of-use */
  node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);

  /* And handle them... */
  while (1)
    {
      u32 n_buffers;
      u32 mb_index;
      uword n_rx_bytes = 0;
      u32 n_trace, trace_cnt __attribute__((unused));
      vlib_buffer_free_list_t * fl;
      u32 hash;
      u64 hash_key;
      u8 efd_discard_burst;

      vlib_worker_thread_barrier_check ();

      /* Invoke callback if supplied */
      if (PREDICT_FALSE(callback != NULL))
          callback(vm);

      if (PREDICT_FALSE(vec_len(dm->devices) != num_devices))
      {
        vec_reset_length(my_devices);
        vec_foreach (xd, dm->devices)
          {
            if (((xd - dm->devices) % instances) == instance_id)
              {
                fprintf(stderr, "i/o thread %d (cpu %d) takes port %d\n",
                        instance_id, (int) os_get_cpu_number(), (int) (xd - dm->devices));
                vec_add1 (my_devices, xd);
              }
          }
        num_devices = vec_len(dm->devices);
      }

      for (i = 0; i < vec_len (my_devices); i++)
      {
          xd = my_devices[i];

          if (!xd->admin_up)
            continue;

          n_buffers = dpdk_rx_burst(dm, xd, 0 /* queue_id */);

          if (n_buffers == 0)
            {
              /* check if EFD (dpdk) is enabled */
              if (PREDICT_FALSE(dm->efd.enabled))
                {
                  /* reset a few stats */
                  xd->efd_agent.last_poll_time = 0;
                  xd->efd_agent.last_burst_sz = 0;
                }
              continue;
            }

          vec_reset_length (xd->d_trace_buffers);
          trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);

          /*
           * DAW-FIXME: VMXNET3 device stop/start doesn't work,
           * therefore fake the stop in the dpdk driver by
           * silently dropping all of the incoming pkts instead of
           * stopping the driver / hardware.
           */
          if (PREDICT_FALSE(xd->admin_up != 1))
            {
              for (mb_index = 0; mb_index < n_buffers; mb_index++)
                rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
              continue;
            }

          /* reset EFD action for the burst */
          efd_discard_burst = 0;

          /* Check for congestion if EFD (Early-Fast-Discard) is enabled
           * in any mode (e.g. dpdk, monitor, or drop_all)
           */
          if (PREDICT_FALSE(dm->efd.enabled))
            {
              /* update EFD counters */
              dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);

              if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
                {
                  /* drop all received packets */
                  for (mb_index = 0; mb_index < n_buffers; mb_index++)
                    rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);

                  xd->efd_agent.discard_cnt += n_buffers;
                  increment_efd_drop_counter(vm,
                                             DPDK_ERROR_VLAN_EFD_DROP_PKTS,
                                             n_buffers);

                  continue;
                }

              if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
                                dm->efd.consec_full_frames_hi_thresh))
                {
                  u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
                                                               queue_id);
                  if (device_queue_sz >= dm->efd.queue_hi_thresh)
                    {
                      /* dpdk device queue has reached the critical threshold */
                      xd->efd_agent.congestion_cnt++;

                      /* apply EFD to packets from the burst */
                      efd_discard_burst = 1;
                    }
                }
            }

          fl = vlib_buffer_get_free_list
            (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

          mb_index = 0;

          while (n_buffers > 0)
            {
              u32 bi0;
              u8 next0, error0;
              u32 l3_offset0;
              vlib_buffer_t * b0, * b_seg, * b_chain = 0;
              ethernet_header_t * h0;
              u8 nb_seg = 1;
              struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
              struct rte_mbuf *mb_seg = mb->next;

              if (PREDICT_TRUE(n_buffers > 2))
1317                 {
1318                   struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
1319                   vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1320                   CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1321                   CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1322                   CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
1323                 }
1324                 
1325               b0 = (vlib_buffer_t *)(mb+1);
1326
1327               /* check whether EFD is looking for packets to discard */
1328               if (PREDICT_FALSE(efd_discard_burst))
1329                 {
1330                   u32 cntr_type;
1331                   if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
1332                     {
1333                       rte_pktmbuf_free(mb);
1334                       xd->efd_agent.discard_cnt++;
1335                       increment_efd_drop_counter(vm, 
1336                                                  cntr_type,
1337                                                  1);
1338
1339                       n_buffers--;
1340                       mb_index++;
1341                       continue;
1342                     }
1343                 }
1344               
1345               /* Prefetch one next segment if it exists */
1346               if (PREDICT_FALSE(mb->nb_segs > 1))
1347                 {
1348                   struct rte_mbuf *pfmb = mb->next;
1349                   vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1350                   CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1351                   CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1352                   b_chain = b0;
1353                 }
1354
1355               bi0 = vlib_get_buffer_index (vm, b0);
1356               vlib_buffer_init_for_free_list (b0, fl);
1357               b0->clone_count = 0;
1358
1359               dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
1360                                                        &next0, &error0);
1361 #ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
1362               /*
1363                * Clear overloaded TX offload flags when a DPDK driver
1364                * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
1365                */
1366               if (PREDICT_TRUE(trace_cnt == 0))
1367                 mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
1368               else
1369                 trace_cnt--;
1370 #endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
1371
1372               if (error0)
1373                   clib_warning ("bi %d error %d", bi0, error0);
1374
1375               b0->error = 0;
1376
1377               l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
1378                              next0 == DPDK_RX_NEXT_IP6_INPUT || 
1379                              next0 == DPDK_RX_NEXT_MPLS_INPUT) ? 
1380                             sizeof (ethernet_header_t) : 0);
1381
1382               b0->current_data = l3_offset0;
1383               b0->current_length = mb->data_len - l3_offset0;
1384
1385               b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
1386
1387               if (VMWARE_LENGTH_BUG_WORKAROUND)
1388                   b0->current_length -= 4;
1389                 
1390               vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1391               vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
1392               vnet_buffer(b0)->io_handoff.next_index = next0;
1393               n_rx_bytes += mb->pkt_len;
1394
1395               /* Process subsequent segments of multi-segment packets */
1396               while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
1397                 {
1398                   ASSERT(mb_seg != 0);
1399  
1400                   b_seg = (vlib_buffer_t *)(mb_seg+1);
1401                   vlib_buffer_init_for_free_list (b_seg, fl);
1402                   b_seg->clone_count = 0;
1403  
1404                   ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
1405                   ASSERT(b_seg->current_data == 0);
1406  
1407                   /*
1408                    * The driver (e.g. virtio) may not put the packet data at the start
1409                    * of the segment, so don't assume b_seg->current_data == 0 is correct.
1410                    */
1411                   b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;
1412
1413                   b_seg->current_length = mb_seg->data_len;
1414                   b0->total_length_not_including_first_buffer +=
1415                     mb_seg->data_len;
1416  
1417                   b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
1418                   b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
1419  
1420                   b_chain = b_seg;
1421                   mb_seg = mb_seg->next;
1422                   nb_seg++;
1423                 }
1424
1425               /*
1426                * Turn this on if you run into
1427                * "bad monkey" contexts, and you want to know exactly
1428                * which nodes they've visited... See main.c...
1429                */
1430               VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
1431  
1432               if (PREDICT_FALSE (n_trace > mb_index))
1433                 vec_add1 (xd->d_trace_buffers, bi0);
1434
1435               next_worker_index = first_worker_index;
1436
1437               /* 
1438                * Force unknown traffic onto worker 0, 
1439                * and into ethernet-input. $$$$ add more hashes.
1440                */
1441               h0 = (ethernet_header_t *) b0->data;
1442
1443               /* Compute ingress LB hash */
1444               hash_key = eth_get_key(h0);
1445               hash = (u32)clib_xxhash(hash_key);
1446
1447               if (PREDICT_TRUE (is_pow2(num_workers)))
1448                 next_worker_index += hash & (num_workers - 1);
1449               else
1450                 next_worker_index += hash % num_workers;
1451
1452               /* if EFD is enabled and not already discarding from dpdk,
1453                * check the worker ring/queue for congestion
1454                */
1455               if (PREDICT_FALSE(tm->efd.enabled && !efd_discard_burst))
1456                 {
1457                   vlib_frame_queue_t *fq;
1458
1459                   /* fq will be valid if the ring is congested */
1460                   fq = is_vlib_handoff_queue_congested(
1461                       next_worker_index, tm->efd.queue_hi_thresh,
1462                       congested_handoff_queue_by_worker_index);
1463                   
1464                   if (PREDICT_FALSE(fq != NULL))
1465                     {
1466                       u32 cntr_type;
1467                       if (PREDICT_TRUE(cntr_type =
1468                                        is_efd_discardable(tm, b0, mb)))
1469                         {
1470                           /* discard the packet */
1471                           fq->enqueue_efd_discards++;
1472                           increment_efd_drop_counter(vm, cntr_type, 1);
1473                           rte_pktmbuf_free(mb);
1474                           n_buffers--;
1475                           mb_index++;
1476                           continue;
1477                         }
1478                     }
1479                 }
1480               
1481               if (next_worker_index != current_worker_index)
1482                 {
1483                   if (hf)
1484                     hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1485
1486                   hf = dpdk_get_handoff_queue_elt(
1487                            next_worker_index,
1488                            handoff_queue_elt_by_worker_index);
1489                       
1490                   n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
1491                   to_next_worker = &hf->buffer_index[hf->n_vectors];
1492                   current_worker_index = next_worker_index;
1493                 }
1494               
1495               /* enqueue to correct worker thread */
1496               to_next_worker[0] = bi0;
1497               to_next_worker++;
1498               n_left_to_next_worker--;
1499
1500               if (n_left_to_next_worker == 0)
1501                 {
1502                   hf->n_vectors = VLIB_FRAME_SIZE;
1503                   vlib_put_handoff_queue_elt(hf);
1504                   current_worker_index = ~0;
1505                   handoff_queue_elt_by_worker_index[next_worker_index] = 0;
1506                   hf = 0;
1507                 }
1508                   
1509               n_buffers--;
1510               mb_index++;
1511             }
1512
1513           if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
1514             {
1515               /* credit the trace to the trace node */
1516               dpdk_rx_trace (dm, node_trace, xd, queue_id, xd->d_trace_buffers,
1517                              vec_len (xd->d_trace_buffers));
1518               vlib_set_trace_count (vm, node_trace, n_trace - vec_len (xd->d_trace_buffers));
1519             }
1520
1521           vlib_increment_combined_counter 
1522             (vnet_get_main()->interface_main.combined_sw_if_counters
1523              + VNET_INTERFACE_COUNTER_RX,
1524              cpu_index, 
1525              xd->vlib_sw_if_index,
1526              mb_index, n_rx_bytes);
1527
1528           dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
1529           dw->aggregate_rx_packets += mb_index;
1530         }
1531
1532       if (hf)
1533         hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1534
1535       /* Ship frames to the worker nodes */
1536       for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
1537         {
1538           if (handoff_queue_elt_by_worker_index[i])
1539             {
1540               hf = handoff_queue_elt_by_worker_index[i];
1541               /* 
1542                * It works better to let the handoff node
1543                * rate-adapt, always ship the handoff queue element.
1544                */
1547               vlib_put_handoff_queue_elt(hf);
1548               handoff_queue_elt_by_worker_index[i] = 0;
1552             }
1553           congested_handoff_queue_by_worker_index[i] = (vlib_frame_queue_t *)(~0);
1554         }
1555       hf = 0;
1556       current_worker_index = ~0;
1557
1558       vlib_increment_main_loop_counter (vm);
1559     }
1560 }
1561
1562 /*
1563  * This function is used when the main thread performs IO and feeds the
1564  * worker threads.
1565  */
1566 static uword
1567 dpdk_io_input (vlib_main_t * vm,
1568                vlib_node_runtime_t * node,
1569                vlib_frame_t * f)
1570 {
1571   dpdk_main_t * dm = &dpdk_main;
1572   dpdk_device_t * xd;
1573   vlib_thread_main_t * tm = vlib_get_thread_main();
1574   uword n_rx_packets = 0;
1575   static vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index;
1576   static vlib_frame_queue_t ** congested_handoff_queue_by_worker_index = 0;
1577   vlib_frame_queue_elt_t * hf = 0;
1578   int i;
1579   u32 n_left_to_next_worker = 0, * to_next_worker = 0;
1580   u32 next_worker_index = 0;
1581   u32 current_worker_index = ~0;
1582   u32 cpu_index = os_get_cpu_number();
1583   static int num_workers_set;
1584   static u32 num_workers;
1585   u16 queue_id = 0;
1586   vlib_node_runtime_t * node_trace;
1587   static u32 first_worker_index;
1588
1589   if (PREDICT_FALSE(num_workers_set == 0))
1590     {
1591       uword * p;
1592       vlib_thread_registration_t * tr;
1593       /* Only the standard vnet worker threads are supported */
1594       p = hash_get_mem (tm->thread_registrations_by_name, "workers");
1595       tr = p ? (vlib_thread_registration_t *) p[0] : 0;
1596       if (tr) 
1597         {
1598           num_workers = tr->count;
1599           first_worker_index = tr->first_index;
1600         }
1601       num_workers_set = 1;
1602     }
1603
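  /*
   * First call: size the static per-worker state. Each slot of
   * handoff_queue_elt_by_worker_index caches the frame-queue element
   * currently being filled for that worker, while
   * congested_handoff_queue_by_worker_index caches a worker's frame
   * queue once it has been observed congested;
   * (vlib_frame_queue_t *)(~0) means "not known to be congested".
   */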
1604   if (PREDICT_FALSE(handoff_queue_elt_by_worker_index == 0))
1605     {
1606       vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);
1607       
1608       vec_validate_init_empty (congested_handoff_queue_by_worker_index,
1609                                first_worker_index + num_workers - 1,
1610                                (vlib_frame_queue_t *)(~0));
1611     }
1612
1613   /* packet tracing is triggered on the dpdk-input node for ease-of-use */
1614   node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);
1615
1616   vec_foreach (xd, dm->devices)
1617     {
1618       u32 n_buffers;
1619       u32 mb_index;
1620       uword n_rx_bytes = 0;
1621       u32 n_trace, trace_cnt __attribute__((unused));
1622       vlib_buffer_free_list_t * fl;
1623       u32 hash;
1624       u64 hash_key;
1625       u8 efd_discard_burst = 0;
1626
1627       if (!xd->admin_up)
1628         continue;
1629
1630       n_buffers = dpdk_rx_burst(dm, xd, queue_id );
1631
1632       if (n_buffers == 0)
1633         {
1634           /* check if EFD (dpdk) is enabled */
1635           if (PREDICT_FALSE(dm->efd.enabled))
1636             {
1637               /* reset a few stats */
1638               xd->efd_agent.last_poll_time = 0;
1639               xd->efd_agent.last_burst_sz = 0;
1640             }
1641           continue;
1642         }
1643
1644       vec_reset_length (xd->d_trace_buffers);
1645       trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);
1646         
1647       /*
1648        * DAW-FIXME: VMXNET3 device stop/start doesn't work,
1649        * so fake a stop in the dpdk driver by silently dropping
1650        * all incoming packets instead of stopping the
1651        * driver / hardware.
1652        */
1653       if (PREDICT_FALSE(xd->admin_up != 1))
1654         {
1655           for (mb_index = 0; mb_index < n_buffers; mb_index++)
1656             rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
1657           continue;
1658         }
1659
1660       /* Check for congestion if EFD (Early-Fast-Discard) is enabled
1661        * in any mode (e.g. dpdk, monitor, or drop_all)
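       * Three stages apply: drop-all mode frees the whole burst here;
       * a persistently full device queue arms per-packet discard for
       * this burst (efd_discard_burst); and per-worker handoff-ring
       * congestion is checked again at enqueue time below.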
1662        */
1663       if (PREDICT_FALSE(dm->efd.enabled))
1664         {
1665           /* update EFD counters */
1666           dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);
1667
1668           if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
1669             {
1670               /* discard all received packets */
1671               for (mb_index = 0; mb_index < n_buffers; mb_index++)
1672                 rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);
1673
1674               xd->efd_agent.discard_cnt += n_buffers;
1675               increment_efd_drop_counter(vm, 
1676                                          DPDK_ERROR_VLAN_EFD_DROP_PKTS,
1677                                          n_buffers);
1678             
1679               continue;
1680             }
1681           
1682           if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
1683                             dm->efd.consec_full_frames_hi_thresh))
1684             {
1685               u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
1686                                                            queue_id);
1687               if (device_queue_sz >= dm->efd.queue_hi_thresh)
1688                 {
1689                   /* dpdk device queue has reached the critical threshold */
1690                   xd->efd_agent.congestion_cnt++;
1691
1692                   /* apply EFD to packets from the burst */
1693                   efd_discard_burst = 1;
1694                 }
1695             }
1696         }
1697       
1698       fl = vlib_buffer_get_free_list 
1699         (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
1700           
1701       mb_index = 0;
1702
1703       while (n_buffers > 0)
1704         {
1705           u32 bi0;
1706           u8 next0, error0;
1707           u32 l3_offset0;
1708           vlib_buffer_t * b0, * b_seg, * b_chain = 0;
1709           ethernet_header_t * h0;
1710           u8 nb_seg = 1;
1711           struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
1712           struct rte_mbuf *mb_seg = mb->next;
1713
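          /*
           * Prefetch the mbuf, the buffer header, and the start of
           * packet data two packets ahead so they are warm by the
           * time we process that packet.
           */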
1714           if (PREDICT_TRUE(n_buffers > 2))
1715             {
1716               struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
1717               vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1718               CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1719               CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1720               CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
1721             }
1722                 
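          /*
           * VPP stores the vlib_buffer_t in the mbuf's private data
           * area, immediately after the struct rte_mbuf header, so
           * "mb + 1" addresses the vlib buffer for this packet.
           */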
1723           b0 = (vlib_buffer_t *)(mb+1);
1724                 
1725           /* check whether EFD is looking for packets to discard */
1726           if (PREDICT_FALSE(efd_discard_burst))
1727             {
1728               u32 cntr_type;
1729               if (PREDICT_TRUE((cntr_type = is_efd_discardable(tm, b0, mb)) != 0))
1730                 {
1731                   rte_pktmbuf_free(mb);
1732                   xd->efd_agent.discard_cnt++;
1733                   increment_efd_drop_counter(vm, 
1734                                              cntr_type,
1735                                              1);
1736
1737                   n_buffers--;
1738                   mb_index++;
1739                   continue;
1740                 }
1741             }
1742
1743           /* Prefetch one next segment if it exists */
1744           if (PREDICT_FALSE(mb->nb_segs > 1))
1745             {
1746               struct rte_mbuf *pfmb = mb->next;
1747               vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
1748               CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1749               CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1750               b_chain = b0;
1751             }
1752
1753           bi0 = vlib_get_buffer_index (vm, b0);
1754           vlib_buffer_init_for_free_list (b0, fl);
1755           b0->clone_count = 0;
1756
1757           dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
1758                                                    &next0, &error0);
1759 #ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
1760           /*
1761            * Clear overloaded TX offload flags when a DPDK driver
1762            * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
1763            */
1764           if (PREDICT_TRUE(trace_cnt == 0))
1765             mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
1766           else
1767             trace_cnt--;
1768 #endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
1769
1770           if (error0)
1771             clib_warning ("bi %d error %d", bi0, error0);
1772
1773           b0->error = 0;
1774
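          /*
           * If the mbuf flags classified the packet as IP4/IP6/MPLS
           * (see next0 above), skip the ethernet header so the L3 node
           * receives the packet at its payload; otherwise hand the
           * whole frame to ethernet-input at offset 0.
           */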
1775           l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
1776                          next0 == DPDK_RX_NEXT_IP6_INPUT || 
1777                          next0 == DPDK_RX_NEXT_MPLS_INPUT) ? 
1778                         sizeof (ethernet_header_t) : 0);
1779
1780           b0->current_data = l3_offset0;
1781           b0->current_length = mb->data_len - l3_offset0;
1782
1783           b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
1784                 
1785           if (VMWARE_LENGTH_BUG_WORKAROUND)
1786               b0->current_length -= 4;
1787
1788           vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1789           vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
1790           vnet_buffer(b0)->io_handoff.next_index = next0;
1791           n_rx_bytes += mb->pkt_len;
1792
1793           /* Process subsequent segments of multi-segment packets */
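          /*
           * Mirror the rest of the mbuf chain as a vlib buffer chain:
           * each segment links to its successor via next_buffer plus
           * VLIB_BUFFER_NEXT_PRESENT, and only the head buffer (b0)
           * accumulates total_length_not_including_first_buffer.
           */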
1794           while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
1795             {
1796               ASSERT(mb_seg != 0);
1797  
1798               b_seg = (vlib_buffer_t *)(mb_seg+1);
1799               vlib_buffer_init_for_free_list (b_seg, fl);
1800               b_seg->clone_count = 0;
1801  
1802               ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
1804  
1805               /*
1806                * The driver (e.g. virtio) may not put the packet data at the start
1807                * of the segment, so don't assume b_seg->current_data == 0 is correct.
1808                */
1809               b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;
1810
1811               b_seg->current_length = mb_seg->data_len;
1812               b0->total_length_not_including_first_buffer +=
1813                 mb_seg->data_len;
1814  
1815               b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
1816               b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
1817  
1818               b_chain = b_seg;
1819               mb_seg = mb_seg->next;
1820               nb_seg++;
1821             }
1822  
1823           /*
1824            * Turn this on if you run into
1825            * "bad monkey" contexts, and you want to know exactly
1826            * which nodes they've visited... See main.c...
1827            */
1828           VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
1829  
1830           if (PREDICT_FALSE (n_trace > mb_index))
1831             vec_add1 (xd->d_trace_buffers, bi0);
1832
1833           next_worker_index = first_worker_index;
1834
1835           /* 
1836            * Pick the destination worker from a hash of the
1837            * ethernet header. $$$$ add more hashes.
1838            */
1839           h0 = (ethernet_header_t *) b0->data;
1840
1841           /* Compute ingress LB hash */
1842           hash_key = eth_get_key(h0);
1843           hash = (u32)clib_xxhash(hash_key);
1844
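          /*
           * Map the hash onto [0, num_workers): when num_workers is a
           * power of two, (hash & (num_workers - 1)) selects the same
           * worker as (hash % num_workers) without the integer divide,
           * e.g. with 4 workers, hash & 3.
           */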
1845           if (PREDICT_TRUE (is_pow2(num_workers)))
1846             next_worker_index += hash & (num_workers - 1);
1847           else
1848             next_worker_index += hash % num_workers;
1849
1850           /* if EFD is enabled and not already discarding from dpdk,
1851            * check the worker ring/queue for congestion
1852            */
1853           if (PREDICT_FALSE(tm->efd.enabled && !efd_discard_burst))
1854             {
1855               vlib_frame_queue_t *fq;
1856
1857               /* fq will be valid if the ring is congested */
1858               fq = is_vlib_handoff_queue_congested(
1859                   next_worker_index, tm->efd.queue_hi_thresh,
1860                   congested_handoff_queue_by_worker_index);
1861               
1862               if (PREDICT_FALSE(fq != NULL))
1863                 {
1864                   u32 cntr_type;
1865                   if (PREDICT_TRUE((cntr_type =
1866                                     is_efd_discardable(tm, b0, mb)) != 0))
1867                     {
1868                       /* discard the packet */
1869                       fq->enqueue_efd_discards++;
1870                       increment_efd_drop_counter(vm, cntr_type, 1);
1871                       rte_pktmbuf_free(mb);
1872                       n_buffers--;
1873                       mb_index++;
1874                       continue;
1875                     }
1876                 }
1877             }
1878           
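          /*
           * Destination worker changed: bank the fill level of the
           * element we were building, then pick up (or allocate) the
           * element in progress for the new worker and resume filling
           * it from its current fill level.
           */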
1879           if (next_worker_index != current_worker_index)
1880             {
1881               if (hf)
1882                 hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1883
1884               hf = dpdk_get_handoff_queue_elt(
1885                      next_worker_index,
1886                      handoff_queue_elt_by_worker_index);
1887
1888               n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
1889               to_next_worker = &hf->buffer_index[hf->n_vectors];
1890               current_worker_index = next_worker_index;
1891             }
1892           
1893           /* enqueue to correct worker thread */
1894           to_next_worker[0] = bi0;
1895           to_next_worker++;
1896           n_left_to_next_worker--;
1897
1898           if (n_left_to_next_worker == 0)
1899             {
1900               hf->n_vectors = VLIB_FRAME_SIZE;
1901               vlib_put_handoff_queue_elt(hf);
1902               current_worker_index = ~0;
1903               handoff_queue_elt_by_worker_index[next_worker_index] = 0;
1904               hf = 0;
1905             }
1906           
1907           n_buffers--;
1908           mb_index++;
1909         }
1910
1911       if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
1912         {
1913           /* credit the trace to the trace node */
1914           dpdk_rx_trace (dm, node_trace, xd, queue_id, xd->d_trace_buffers,
1915                          vec_len (xd->d_trace_buffers));
1916           vlib_set_trace_count (vm, node_trace, n_trace - vec_len (xd->d_trace_buffers));
1917         }
1918
1919       vlib_increment_combined_counter 
1920         (vnet_get_main()->interface_main.combined_sw_if_counters
1921          + VNET_INTERFACE_COUNTER_RX,
1922          cpu_index, 
1923          xd->vlib_sw_if_index,
1924          mb_index, n_rx_bytes);
1925
1926       dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
1927       dw->aggregate_rx_packets += mb_index;
1928       n_rx_packets += mb_index;
1929     }
1930
1931   if (hf)
1932     hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1933   
1934   /* Ship frames to the worker nodes */
1935   for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
1936     {
1937       if (handoff_queue_elt_by_worker_index[i])
1938         {
1939           hf = handoff_queue_elt_by_worker_index[i];
1940           /* 
1941            * It works better to let the handoff node
1942            * rate-adapt, always ship the handoff queue element.
1943            */
1946           vlib_put_handoff_queue_elt(hf);
1947           handoff_queue_elt_by_worker_index[i] = 0;
1951         }
1952       congested_handoff_queue_by_worker_index[i] = (vlib_frame_queue_t *)(~0);
1953     }
1954   hf = 0;
1955   current_worker_index = ~0;
1956   return n_rx_packets;
1957 }
1958
1959 VLIB_REGISTER_NODE (dpdk_io_input_node) = {
1960   .function = dpdk_io_input,
1961   .type = VLIB_NODE_TYPE_INPUT,
1962   .name = "dpdk-io-input",
1963
1964   /* Will be enabled if/when hardware is detected. */
1965   .state = VLIB_NODE_STATE_DISABLED,
1966
1967   .format_buffer = format_ethernet_header_with_length,
1968   .format_trace = format_dpdk_rx_dma_trace,
1969
1970   .n_errors = DPDK_N_ERROR,
1971   .error_strings = dpdk_error_strings,
1972
1973   .n_next_nodes = DPDK_RX_N_NEXT,
1974   .next_nodes = {
1975     [DPDK_RX_NEXT_DROP] = "error-drop",
1976     [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
1977     [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1978     [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
1979     [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
1980   },
1981 };
1982
1983 /*
1984  * set_efd_bitmap()
1985  * Based on the operation type, set lower/upper bits for the given index value
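 * e.g. value 3 with EFD_OPERATION_LESS_THAN sets bits 0-2 (0x07),
 * while value 3 with EFD_OPERATION_GREATER_OR_EQUAL sets bits 3-7 (0xf8).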
1986  */
1987 void
1988 set_efd_bitmap (u8 *bitmap, u32 value, u32 op)
1989 {
1990     int ix;
1991
1992     *bitmap = 0;
1993     for (ix = 0; ix < 8; ix++) {
1994         if (((op == EFD_OPERATION_LESS_THAN) && (ix < value)) ||
1995             ((op == EFD_OPERATION_GREATER_OR_EQUAL) && (ix >= value))){
1996             (*bitmap) |= (1 << ix);
1997         }
1998     }
1999 }
2000
2001 void
2002 efd_config (u32 enabled, 
2003             u32 ip_prec,  u32 ip_op,
2004             u32 mpls_exp, u32 mpls_op,
2005             u32 vlan_cos, u32 vlan_op)
2006 {
2007    vlib_thread_main_t * tm = vlib_get_thread_main();
2008    dpdk_main_t * dm = &dpdk_main;
2009
2010    if (enabled) {
2011        tm->efd.enabled |= VLIB_EFD_DISCARD_ENABLED;
2012        dm->efd.enabled |= DPDK_EFD_DISCARD_ENABLED;
2013    } else {
2014        tm->efd.enabled &= ~VLIB_EFD_DISCARD_ENABLED;
2015        dm->efd.enabled &= ~DPDK_EFD_DISCARD_ENABLED;
2016    }
2017
2018    set_efd_bitmap(&tm->efd.ip_prec_bitmap, ip_prec, ip_op);
2019    set_efd_bitmap(&tm->efd.mpls_exp_bitmap, mpls_exp, mpls_op);
2020    set_efd_bitmap(&tm->efd.vlan_cos_bitmap, vlan_cos, vlan_op);
2021
2022 }
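
/*
 * Illustrative usage only (example thresholds, not recommendations):
 * enable discard mode and mark packets whose IP precedence, MPLS EXP,
 * or VLAN CoS value is below 3 as discard-eligible:
 *
 *   efd_config (1,
 *               3, EFD_OPERATION_LESS_THAN,
 *               3, EFD_OPERATION_LESS_THAN,
 *               3, EFD_OPERATION_LESS_THAN);
 */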