/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/xxhash.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/dpdk/dpdk.h>
#include <vnet/classify/vnet_classify.h>
#include <vnet/mpls-gre/packet.h>

#include "dpdk_priv.h"

#ifndef MAX
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#endif

#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif

/*
 * At least in certain versions of ESXi, vmware e1000's don't honor the
 * "strip rx CRC" bit. Set this flag to work around that bug FOR UNIT TEST ONLY.
 *
 * If wireshark complains like so:
 *
 * "Frame check sequence: 0x00000000 [incorrect, should be <hex-num>]"
 * and you're using ESXi emulated e1000's, set this flag FOR UNIT TEST ONLY.
 *
 * Note: do NOT check in this file with this workaround enabled! You'll lose
 * actual data from e.g. 10xGE interfaces. The extra 4 bytes annoy
 * wireshark, but they're harmless...
 */
#define VMWARE_LENGTH_BUG_WORKAROUND 0

typedef struct {
  u32 cached_next_index;

  /* convenience variables */
  vlib_main_t * vlib_main;
  vnet_main_t * vnet_main;
} handoff_dispatch_main_t;

typedef struct {
  u32 buffer_index;
  u32 next_index;
  u32 sw_if_index;
} handoff_dispatch_trace_t;

/* packet trace format function */
static u8 * format_handoff_dispatch_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  handoff_dispatch_trace_t * t = va_arg (*args, handoff_dispatch_trace_t *);

  s = format (s, "HANDOFF_DISPATCH: sw_if_index %d next_index %d buffer 0x%x",
      t->sw_if_index,
      t->next_index,
      t->buffer_index);
  return s;
}

handoff_dispatch_main_t handoff_dispatch_main;

vlib_node_registration_t handoff_dispatch_node;

#define foreach_handoff_dispatch_error \
_(EXAMPLE, "example packets")

typedef enum {
#define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
  foreach_handoff_dispatch_error
#undef _
  HANDOFF_DISPATCH_N_ERROR,
} handoff_dispatch_error_t;

static char * handoff_dispatch_error_strings[] = {
#define _(sym,string) string,
  foreach_handoff_dispatch_error
#undef _
};
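
/*
 * The "_" macro is expanded twice against foreach_handoff_dispatch_error:
 * once to generate the enum members and once to generate the matching
 * counter strings. For the single entry above that yields
 * HANDOFF_DISPATCH_ERROR_EXAMPLE paired with the string "example packets".
 */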

static inline void
vlib_put_handoff_queue_elt (vlib_frame_queue_elt_t * hf)
{
  CLIB_MEMORY_BARRIER();
  hf->valid = 1;
}
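
/*
 * Note on ordering: the producer fills in the frame-queue element first
 * and only then sets hf->valid = 1. The CLIB_MEMORY_BARRIER() above keeps
 * the valid-flag store from being reordered ahead of the element writes,
 * so a consumer that observes valid != 0 sees a fully written element.
 */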

static uword
handoff_dispatch_node_fn (vlib_main_t * vm,
                          vlib_node_runtime_t * node,
                          vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  dpdk_rx_next_t next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          next0 = vnet_buffer(b0)->io_handoff.next_index;
          next1 = vnet_buffer(b1)->io_handoff.next_index;

          if (PREDICT_FALSE(vm->trace_main.trace_active_hint))
            {
              if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  t->buffer_index = bi0;
                }
              if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next1, b1, /* follow_chain */ 0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index1;
                  t->next_index = next1;
                  t->buffer_index = bi1;
                }
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          u32 sw_if_index0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          next0 = vnet_buffer(b0)->io_handoff.next_index;

          if (PREDICT_FALSE(vm->trace_main.trace_active_hint))
            {
              if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  t->buffer_index = bi0;
                }
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
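
/*
 * The dual- and single-buffer loops above use the standard vlib
 * speculative-enqueue pattern: each buffer index is copied into the frame
 * for the cached next_index first, and vlib_validate_buffer_enqueue_x2()
 * / _x1() repair the enqueue afterwards whenever next0/next1 turn out to
 * differ from the speculation.
 */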

VLIB_REGISTER_NODE (handoff_dispatch_node) = {
  .function = handoff_dispatch_node_fn,
  .name = "handoff-dispatch",
  .vector_size = sizeof (u32),
  .format_trace = format_handoff_dispatch_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .flags = VLIB_NODE_FLAG_IS_HANDOFF,

  .n_errors = ARRAY_LEN(handoff_dispatch_error_strings),
  .error_strings = handoff_dispatch_error_strings,

  .n_next_nodes = DPDK_RX_N_NEXT,

  .next_nodes = {
        [DPDK_RX_NEXT_DROP] = "error-drop",
        [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
        [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input",
        [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
        [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (handoff_dispatch_node, handoff_dispatch_node_fn)

clib_error_t *handoff_dispatch_init (vlib_main_t *vm)
{
  handoff_dispatch_main_t * mp = &handoff_dispatch_main;

  mp->vlib_main = vm;
  mp->vnet_main = &vnet_main;

  return 0;
}

VLIB_INIT_FUNCTION (handoff_dispatch_init);

u32 dpdk_get_handoff_node_index (void)
{
  return handoff_dispatch_node.index;
}

static char * dpdk_error_strings[] = {
#define _(n,s) s,
    foreach_dpdk_error
#undef _
};

always_inline void
dpdk_rx_next_and_error_from_mb_flags_x1 (dpdk_device_t *xd, struct rte_mbuf *mb,
                                         vlib_buffer_t *b0,
                                         u8 * next0, u8 * error0)
{
  u8 is0_ip4, is0_ip6, is0_mpls, n0;
  uint16_t mb_flags = mb->ol_flags;

  if (PREDICT_FALSE(mb_flags & (
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
       PKT_EXT_RX_PKT_ERROR | PKT_EXT_RX_BAD_FCS   |
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
        PKT_RX_IP_CKSUM_BAD  | PKT_RX_L4_CKSUM_BAD
    )))
    {
      /* some error was flagged. determine the drop reason */
      n0 = DPDK_RX_NEXT_DROP;
      *error0 =
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
        (mb_flags & PKT_EXT_RX_PKT_ERROR) ? DPDK_ERROR_RX_PACKET_ERROR :
        (mb_flags & PKT_EXT_RX_BAD_FCS) ? DPDK_ERROR_RX_BAD_FCS :
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
        (mb_flags & PKT_RX_IP_CKSUM_BAD) ? DPDK_ERROR_IP_CHECKSUM_ERROR :
        (mb_flags & PKT_RX_L4_CKSUM_BAD) ? DPDK_ERROR_L4_CHECKSUM_ERROR :
        DPDK_ERROR_NONE;
    }
  else
    {
      *error0 = DPDK_ERROR_NONE;
      if (PREDICT_FALSE(xd->per_interface_next_index != ~0))
        n0 = xd->per_interface_next_index;
      else if (PREDICT_FALSE(xd->vlan_subifs || (mb_flags & PKT_RX_VLAN_PKT)))
        n0 = DPDK_RX_NEXT_ETHERNET_INPUT;
      else
        {
          n0 = DPDK_RX_NEXT_ETHERNET_INPUT;
#if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
          is0_ip4 = RTE_ETH_IS_IPV4_HDR(mb->packet_type) != 0;
#else
          is0_ip4 = (mb_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV4_HDR_EXT)) != 0;
#endif

          if (PREDICT_TRUE(is0_ip4))
            n0 = DPDK_RX_NEXT_IP4_INPUT;
          else
            {
#if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
              is0_ip6 = RTE_ETH_IS_IPV6_HDR(mb->packet_type) != 0;
#else
              is0_ip6 =
                      (mb_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) != 0;
#endif
              if (PREDICT_TRUE(is0_ip6))
                n0 = DPDK_RX_NEXT_IP6_INPUT;
              else
                {
                  ethernet_header_t *h0 = (ethernet_header_t *) b0->data;
                  is0_mpls = (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST));
                  n0 = is0_mpls ? DPDK_RX_NEXT_MPLS_INPUT : n0;
                }
            }
        }
    }
  *next0 = n0;
}
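
/*
 * Summary of the selection above: hardware-flagged errors go to
 * DPDK_RX_NEXT_DROP with a specific error counter; otherwise a configured
 * per-interface next node wins, VLAN traffic falls back to ethernet-input,
 * and IPv4/IPv6/MPLS packets are dispatched directly to their input nodes,
 * skipping ethernet-input entirely.
 */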

void dpdk_rx_trace (dpdk_main_t * dm,
                    vlib_node_runtime_t * node,
                    dpdk_device_t * xd,
                    u16 queue_id,
                    u32 * buffers,
                    uword n_buffers)
{
  vlib_main_t * vm = vlib_get_main();
  u32 * b, n_left;
  u8 next0;

  n_left = n_buffers;
  b = buffers;

  while (n_left >= 1)
    {
      u32 bi0;
      vlib_buffer_t * b0;
      dpdk_rx_dma_trace_t * t0;
      struct rte_mbuf *mb;
      u8 error0;

      bi0 = b[0];
      n_left -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      mb = rte_mbuf_from_vlib_buffer(b0);
      dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
                                               &next0, &error0);
      vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
      t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->queue_index = queue_id;
      t0->device_index = xd->device_index;
      t0->buffer_index = bi0;

      clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
      clib_memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
      clib_memcpy (t0->buffer.pre_data, b0->data, sizeof (t0->buffer.pre_data));
      clib_memcpy (&t0->data, mb->buf_addr + mb->data_off, sizeof (t0->data));

#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
      /*
       * Clear overloaded TX offload flags when a DPDK driver
       * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
       */
      mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */

      b += 1;
    }
}
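
/*
 * Each trace record above captures the rte_mbuf metadata, the vlib buffer
 * header, and the first bytes of packet data (t0->data); the latter is
 * what the verbose form of the dpdk-input trace formats as a hexdump.
 */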

/*
 * dpdk_efd_update_counters()
 * Update EFD (early-fast-discard) counters
 */
void dpdk_efd_update_counters (dpdk_device_t *xd,
                               u32 n_buffers,
                               u16 enabled)
{
  if (enabled & DPDK_EFD_MONITOR_ENABLED)
    {
      u64 now = clib_cpu_time_now();
      if (xd->efd_agent.last_poll_time > 0)
        {
          u64 elapsed_time = (now - xd->efd_agent.last_poll_time);
          if (elapsed_time > xd->efd_agent.max_poll_delay)
            xd->efd_agent.max_poll_delay = elapsed_time;
        }
      xd->efd_agent.last_poll_time = now;
    }

  xd->efd_agent.total_packet_cnt += n_buffers;
  xd->efd_agent.last_burst_sz = n_buffers;

  if (n_buffers > xd->efd_agent.max_burst_sz)
    xd->efd_agent.max_burst_sz = n_buffers;

  if (PREDICT_FALSE(n_buffers == VLIB_FRAME_SIZE))
    {
      xd->efd_agent.full_frames_cnt++;
      xd->efd_agent.consec_full_frames_cnt++;
    }
  else
    {
      xd->efd_agent.consec_full_frames_cnt = 0;
    }
}
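
/*
 * consec_full_frames_cnt counts back-to-back maximum-size bursts; the RX
 * paths below compare it against dm->efd.consec_full_frames_hi_thresh to
 * decide when to probe the device queue depth and arm discards for the
 * current burst.
 */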

/* is_efd_discardable()
 *   returns a non-zero DPDK error if the packet meets early-fast-discard
 *   criteria, zero otherwise
 */
u32 is_efd_discardable (vlib_thread_main_t *tm,
                        vlib_buffer_t * b0,
                        struct rte_mbuf *mb)
{
  ethernet_header_t *eh = (ethernet_header_t *) b0->data;

  if (eh->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))
    {
      ip4_header_t *ipv4 =
          (ip4_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_prec = (ipv4->tos >> 5);

      return (tm->efd.ip_prec_bitmap & (1 << pkt_prec) ?
                  DPDK_ERROR_IPV4_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }
  else if (eh->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6))
    {
      ip6_header_t *ipv6 =
          (ip6_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_tclass =
          ((ipv6->ip_version_traffic_class_and_flow_label >> 20) & 0xff);

      return (tm->efd.ip_prec_bitmap & (1 << pkt_tclass) ?
                  DPDK_ERROR_IPV6_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }
  else if (eh->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST))
    {
      mpls_unicast_header_t *mpls =
          (mpls_unicast_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_exp = ((mpls->label_exp_s_ttl >> 9) & 0x07);

      return (tm->efd.mpls_exp_bitmap & (1 << pkt_exp) ?
                  DPDK_ERROR_MPLS_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }
  else if ((eh->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ||
           (eh->type == clib_host_to_net_u16(ETHERNET_TYPE_DOT1AD)))
    {
      ethernet_vlan_header_t *vlan =
          (ethernet_vlan_header_t *)&(b0->data[sizeof(ethernet_header_t)]);
      u8 pkt_cos = ((vlan->priority_cfi_and_id >> 13) & 0x07);

      return (tm->efd.vlan_cos_bitmap & (1 << pkt_cos) ?
                  DPDK_ERROR_VLAN_EFD_DROP_PKTS : DPDK_ERROR_NONE);
    }

  return DPDK_ERROR_NONE;
}
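
/*
 * Worked example (illustrative values): an IPv4 packet carrying TOS 0xb8
 * (the EF PHB) has precedence (0xb8 >> 5) == 5, so it is discarded only
 * when bit 5 of tm->efd.ip_prec_bitmap is set:
 *
 *   u8 pkt_prec = 0xb8 >> 5;                         // == 5
 *   int drop = (tm->efd.ip_prec_bitmap & (1 << 5));  // non-zero => drop
 */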

/*
 * This function is used when there are no worker threads.
 * The main thread performs IO and forwards the packets.
 */
static inline u32 dpdk_device_input ( dpdk_main_t * dm,
                                      dpdk_device_t * xd,
                                      vlib_node_runtime_t * node,
                                      u32 cpu_index,
                                      u16 queue_id,
                                      int use_efd)
{
  u32 n_buffers;
  u32 next_index = DPDK_RX_NEXT_ETHERNET_INPUT;
  u32 n_left_to_next, * to_next;
  u32 mb_index;
  vlib_main_t * vm = vlib_get_main();
  uword n_rx_bytes = 0;
  u32 n_trace, trace_cnt __attribute__((unused));
  vlib_buffer_free_list_t * fl;
  u8 efd_discard_burst = 0;
  u32 buffer_flags_template;

  if (xd->admin_up == 0)
    return 0;

  n_buffers = dpdk_rx_burst(dm, xd, queue_id);

  if (n_buffers == 0)
    {
      /* check if EFD (dpdk) is enabled */
      if (PREDICT_FALSE(use_efd && dm->efd.enabled))
        {
          /* reset a few stats */
          xd->efd_agent.last_poll_time = 0;
          xd->efd_agent.last_burst_sz = 0;
        }
      return 0;
    }

  buffer_flags_template = dm->buffer_flags_template;

  vec_reset_length (xd->d_trace_buffers);
  trace_cnt = n_trace = vlib_get_trace_count (vm, node);

  fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

  /*
   * DAW-FIXME: VMXNET3 device stop/start doesn't work,
   * therefore fake the stop in the dpdk driver by
   * silently dropping all of the incoming pkts instead of
   * stopping the driver / hardware.
   */
  if (PREDICT_FALSE(xd->admin_up != 1))
    {
      for (mb_index = 0; mb_index < n_buffers; mb_index++)
        rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);

      return 0;
    }

  /* Check for congestion if EFD (Early-Fast-Discard) is enabled
   * in any mode (e.g. dpdk, monitor, or drop_all)
   */
  if (PREDICT_FALSE(use_efd && dm->efd.enabled))
    {
      /* update EFD counters */
      dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);

      if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
        {
          /* discard all received packets */
          for (mb_index = 0; mb_index < n_buffers; mb_index++)
            rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);

          xd->efd_agent.discard_cnt += n_buffers;
          increment_efd_drop_counter(vm,
                                     DPDK_ERROR_VLAN_EFD_DROP_PKTS,
                                     n_buffers);

          return 0;
        }

      if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
                        dm->efd.consec_full_frames_hi_thresh))
        {
          u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
                                                       queue_id);
          if (device_queue_sz >= dm->efd.queue_hi_thresh)
            {
              /* dpdk device queue has reached the critical threshold */
              xd->efd_agent.congestion_cnt++;

              /* apply EFD to packets from the burst */
              efd_discard_burst = 1;
            }
        }
    }

  mb_index = 0;

  while (n_buffers > 0)
    {
      u32 bi0;
      u8 next0, error0;
      u32 l3_offset0;
      vlib_buffer_t * b0, * b_seg, * b_chain = 0;
      u32 cntr_type;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_buffers > 0 && n_left_to_next > 0)
        {
          u8 nb_seg = 1;
          struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
          struct rte_mbuf *mb_seg = mb->next;

          if (PREDICT_TRUE(n_buffers > 2))
            {
              struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
              vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
              CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, STORE);
              CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
            }

          ASSERT(mb);

          b0 = vlib_buffer_from_rte_mbuf(mb);

          /* check whether EFD is looking for packets to discard */
          if (PREDICT_FALSE(efd_discard_burst))
            {
              vlib_thread_main_t * tm = vlib_get_thread_main();

              if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
                {
                  rte_pktmbuf_free(mb);
                  xd->efd_agent.discard_cnt++;
                  increment_efd_drop_counter(vm,
                                             cntr_type,
                                             1);
                  n_buffers--;
                  mb_index++;
                  continue;
                }
            }

          /* Prefetch one next segment if it exists. */
          if (PREDICT_FALSE(mb->nb_segs > 1))
            {
              struct rte_mbuf *pfmb = mb->next;
              vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
              CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
              CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
              b_chain = b0;
            }

          vlib_buffer_init_for_free_list (b0, fl);
          b0->clone_count = 0;

          bi0 = vlib_get_buffer_index (vm, b0);

          to_next[0] = bi0;
          to_next++;
          n_left_to_next--;

          dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
                                                   &next0, &error0);
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
          /*
           * Clear overloaded TX offload flags when a DPDK driver
           * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
           */

          if (PREDICT_TRUE(trace_cnt == 0))
            mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
          else
            trace_cnt--;
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */

          b0->error = node->errors[error0];

          l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
                         next0 == DPDK_RX_NEXT_IP6_INPUT ||
                         next0 == DPDK_RX_NEXT_MPLS_INPUT) ?
                        sizeof (ethernet_header_t) : 0);

          b0->current_data = l3_offset0;
          /* Some drivers like fm10k receive frames with
             mb->data_off > RTE_PKTMBUF_HEADROOM */
          b0->current_data += mb->data_off - RTE_PKTMBUF_HEADROOM;
          b0->current_length = mb->data_len - l3_offset0;

          b0->flags = buffer_flags_template;

          if (VMWARE_LENGTH_BUG_WORKAROUND)
              b0->current_length -= 4;

          vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
          n_rx_bytes += mb->pkt_len;

          /* Process subsequent segments of multi-segment packets */
          while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
            {
              ASSERT(mb_seg != 0);

              b_seg = vlib_buffer_from_rte_mbuf(mb_seg);
              vlib_buffer_init_for_free_list (b_seg, fl);
              b_seg->clone_count = 0;

              ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
              ASSERT(b_seg->current_data == 0);

              /*
               * The driver (e.g. virtio) may not put the packet data at the start
               * of the segment, so don't assume b_seg->current_data == 0 is correct.
               */
              b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;

              b_seg->current_length = mb_seg->data_len;
              b0->total_length_not_including_first_buffer +=
                mb_seg->data_len;

              b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
              b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);

              b_chain = b_seg;
              mb_seg = mb_seg->next;
              nb_seg++;
            }

          /*
           * Turn this on if you run into
           * "bad monkey" contexts, and you want to know exactly
           * which nodes they've visited... See main.c...
           */
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
          if (PREDICT_FALSE (n_trace > mb_index))
            vec_add1 (xd->d_trace_buffers, bi0);
          n_buffers--;
          mb_index++;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
    {
      dpdk_rx_trace (dm, node, xd, queue_id, xd->d_trace_buffers,
                     vec_len (xd->d_trace_buffers));
      vlib_set_trace_count (vm, node, n_trace - vec_len (xd->d_trace_buffers));
    }

  vlib_increment_combined_counter
    (vnet_get_main()->interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX,
     cpu_index,
     xd->vlib_sw_if_index,
     mb_index, n_rx_bytes);

  dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
  dw->aggregate_rx_packets += mb_index;

  return mb_index;
}

static inline void poll_rate_limit(dpdk_main_t * dm)
{
  /* Limit the poll rate by sleeping for N msec between polls */
  if (PREDICT_FALSE (dm->poll_sleep != 0))
  {
    struct timespec ts, tsrem;

    ts.tv_sec = 0;
    ts.tv_nsec = 1000*1000*dm->poll_sleep; /* dm->poll_sleep is in msec */

    while (nanosleep(&ts, &tsrem) < 0)
      {
        ts = tsrem;
      }
  }
}
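
/*
 * nanosleep() can return early with -1/EINTR when a signal arrives; the
 * loop above restarts it with the remaining time (tsrem) so the full
 * dm->poll_sleep delay elapses before the next poll.
 */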

static uword
dpdk_input (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * f)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  uword n_rx_packets = 0;
  dpdk_device_and_queue_t * dq;
  u32 cpu_index = os_get_cpu_number();

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  vec_foreach (dq, dm->devices_by_cpu[cpu_index])
    {
      xd = vec_elt_at_index(dm->devices, dq->device);
      ASSERT(dq->queue_id == 0);
      n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, 0, 0);
    }

  poll_rate_limit(dm);

  return n_rx_packets;
}

uword
dpdk_input_rss (vlib_main_t * vm,
      vlib_node_runtime_t * node,
      vlib_frame_t * f)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  uword n_rx_packets = 0;
  dpdk_device_and_queue_t * dq;
  u32 cpu_index = os_get_cpu_number();

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  vec_foreach (dq, dm->devices_by_cpu[cpu_index])
    {
      xd = vec_elt_at_index(dm->devices, dq->device);
      n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, 0);
    }

  poll_rate_limit(dm);

  return n_rx_packets;
}

uword
dpdk_input_efd (vlib_main_t * vm,
      vlib_node_runtime_t * node,
      vlib_frame_t * f)
{
  dpdk_main_t * dm = &dpdk_main;
  dpdk_device_t * xd;
  uword n_rx_packets = 0;
  dpdk_device_and_queue_t * dq;
  u32 cpu_index = os_get_cpu_number();

  /*
   * Poll all devices on this cpu for input/interrupts.
   */
  vec_foreach (dq, dm->devices_by_cpu[cpu_index])
    {
      xd = vec_elt_at_index(dm->devices, dq->device);
      n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, 1);
    }

  poll_rate_limit(dm);

  return n_rx_packets;
}

VLIB_REGISTER_NODE (dpdk_input_node) = {
  .function = dpdk_input,
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "dpdk-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_dpdk_rx_dma_trace,

  .n_errors = DPDK_N_ERROR,
  .error_strings = dpdk_error_strings,

  .n_next_nodes = DPDK_RX_N_NEXT,
  .next_nodes = {
    [DPDK_RX_NEXT_DROP] = "error-drop",
    [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
    [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
    [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
  },
};

/* handle the dpdk_input_rss and dpdk_input_efd alternative functions */
VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input)
VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input_rss)
VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input_efd)

/* these macros define dpdk_input_multiarch_select() and friends */
CLIB_MULTIARCH_SELECT_FN(dpdk_input);
CLIB_MULTIARCH_SELECT_FN(dpdk_input_rss);
CLIB_MULTIARCH_SELECT_FN(dpdk_input_efd);

/*
 * Override the next nodes for the dpdk input nodes.
 * Must be invoked prior to VLIB_INIT_FUNCTION calls.
 */
void dpdk_set_next_node (dpdk_rx_next_t next, char *name)
{
  vlib_node_registration_t *r = &dpdk_input_node;
  vlib_node_registration_t *r_io = &dpdk_io_input_node;
  vlib_node_registration_t *r_handoff = &handoff_dispatch_node;

  switch (next)
    {
    case DPDK_RX_NEXT_IP4_INPUT:
    case DPDK_RX_NEXT_IP6_INPUT:
    case DPDK_RX_NEXT_MPLS_INPUT:
    case DPDK_RX_NEXT_ETHERNET_INPUT:
      r->next_nodes[next] = name;
      r_io->next_nodes[next] = name;
      r_handoff->next_nodes[next] = name;
      break;

    default:
      clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
      break;
    }
}
inline vlib_frame_queue_elt_t *
vlib_get_handoff_queue_elt (u32 vlib_worker_index)
{
  vlib_frame_queue_t *fq;
  vlib_frame_queue_elt_t *elt;
  u64 new_tail;

  fq = vlib_frame_queues[vlib_worker_index];
  ASSERT (fq);

  new_tail = __sync_add_and_fetch (&fq->tail, 1);

  /* Wait until a ring slot is available */
  while (new_tail >= fq->head_hint + fq->nelts)
      vlib_worker_thread_barrier_check ();

  elt = fq->elts + (new_tail & (fq->nelts-1));

  /* this would be very bad... */
  while (elt->valid)
    ;

  elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
  elt->last_n_vectors = elt->n_vectors = 0;

  return elt;
}
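
/*
 * fq->nelts is a power of two, so (new_tail & (fq->nelts - 1)) maps the
 * monotonically increasing tail counter onto a ring slot. The spin on
 * elt->valid guards against wrapping onto an element the consumer has
 * not yet drained.
 */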

static inline vlib_frame_queue_elt_t *
dpdk_get_handoff_queue_elt (
    u32 vlib_worker_index,
    vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index)
{
  vlib_frame_queue_elt_t *elt;

  if (handoff_queue_elt_by_worker_index [vlib_worker_index])
      return handoff_queue_elt_by_worker_index [vlib_worker_index];

  elt = vlib_get_handoff_queue_elt (vlib_worker_index);

  handoff_queue_elt_by_worker_index [vlib_worker_index] = elt;

  return elt;
}

static inline vlib_frame_queue_t *
is_vlib_handoff_queue_congested (
    u32 vlib_worker_index,
    u32 queue_hi_thresh,
    vlib_frame_queue_t ** handoff_queue_by_worker_index)
{
  vlib_frame_queue_t *fq;

  fq = handoff_queue_by_worker_index [vlib_worker_index];
  if (fq != (vlib_frame_queue_t *)(~0))
      return fq;

  fq = vlib_frame_queues[vlib_worker_index];
  ASSERT (fq);

  if (PREDICT_FALSE(fq->tail >= (fq->head_hint + queue_hi_thresh))) {
    /* a valid entry in the array will indicate the queue has reached
     * the specified threshold and is congested
     */
    handoff_queue_by_worker_index [vlib_worker_index] = fq;
    fq->enqueue_full_events++;
    return fq;
  }

  return NULL;
}
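
/*
 * The cache above uses (vlib_frame_queue_t *)(~0) as the "not yet checked"
 * sentinel: once a queue is seen congested during a burst, its real
 * pointer is cached so subsequent packets bound for that worker skip the
 * threshold test.
 */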

static inline u64 ipv4_get_key (ip4_header_t *ip)
{
   u64 hash_key;

   hash_key = *((u64*)(&ip->address_pair)) ^ ip->protocol;

   return hash_key;
}

static inline u64 ipv6_get_key (ip6_header_t *ip)
{
   u64 hash_key;

   hash_key = ip->src_address.as_u64[0] ^
              rotate_left(ip->src_address.as_u64[1],13) ^
              rotate_left(ip->dst_address.as_u64[0],26) ^
              rotate_left(ip->dst_address.as_u64[1],39) ^
              ip->protocol;

   return hash_key;
}

#define MPLS_BOTTOM_OF_STACK_BIT_MASK   0x00000100U
#define MPLS_LABEL_MASK                 0xFFFFF000U

static inline u64 mpls_get_key (mpls_unicast_header_t *m)
{
   u64 hash_key;
   u8  ip_ver;

   /* find the bottom of the MPLS label stack. */
   if (PREDICT_TRUE(m->label_exp_s_ttl &
                    clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
       goto bottom_lbl_found;
   }
   m++;

   if (PREDICT_TRUE(m->label_exp_s_ttl &
                    clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK))) {
       goto bottom_lbl_found;
   }
   m++;

   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
       goto bottom_lbl_found;
   }
   m++;

   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
       goto bottom_lbl_found;
   }
   m++;

   if (m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_BOTTOM_OF_STACK_BIT_MASK)) {
       goto bottom_lbl_found;
   }

   /* the bottom label was not found - use the last label */
   hash_key = m->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);

   return hash_key;

bottom_lbl_found:
   m++;
   ip_ver = (*((u8 *)m) >> 4);

   /* find out if it is IPV4 or IPV6 header */
   if (PREDICT_TRUE(ip_ver == 4)) {
       hash_key = ipv4_get_key((ip4_header_t *)m);
   } else if (PREDICT_TRUE(ip_ver == 6)) {
       hash_key = ipv6_get_key((ip6_header_t *)m);
   } else {
       /* use the bottom label */
       hash_key = (m-1)->label_exp_s_ttl & clib_net_to_host_u32(MPLS_LABEL_MASK);
   }

   return hash_key;
}
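
/*
 * The unrolled scan above looks at most five labels deep for the
 * bottom-of-stack bit; past that it gives up and hashes on the last label
 * inspected. Once the bottom label is found, the first nibble of the
 * payload distinguishes IPv4 from IPv6.
 */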

static inline u64 eth_get_key (ethernet_header_t *h0)
{
   u64 hash_key;

   if (PREDICT_TRUE(h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))) {
       hash_key = ipv4_get_key((ip4_header_t *)(h0+1));
   } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_IP6)) {
       hash_key = ipv6_get_key((ip6_header_t *)(h0+1));
   } else if (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
       hash_key = mpls_get_key((mpls_unicast_header_t *)(h0+1));
   } else if ((h0->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ||
              (h0->type == clib_host_to_net_u16(ETHERNET_TYPE_DOT1AD))) {
       ethernet_vlan_header_t * outer = (ethernet_vlan_header_t *)(h0 + 1);

       outer = (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_VLAN)) ?
                                  outer+1 : outer;
       if (PREDICT_TRUE(outer->type == clib_host_to_net_u16(ETHERNET_TYPE_IP4))) {
           hash_key = ipv4_get_key((ip4_header_t *)(outer+1));
       } else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)) {
           hash_key = ipv6_get_key((ip6_header_t *)(outer+1));
       } else if (outer->type == clib_host_to_net_u16(ETHERNET_TYPE_MPLS_UNICAST)) {
           hash_key = mpls_get_key((mpls_unicast_header_t *)(outer+1));
       } else {
           hash_key = outer->type;
       }
   } else {
       hash_key = 0;
   }

   return hash_key;
}
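
/*
 * The key computed above is mixed through clib_xxhash() and folded onto a
 * worker index, as done in dpdk_io_thread() below:
 *
 *   hash = (u32) clib_xxhash (hash_key);
 *   if (PREDICT_TRUE (is_pow2 (num_workers)))
 *     next_worker_index += hash & (num_workers - 1);
 *   else
 *     next_worker_index += hash % num_workers;
 */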

/*
 * This function is used when dedicated IO threads feed the worker threads.
 *
 * Devices are allocated to this thread based on instances and instance_id.
 * If instances==0 then the function automatically determines the number
 * of instances of this thread, and allocates devices between them.
 * If instances != 0, then instance_id must be in the range 0..instances-1.
 * The function allocates devices among the specified number of instances,
 * with this thread having the given instance id. This option is used for
 * splitting devices among differently named "io"-type threads.
 */
void dpdk_io_thread (vlib_worker_thread_t * w,
                     u32 instances,
                     u32 instance_id,
                     char *worker_name,
                     dpdk_io_thread_callback_t callback)
{
  vlib_main_t * vm = vlib_get_main();
  vlib_thread_main_t * tm = vlib_get_thread_main();
  vlib_thread_registration_t * tr;
  dpdk_main_t * dm = &dpdk_main;
  char *io_name = w->registration->name;
  dpdk_device_t * xd;
  dpdk_device_t ** my_devices = 0;
  vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index = 0;
  vlib_frame_queue_t ** congested_handoff_queue_by_worker_index = 0;
  vlib_frame_queue_elt_t * hf = 0;
  int i;
  u32 n_left_to_next_worker = 0, * to_next_worker = 0;
  u32 next_worker_index = 0;
  u32 current_worker_index = ~0;
  u32 cpu_index = os_get_cpu_number();
  u32 num_workers = 0;
  u32 num_devices = 0;
  uword * p;
  u16 queue_id = 0;
  vlib_node_runtime_t * node_trace = 0;
  u32 first_worker_index = 0;
  u32 buffer_flags_template;

  /* Wait until the dpdk init sequence is complete */
  while (dm->io_thread_release == 0)
    vlib_worker_thread_barrier_check();

  clib_time_init (&vm->clib_time);

  p = hash_get_mem (tm->thread_registrations_by_name, worker_name);
  ASSERT (p);
  tr = (vlib_thread_registration_t *) p[0];
  if (tr)
    {
      num_workers = tr->count;
      first_worker_index = tr->first_index;
    }

  /* Allocate devices to this thread */
  if (instances == 0)
    {
      /* auto-assign */
      instance_id = w->instance_id;

      p = hash_get_mem (tm->thread_registrations_by_name, io_name);
      tr = (vlib_thread_registration_t *) p[0];
      /* Otherwise, how did we get here */
      ASSERT (tr && tr->count);
      instances = tr->count;
    }
  else
    {
      /* manually assign */
      ASSERT (instance_id < instances);
    }

  vec_validate (handoff_queue_elt_by_worker_index,
                first_worker_index + num_workers - 1);

  vec_validate_init_empty (congested_handoff_queue_by_worker_index,
                           first_worker_index + num_workers - 1,
                           (vlib_frame_queue_t *)(~0));

  buffer_flags_template = dm->buffer_flags_template;

  /* And handle them... */
  while (1)
    {
      u32 n_buffers;
      u32 mb_index;
      uword n_rx_bytes = 0;
      u32 n_trace, trace_cnt __attribute__((unused));
      vlib_buffer_free_list_t * fl;
      u32 hash;
      u64 hash_key;
      u8 efd_discard_burst;

      vlib_worker_thread_barrier_check ();

      /* Invoke callback if supplied */
      if (PREDICT_FALSE(callback != NULL))
          callback(vm);

      if (PREDICT_FALSE(vec_len(dm->devices) != num_devices))
      {
        vec_reset_length(my_devices);
        vec_foreach (xd, dm->devices)
          {
            if (((xd - dm->devices) % instances) == instance_id)
              {
                fprintf(stderr, "i/o thread %d (cpu %d) takes port %d\n",
                        instance_id, (int) os_get_cpu_number(), (int) (xd - dm->devices));
                vec_add1 (my_devices, xd);
              }
          }
        num_devices = vec_len(dm->devices);
      }

      for (i = 0; i < vec_len (my_devices); i++)
      {
          xd = my_devices[i];

          if (!xd->admin_up)
            continue;

          n_buffers = dpdk_rx_burst(dm, xd, 0 /* queue_id */);

          if (n_buffers == 0)
            {
              /* check if EFD (dpdk) is enabled */
              if (PREDICT_FALSE(dm->efd.enabled))
                {
                  /* reset a few stats */
                  xd->efd_agent.last_poll_time = 0;
                  xd->efd_agent.last_burst_sz = 0;
                }
              continue;
            }

          trace_cnt = n_trace = 0;
          if (PREDICT_FALSE(vm->trace_main.trace_active_hint))
            {
              /*
               * packet tracing is triggered on the dpdk-input node for
               * ease-of-use. Re-fetch the node_runtime for dpdk-input
               * in case it has changed.
               */
              node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);

              vec_reset_length (xd->d_trace_buffers);
              trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);
            }

          /*
           * DAW-FIXME: VMXNET3 device stop/start doesn't work,
           * therefore fake the stop in the dpdk driver by
           * silently dropping all of the incoming pkts instead of
           * stopping the driver / hardware.
           */
          if (PREDICT_FALSE(xd->admin_up != 1))
            {
              for (mb_index = 0; mb_index < n_buffers; mb_index++)
                rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
              continue;
            }

          /* reset EFD action for the burst */
          efd_discard_burst = 0;

          /* Check for congestion if EFD (Early-Fast-Discard) is enabled
           * in any mode (e.g. dpdk, monitor, or drop_all)
           */
          if (PREDICT_FALSE(dm->efd.enabled))
            {
              /* update EFD counters */
              dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);

              if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
                {
                  /* drop all received packets */
                  for (mb_index = 0; mb_index < n_buffers; mb_index++)
                    rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);

                  xd->efd_agent.discard_cnt += n_buffers;
                  increment_efd_drop_counter(vm,
                                             DPDK_ERROR_VLAN_EFD_DROP_PKTS,
                                             n_buffers);

                  continue;
                }

              if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
                                dm->efd.consec_full_frames_hi_thresh))
                {
                  u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
                                                               queue_id);
                  if (device_queue_sz >= dm->efd.queue_hi_thresh)
                    {
                      /* dpdk device queue has reached the critical threshold */
                      xd->efd_agent.congestion_cnt++;

                      /* apply EFD to packets from the burst */
                      efd_discard_burst = 1;
                    }
                }
            }

          fl = vlib_buffer_get_free_list
            (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

          mb_index = 0;

          while (n_buffers > 0)
            {
              u32 bi0;
              u8 next0, error0;
              u32 l3_offset0;
              vlib_buffer_t * b0, * b_seg, * b_chain = 0;
              ethernet_header_t * h0;
              u8 nb_seg = 1;
              struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
              struct rte_mbuf *mb_seg = mb->next;

              if (PREDICT_TRUE(n_buffers > 2))
                {
                  struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
                  vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
                  CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
                  CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
                  CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
                }

              b0 = vlib_buffer_from_rte_mbuf(mb);

              /* check whether EFD is looking for packets to discard */
              if (PREDICT_FALSE(efd_discard_burst))
                {
                  u32 cntr_type;
                  if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
                    {
                      rte_pktmbuf_free(mb);
                      xd->efd_agent.discard_cnt++;
                      increment_efd_drop_counter(vm,
                                                 cntr_type,
                                                 1);

                      n_buffers--;
                      mb_index++;
                      continue;
                    }
                }

              /* Prefetch one next segment if it exists */
              if (PREDICT_FALSE(mb->nb_segs > 1))
                {
                  struct rte_mbuf *pfmb = mb->next;
                  vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
                  CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
                  CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
                  b_chain = b0;
                }

              bi0 = vlib_get_buffer_index (vm, b0);
              vlib_buffer_init_for_free_list (b0, fl);
              b0->clone_count = 0;

              dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
                                                       &next0, &error0);
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
              /*
               * Clear overloaded TX offload flags when a DPDK driver
               * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
               */
              if (PREDICT_TRUE(trace_cnt == 0))
                mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
              else
                trace_cnt--;
#endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */

              if (error0)
                  clib_warning ("bi %d error %d", bi0, error0);

              b0->error = 0;

              l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
                             next0 == DPDK_RX_NEXT_IP6_INPUT ||
                             next0 == DPDK_RX_NEXT_MPLS_INPUT) ?
                            sizeof (ethernet_header_t) : 0);

              b0->current_data = l3_offset0;
              /* Some drivers like fm10k receive frames with
                 mb->data_off > RTE_PKTMBUF_HEADROOM */
              b0->current_data += mb->data_off - RTE_PKTMBUF_HEADROOM;
              b0->current_length = mb->data_len - l3_offset0;

              b0->flags = buffer_flags_template;

              if (VMWARE_LENGTH_BUG_WORKAROUND)
                  b0->current_length -= 4;

              vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
              vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
              vnet_buffer(b0)->io_handoff.next_index = next0;
              n_rx_bytes += mb->pkt_len;

              /* Process subsequent segments of multi-segment packets */
              while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
                {
                  ASSERT(mb_seg != 0);

                  b_seg = vlib_buffer_from_rte_mbuf(mb_seg);
                  vlib_buffer_init_for_free_list (b_seg, fl);
                  b_seg->clone_count = 0;

                  ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
                  ASSERT(b_seg->current_data == 0);

                  /*
                   * The driver (e.g. virtio) may not put the packet data at the start
                   * of the segment, so don't assume b_seg->current_data == 0 is correct.
                   */
                  b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;

                  b_seg->current_length = mb_seg->data_len;
                  b0->total_length_not_including_first_buffer +=
                    mb_seg->data_len;

                  b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
                  b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);

                  b_chain = b_seg;
                  mb_seg = mb_seg->next;
                  nb_seg++;
                }

              /*
               * Turn this on if you run into
               * "bad monkey" contexts, and you want to know exactly
               * which nodes they've visited... See main.c...
               */
              VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);

              if (PREDICT_FALSE (n_trace > mb_index))
                vec_add1 (xd->d_trace_buffers, bi0);

              next_worker_index = first_worker_index;

              /*
               * Force unknown traffic onto worker 0,
               * and into ethernet-input. $$$$ add more hashes.
               */
              h0 = (ethernet_header_t *) b0->data;

              /* Compute ingress LB hash */
              hash_key = eth_get_key(h0);
              hash = (u32)clib_xxhash(hash_key);

              if (PREDICT_TRUE (is_pow2(num_workers)))
                next_worker_index += hash & (num_workers - 1);
              else
                next_worker_index += hash % num_workers;

              /* if EFD is enabled and not already discarding from dpdk,
               * check the worker ring/queue for congestion
               */
              if (PREDICT_FALSE(tm->efd.enabled && !efd_discard_burst))
                {
                  vlib_frame_queue_t *fq;

                  /* fq will be valid if the ring is congested */
                  fq = is_vlib_handoff_queue_congested(
                      next_worker_index, tm->efd.queue_hi_thresh,
                      congested_handoff_queue_by_worker_index);

                  if (PREDICT_FALSE(fq != NULL))
                    {
                      u32 cntr_type;
                      if (PREDICT_TRUE(cntr_type =
                                       is_efd_discardable(tm, b0, mb)))
                        {
                          /* discard the packet */
                          fq->enqueue_efd_discards++;
                          increment_efd_drop_counter(vm, cntr_type, 1);
                          rte_pktmbuf_free(mb);
                          n_buffers--;
                          mb_index++;
                          continue;
                        }
                    }
                }

              if (next_worker_index != current_worker_index)
                {
                  if (hf)
                    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;

                  hf = dpdk_get_handoff_queue_elt(
                           next_worker_index,
                           handoff_queue_elt_by_worker_index);

                  n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
                  to_next_worker = &hf->buffer_index[hf->n_vectors];
                  current_worker_index = next_worker_index;
                }

              /* enqueue to correct worker thread */
              to_next_worker[0] = bi0;
              to_next_worker++;
              n_left_to_next_worker--;

              if (n_left_to_next_worker == 0)
                {
                  hf->n_vectors = VLIB_FRAME_SIZE;
                  vlib_put_handoff_queue_elt(hf);
                  current_worker_index = ~0;
                  handoff_queue_elt_by_worker_index[next_worker_index] = 0;
                  hf = 0;
                }

              n_buffers--;
              mb_index++;
            }

          if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
            {
              /* credit the trace to the trace node */
              dpdk_rx_trace (dm, node_trace, xd, queue_id, xd->d_trace_buffers,
                             vec_len (xd->d_trace_buffers));
              vlib_set_trace_count (vm, node_trace, n_trace - vec_len (xd->d_trace_buffers));
            }

          vlib_increment_combined_counter
            (vnet_get_main()->interface_main.combined_sw_if_counters
             + VNET_INTERFACE_COUNTER_RX,
             cpu_index,
             xd->vlib_sw_if_index,
             mb_index, n_rx_bytes);

          dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
          dw->aggregate_rx_packets += mb_index;
        }

      if (hf)
        hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;

      /* Ship frames to the worker nodes */
      for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
        {
          if (handoff_queue_elt_by_worker_index[i])
            {
              hf = handoff_queue_elt_by_worker_index[i];
              /*
               * It works better to let the handoff node
               * rate-adapt, always ship the handoff queue element.
               */
              if (1 || hf->n_vectors == hf->last_n_vectors)
                {
                  vlib_put_handoff_queue_elt(hf);
                  handoff_queue_elt_by_worker_index[i] = 0;
                }
              else
                hf->last_n_vectors = hf->n_vectors;
            }
          congested_handoff_queue_by_worker_index[i] = (vlib_frame_queue_t *)(~0);
        }
      hf = 0;
      current_worker_index = ~0;

      vlib_increment_main_loop_counter (vm);
    }
}
1567
1568 /*
1569  * This function is used when the main thread performs IO and feeds the
1570  * worker threads.
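 * It polls every admin-up device, load-balances the received packets
 * across the workers with a hash of the ethernet header, and hands
 * them off through per-worker frame queues.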
1571  */
1572 static uword
1573 dpdk_io_input (vlib_main_t * vm,
1574                vlib_node_runtime_t * node,
1575                vlib_frame_t * f)
1576 {
1577   dpdk_main_t * dm = &dpdk_main;
1578   dpdk_device_t * xd;
1579   vlib_thread_main_t * tm = vlib_get_thread_main();
1580   uword n_rx_packets = 0;
1581   static vlib_frame_queue_elt_t ** handoff_queue_elt_by_worker_index;
1582   static vlib_frame_queue_t ** congested_handoff_queue_by_worker_index = 0;
1583   vlib_frame_queue_elt_t * hf = 0;
1584   int i;
1585   u32 n_left_to_next_worker = 0, * to_next_worker = 0;
1586   u32 next_worker_index = 0;
1587   u32 current_worker_index = ~0;
1588   u32 cpu_index = os_get_cpu_number();
1589   static int num_workers_set;
1590   static u32 num_workers;
1591   u16 queue_id = 0;
1592   vlib_node_runtime_t * node_trace;
1593   static u32 first_worker_index;
1594   u32 buffer_flags_template;
1595
1596   if (PREDICT_FALSE(num_workers_set == 0))
1597     {
1598       uword * p;
1599       vlib_thread_registration_t * tr;
1600       /* Only the standard vnet worker threads are supported */
1601       p = hash_get_mem (tm->thread_registrations_by_name, "workers");
      tr = p ? (vlib_thread_registration_t *) p[0] : 0;
      if (tr)
1604         {
1605           num_workers = tr->count;
1606           first_worker_index = tr->first_index;
1607         }
1608       num_workers_set = 1;
1609     }
1610
1611   if (PREDICT_FALSE(handoff_queue_elt_by_worker_index == 0))
1612     {
1613       vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);
1614       
1615       vec_validate_init_empty (congested_handoff_queue_by_worker_index,
1616                                first_worker_index + num_workers - 1,
1617                                (vlib_frame_queue_t *)(~0));
1618     }
1619
1620   /* packet tracing is triggered on the dpdk-input node for ease-of-use */
1621   node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);
1622
1623   buffer_flags_template = dm->buffer_flags_template;
1624
1625   vec_foreach (xd, dm->devices)
1626     {
1627       u32 n_buffers;
1628       u32 mb_index;
1629       uword n_rx_bytes = 0;
1630       u32 n_trace, trace_cnt __attribute__((unused));
1631       vlib_buffer_free_list_t * fl;
1632       u32 hash;
1633       u64 hash_key;
1634       u8 efd_discard_burst = 0;
1635
1636       if (!xd->admin_up)
1637         continue;
1638
1639       n_buffers = dpdk_rx_burst(dm, xd, queue_id );
1640
1641       if (n_buffers == 0)
1642         {
1643           /* check if EFD (dpdk) is enabled */
1644           if (PREDICT_FALSE(dm->efd.enabled))
1645             {
1646               /* reset a few stats */
1647               xd->efd_agent.last_poll_time = 0;
1648               xd->efd_agent.last_burst_sz = 0;
1649             }
1650           continue;
1651         }
1652
1653       vec_reset_length (xd->d_trace_buffers);
1654       trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);
1655         
1656       /*
1657        * DAW-FIXME: VMXNET3 device stop/start doesn't work, 
1658        * therefore fake the stop in the dpdk driver by
1659        * silently dropping all of the incoming pkts instead of 
1660        * stopping the driver / hardware.
1661        */
1662       if (PREDICT_FALSE(xd->admin_up != 1))
1663         {
1664           for (mb_index = 0; mb_index < n_buffers; mb_index++)
1665             rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
1666           continue;
1667         }
1668
1669       /* Check for congestion if EFD (Early-Fast-Discard) is enabled
1670        * in any mode (e.g. dpdk, monitor, or drop_all)
1671        */
1672       if (PREDICT_FALSE(dm->efd.enabled))
1673         {
1674           /* update EFD counters */
1675           dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);
1676
1677           if (PREDICT_FALSE(dm->efd.enabled & DPDK_EFD_DROPALL_ENABLED))
1678             {
1679               /* discard all received packets */
1680               for (mb_index = 0; mb_index < n_buffers; mb_index++)
1681                 rte_pktmbuf_free(xd->rx_vectors[queue_id][mb_index]);
1682
1683               xd->efd_agent.discard_cnt += n_buffers;
1684               increment_efd_drop_counter(vm, 
1685                                          DPDK_ERROR_VLAN_EFD_DROP_PKTS,
1686                                          n_buffers);
1687             
1688               continue;
1689             }
1690           
1691           if (PREDICT_FALSE(xd->efd_agent.consec_full_frames_cnt >=
1692                             dm->efd.consec_full_frames_hi_thresh))
1693             {
1694               u32 device_queue_sz = rte_eth_rx_queue_count(xd->device_index,
1695                                                            queue_id);
1696               if (device_queue_sz >= dm->efd.queue_hi_thresh)
1697                 {
1698                   /* dpdk device queue has reached the critical threshold */
1699                   xd->efd_agent.congestion_cnt++;
1700
1701                   /* apply EFD to packets from the burst */
1702                   efd_discard_burst = 1;
1703                 }
1704             }
1705         }
1706       
1707       fl = vlib_buffer_get_free_list 
1708         (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
1709           
1710       mb_index = 0;
1711
1712       while (n_buffers > 0)
1713         {
1714           u32 bi0;
1715           u8 next0, error0;
1716           u32 l3_offset0;
1717           vlib_buffer_t * b0, * b_seg, * b_chain = 0;
1718           ethernet_header_t * h0;
1719           u8 nb_seg = 1;
1720           struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
1721           struct rte_mbuf *mb_seg = mb->next;
1722
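          /* Prefetch the mbuf (and its buffer header/data) two entries
           * ahead to hide memory latency; the guard keeps the lookahead
           * inside the received burst */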
          if (PREDICT_TRUE(n_buffers > 2))
1724             {
1725               struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
1726               vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
1727               CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1728               CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1729               CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
1730             }
1731
1732           b0 = vlib_buffer_from_rte_mbuf(mb);
1733
1734           /* check whether EFD is looking for packets to discard */
1735           if (PREDICT_FALSE(efd_discard_burst))
1736             {
1737               u32 cntr_type;
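              /* a nonzero result means "discardable"; the value doubles
               * as the EFD drop counter to increment */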
1738               if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
1739                 {
1740                   rte_pktmbuf_free(mb);
1741                   xd->efd_agent.discard_cnt++;
1742                   increment_efd_drop_counter(vm, 
1743                                              cntr_type,
1744                                              1);
1745
1746                   n_buffers--;
1747                   mb_index++;
1748                   continue;
1749                 }
1750             }
1751
1752           /* Prefetch one next segment if it exists */
1753           if (PREDICT_FALSE(mb->nb_segs > 1))
1754             {
1755               struct rte_mbuf *pfmb = mb->next;
1756               vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
1757               CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
1758               CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
1759               b_chain = b0;
1760             }
1761
1762           bi0 = vlib_get_buffer_index (vm, b0);
1763           vlib_buffer_init_for_free_list (b0, fl);
1764           b0->clone_count = 0;
1765
1766           dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
1767                                                    &next0, &error0);
1768 #ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
1769           /*
1770            * Clear overloaded TX offload flags when a DPDK driver
1771            * is using them for RX flags (e.g. Cisco VIC Ethernet driver)
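           * Packets about to be traced (the first trace_cnt of this
           * burst) keep their flags so the trace shows the original
           * RX state.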
1772            */
1773           if (PREDICT_TRUE(trace_cnt == 0))
1774             mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
1775           else
1776             trace_cnt--;
1777 #endif /* RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS */
1778
1779           if (error0)
1780             clib_warning ("bi %d error %d", bi0, error0);
1781
1782           b0->error = 0;
1783
1784           l3_offset0 = ((next0 == DPDK_RX_NEXT_IP4_INPUT ||
1785                          next0 == DPDK_RX_NEXT_IP6_INPUT || 
1786                          next0 == DPDK_RX_NEXT_MPLS_INPUT) ? 
1787                         sizeof (ethernet_header_t) : 0);
1788
1789           b0->current_data = l3_offset0;
1790           b0->current_length = mb->data_len - l3_offset0;
1791
1792           b0->flags = buffer_flags_template;
1793                 
1794           if (VMWARE_LENGTH_BUG_WORKAROUND)
1795               b0->current_length -= 4;
1796
1797           vnet_buffer(b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1798           vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
1799           vnet_buffer(b0)->io_handoff.next_index = next0;
1800           n_rx_bytes += mb->pkt_len;
1801
1802           /* Process subsequent segments of multi-segment packets */
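          /* Each rte_mbuf segment has an associated vlib_buffer_t
           * (see vlib_buffer_from_rte_mbuf); link the buffers through
           * next_buffer so downstream nodes see one packet chain */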
1803           while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
1804             {
1805               ASSERT(mb_seg != 0);
1806  
1807               b_seg = vlib_buffer_from_rte_mbuf(mb_seg);
1808               vlib_buffer_init_for_free_list (b_seg, fl);
1809               b_seg->clone_count = 0;
1810  
1811               ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
1813  
1814               /*
1815                * The driver (e.g. virtio) may not put the packet data at the start
1816                * of the segment, so don't assume b_seg->current_data == 0 is correct.
1817                */
1818               b_seg->current_data = (mb_seg->buf_addr + mb_seg->data_off) - (void *)b_seg->data;
1819
1820               b_seg->current_length = mb_seg->data_len;
1821               b0->total_length_not_including_first_buffer +=
1822                 mb_seg->data_len;
1823  
1824               b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
1825               b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);
1826  
1827               b_chain = b_seg;
1828               mb_seg = mb_seg->next;
1829               nb_seg++;
1830             }
1831  
1832           /*
1833            * Turn this on if you run into
1834            * "bad monkey" contexts, and you want to know exactly
1835            * which nodes they've visited... See main.c...
1836            */
1837           VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
1838  
1839           if (PREDICT_FALSE (n_trace > mb_index))
1840             vec_add1 (xd->d_trace_buffers, bi0);
1841
1842           next_worker_index = first_worker_index;
1843
1844           /* 
1845            * Force unknown traffic onto worker 0, 
1846            * and into ethernet-input. $$$$ add more hashes.
1847            */
1848           h0 = (ethernet_header_t *) b0->data;
1849
1850           /* Compute ingress LB hash */
1851           hash_key = eth_get_key(h0);
1852           hash = (u32)clib_xxhash(hash_key);
1853
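          /* a power-of-two worker count lets us mask the hash rather
           * than take the slower modulo, e.g. 4 workers -> hash & 3 */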
1854           if (PREDICT_TRUE (is_pow2(num_workers)))
1855             next_worker_index += hash & (num_workers - 1);
1856           else
1857             next_worker_index += hash % num_workers;
1858
1859           /* if EFD is enabled and not already discarding from dpdk,
1860            * check the worker ring/queue for congestion
1861            */
1862           if (PREDICT_FALSE(tm->efd.enabled && !efd_discard_burst))
1863             {
1864               vlib_frame_queue_t *fq;
1865
1866               /* fq will be valid if the ring is congested */
1867               fq = is_vlib_handoff_queue_congested(
1868                   next_worker_index, tm->efd.queue_hi_thresh,
1869                   congested_handoff_queue_by_worker_index);
1870               
1871               if (PREDICT_FALSE(fq != NULL))
1872                 {
1873                   u32 cntr_type;
1874                   if (PREDICT_TRUE(cntr_type =
1875                                    is_efd_discardable(tm, b0, mb)))
1876                     {
1877                       /* discard the packet */
1878                       fq->enqueue_efd_discards++;
1879                       increment_efd_drop_counter(vm, cntr_type, 1);
1880                       rte_pktmbuf_free(mb);
1881                       n_buffers--;
1882                       mb_index++;
1883                       continue;
1884                     }
1885                 }
1886             }
1887           
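          /*
           * Open a handoff frame for the new target worker; a frame
           * that fills to VLIB_FRAME_SIZE is shipped immediately below.
           */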
1888           if (next_worker_index != current_worker_index)
1889             {
1890               if (hf)
1891                 hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1892
1893               hf = dpdk_get_handoff_queue_elt(
1894                      next_worker_index,
1895                      handoff_queue_elt_by_worker_index);
1896
1897               n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
1898               to_next_worker = &hf->buffer_index[hf->n_vectors];
1899               current_worker_index = next_worker_index;
1900             }
1901           
1902           /* enqueue to correct worker thread */
1903           to_next_worker[0] = bi0;
1904           to_next_worker++;
1905           n_left_to_next_worker--;
1906
1907           if (n_left_to_next_worker == 0)
1908             {
1909               hf->n_vectors = VLIB_FRAME_SIZE;
1910               vlib_put_handoff_queue_elt(hf);
1911               current_worker_index = ~0;
1912               handoff_queue_elt_by_worker_index[next_worker_index] = 0;
1913               hf = 0;
1914             }
1915           
1916           n_buffers--;
1917           mb_index++;
1918         }
1919
1920       if (PREDICT_FALSE (vec_len (xd->d_trace_buffers) > 0))
1921         {
1922           /* credit the trace to the trace node */
1923           dpdk_rx_trace (dm, node_trace, xd, queue_id, xd->d_trace_buffers,
1924                          vec_len (xd->d_trace_buffers));
1925           vlib_set_trace_count (vm, node_trace, n_trace - vec_len (xd->d_trace_buffers));
1926         }
1927
1928       vlib_increment_combined_counter 
1929         (vnet_get_main()->interface_main.combined_sw_if_counters
1930          + VNET_INTERFACE_COUNTER_RX,
1931          cpu_index, 
1932          xd->vlib_sw_if_index,
1933          mb_index, n_rx_bytes);
1934
1935       dpdk_worker_t * dw = vec_elt_at_index(dm->workers, cpu_index);
1936       dw->aggregate_rx_packets += mb_index;
1937       n_rx_packets += mb_index;
1938     }
1939
1940   if (hf)
1941     hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
1942   
1943   /* Ship frames to the worker nodes */
1944   for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
1945     {
1946       if (handoff_queue_elt_by_worker_index[i])
1947         {
1948           hf = handoff_queue_elt_by_worker_index[i];
          /*
           * It works better to let the handoff node rate-adapt, so
           * always ship the handoff queue element (the "1 ||" in the
           * test below deliberately disables the batching heuristic).
           */
1953           if (1 || hf->n_vectors == hf->last_n_vectors)
1954             {
1955               vlib_put_handoff_queue_elt(hf);
1956               handoff_queue_elt_by_worker_index[i] = 0;
1957             }
1958           else
1959             hf->last_n_vectors = hf->n_vectors;
1960         }
1961       congested_handoff_queue_by_worker_index[i] = (vlib_frame_queue_t *)(~0);
1962     }
1963   hf = 0;
1964   current_worker_index = ~0;
1965   return n_rx_packets;
1966 }
1967
1968 VLIB_REGISTER_NODE (dpdk_io_input_node) = {
1969   .function = dpdk_io_input,
1970   .type = VLIB_NODE_TYPE_INPUT,
1971   .name = "dpdk-io-input",
1972
1973   /* Will be enabled if/when hardware is detected. */
1974   .state = VLIB_NODE_STATE_DISABLED,
1975
1976   .format_buffer = format_ethernet_header_with_length,
1977   .format_trace = format_dpdk_rx_dma_trace,
1978
1979   .n_errors = DPDK_N_ERROR,
1980   .error_strings = dpdk_error_strings,
1981
1982   .n_next_nodes = DPDK_RX_N_NEXT,
1983   .next_nodes = {
1984     [DPDK_RX_NEXT_DROP] = "error-drop",
1985     [DPDK_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
1986     [DPDK_RX_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
1987     [DPDK_RX_NEXT_IP6_INPUT] = "ip6-input",
1988     [DPDK_RX_NEXT_MPLS_INPUT] = "mpls-gre-input",
1989   },
1990 };
1991
/*
 * set_efd_bitmap()
 * Set the bits below (EFD_OPERATION_LESS_THAN) or at and above
 * (EFD_OPERATION_GREATER_OR_EQUAL) the given value
 */
void
set_efd_bitmap (u8 *bitmap, u32 value, u32 op)
{
  int ix;

  *bitmap = 0;
  for (ix = 0; ix < 8; ix++)
    {
      if (((op == EFD_OPERATION_LESS_THAN) && (ix < value)) ||
          ((op == EFD_OPERATION_GREATER_OR_EQUAL) && (ix >= value)))
        (*bitmap) |= (1 << ix);
    }
}
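
/*
 * Worked example (hypothetical values): set_efd_bitmap (&bm, 5,
 * EFD_OPERATION_GREATER_OR_EQUAL) sets bits 5-7, so bm == 0xe0 and
 * precedence/EXP/CoS values 5, 6 and 7 are treated as discardable.
 */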
2009
void
efd_config (u32 enabled,
            u32 ip_prec,  u32 ip_op,
            u32 mpls_exp, u32 mpls_op,
            u32 vlan_cos, u32 vlan_op)
{
  vlib_thread_main_t * tm = vlib_get_thread_main();
  dpdk_main_t * dm = &dpdk_main;

  if (enabled)
    {
      tm->efd.enabled |= VLIB_EFD_DISCARD_ENABLED;
      dm->efd.enabled |= DPDK_EFD_DISCARD_ENABLED;
    }
  else
    {
      tm->efd.enabled &= ~VLIB_EFD_DISCARD_ENABLED;
      dm->efd.enabled &= ~DPDK_EFD_DISCARD_ENABLED;
    }

  set_efd_bitmap (&tm->efd.ip_prec_bitmap, ip_prec, ip_op);
  set_efd_bitmap (&tm->efd.mpls_exp_bitmap, mpls_exp, mpls_op);
  set_efd_bitmap (&tm->efd.vlan_cos_bitmap, vlan_cos, vlan_op);
}
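
/*
 * Usage sketch (hypothetical thresholds): enable discards and mark
 * IP precedence >= 6, MPLS EXP >= 6 and VLAN CoS >= 6 discardable:
 *
 *   efd_config (1,
 *               6, EFD_OPERATION_GREATER_OR_EQUAL,
 *               6, EFD_OPERATION_GREATER_OR_EQUAL,
 *               6, EFD_OPERATION_GREATER_OR_EQUAL);
 */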