/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vppinfra/xxhash.h>
#include <vlib/threads.h>
#include <vnet/handoff.h>
#include <vnet/feature/feature.h>

typedef struct
{
  /* Workers assigned to this interface: bitmap and unpacked vector */
  uword *workers_bitmap;
  u32 *workers;
} per_interface_handoff_data_t;

typedef struct
{
  u32 cached_next_index;
  u32 num_workers;
  u32 first_worker_index;

  per_interface_handoff_data_t *if_data;

  /* Worker handoff index */
  u32 frame_queue_index;

  /* convenience variables */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;

  u64 (*hash_fn) (ethernet_header_t *);
} handoff_main_t;

handoff_main_t handoff_main;
vlib_node_registration_t handoff_dispatch_node;

typedef struct
{
  u32 sw_if_index;
  u32 next_worker_index;
  u32 buffer_index;
} worker_handoff_trace_t;

/* packet trace format function */
static u8 *
format_worker_handoff_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  worker_handoff_trace_t *t = va_arg (*args, worker_handoff_trace_t *);

  s =
    format (s, "worker-handoff: sw_if_index %d, next_worker %d, buffer 0x%x",
            t->sw_if_index, t->next_worker_index, t->buffer_index);
  return s;
}

vlib_node_registration_t handoff_node;

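/*
 * worker-handoff runs on the thread that receives packets from the
 * device. For each buffer it computes a flow hash over the ethernet
 * header, picks a worker from the interface's configured worker set,
 * and enqueues the buffer onto that worker's handoff frame queue.
 */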
static uword
worker_handoff_node_fn (vlib_main_t * vm,
                        vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  handoff_main_t *hm = &handoff_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 n_left_from, *from;
  static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index;
  static __thread vlib_frame_queue_t **congested_handoff_queue_by_worker_index
    = 0;
  vlib_frame_queue_elt_t *hf = 0;
  int i;
  u32 n_left_to_next_worker = 0, *to_next_worker = 0;
  u32 next_worker_index = 0;
  u32 current_worker_index = ~0;

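  /*
   * First pass on this thread: allocate the per-worker caches of
   * frame queue elements. These are __thread statics, so each input
   * thread keeps its own copy.
   */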
  if (PREDICT_FALSE (handoff_queue_elt_by_worker_index == 0))
    {
      vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);

      vec_validate_init_empty (congested_handoff_queue_by_worker_index,
                               hm->first_worker_index + hm->num_workers - 1,
                               (vlib_frame_queue_t *) (~0));
    }

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      u32 sw_if_index0;
      u32 hash;
      u64 hash_key;
      per_interface_handoff_data_t *ihd0;
      u32 index0;

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
      ASSERT (hm->if_data);
      ihd0 = vec_elt_at_index (hm->if_data, sw_if_index0);

      next_worker_index = hm->first_worker_index;

      /*
       * Force unknown traffic onto worker 0,
       * and into ethernet-input. $$$$ add more hashes.
       */

      /* Compute ingress LB hash */
      hash_key = hm->hash_fn ((ethernet_header_t *) b0->data);
      hash = (u32) clib_xxhash (hash_key);

      /* If the input node did not specify a next index, the packet
         should go to ethernet-input */
      if (PREDICT_FALSE ((b0->flags & BUFFER_HANDOFF_NEXT_VALID) == 0))
        vnet_buffer (b0)->handoff.next_index =
          HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT;
      else if (vnet_buffer (b0)->handoff.next_index ==
               HANDOFF_DISPATCH_NEXT_IP4_INPUT
               || vnet_buffer (b0)->handoff.next_index ==
               HANDOFF_DISPATCH_NEXT_IP6_INPUT
               || vnet_buffer (b0)->handoff.next_index ==
               HANDOFF_DISPATCH_NEXT_MPLS_INPUT)
        vlib_buffer_advance (b0, (sizeof (ethernet_header_t)));

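      /* Map the hash onto a slot in this interface's worker vector:
         a mask when the worker count is a power of two, modulo otherwise. */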
      if (PREDICT_TRUE (is_pow2 (vec_len (ihd0->workers))))
        index0 = hash & (vec_len (ihd0->workers) - 1);
      else
        index0 = hash % vec_len (ihd0->workers);

      next_worker_index += ihd0->workers[index0];

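      /*
       * Target worker changed: close out the frame queue element we
       * were filling (if any) and fetch or reuse one for the new worker.
       */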
      if (next_worker_index != current_worker_index)
        {
          if (hf)
            hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;

          hf = vlib_get_worker_handoff_queue_elt (hm->frame_queue_index,
                                                  next_worker_index,
                                                  handoff_queue_elt_by_worker_index);

          n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
          to_next_worker = &hf->buffer_index[hf->n_vectors];
          current_worker_index = next_worker_index;
        }

      /* enqueue to correct worker thread */
      to_next_worker[0] = bi0;
      to_next_worker++;
      n_left_to_next_worker--;

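      /* Frame for this worker is full; hand it off and start fresh. */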
      if (n_left_to_next_worker == 0)
        {
          hf->n_vectors = VLIB_FRAME_SIZE;
          vlib_put_frame_queue_elt (hf);
          current_worker_index = ~0;
          handoff_queue_elt_by_worker_index[next_worker_index] = 0;
          hf = 0;
        }

      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                         && (b0->flags & VLIB_BUFFER_IS_TRACED)))
        {
          worker_handoff_trace_t *t =
            vlib_add_trace (vm, node, b0, sizeof (*t));
          t->sw_if_index = sw_if_index0;
          t->next_worker_index = next_worker_index - hm->first_worker_index;
          t->buffer_index = bi0;
        }

    }

  if (hf)
    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;

  /* Ship frames to the worker nodes */
  for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
    {
      if (handoff_queue_elt_by_worker_index[i])
        {
          hf = handoff_queue_elt_by_worker_index[i];
          /*
           * It works better to let the handoff node
           * rate-adapt, always ship the handoff queue element.
           */
          if (1 || hf->n_vectors == hf->last_n_vectors)
            {
              vlib_put_frame_queue_elt (hf);
              handoff_queue_elt_by_worker_index[i] = 0;
            }
          else
            hf->last_n_vectors = hf->n_vectors;
        }
      congested_handoff_queue_by_worker_index[i] =
        (vlib_frame_queue_t *) (~0);
    }
  hf = 0;
  current_worker_index = ~0;
  return frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (worker_handoff_node) = {
  .function = worker_handoff_node_fn,
  .name = "worker-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_worker_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (worker_handoff_node, worker_handoff_node_fn)
/* *INDENT-ON* */

int
interface_handoff_enable_disable (vlib_main_t * vm, u32 sw_if_index,
                                  uword * bitmap, int enable_disable)
{
  handoff_main_t *hm = &handoff_main;
  vnet_sw_interface_t *sw;
  vnet_main_t *vnm = vnet_get_main ();
  per_interface_handoff_data_t *d;
  int i, rv = 0;

  if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
    return VNET_API_ERROR_INVALID_SW_IF_INDEX;

  sw = vnet_get_sw_interface (vnm, sw_if_index);
  if (sw->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
    return VNET_API_ERROR_INVALID_SW_IF_INDEX;

  if (clib_bitmap_last_set (bitmap) >= hm->num_workers)
    return VNET_API_ERROR_INVALID_WORKER;

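  /* Lazily create the handoff frame queues, targeting handoff-dispatch,
     the first time any interface enables handoff. */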
  if (hm->frame_queue_index == ~0)
    hm->frame_queue_index =
      vlib_frame_queue_main_init (handoff_dispatch_node.index, 0);

  vec_validate (hm->if_data, sw_if_index);
  d = vec_elt_at_index (hm->if_data, sw_if_index);

  vec_free (d->workers);
  vec_free (d->workers_bitmap);

  if (enable_disable)
    {
      d->workers_bitmap = bitmap;
      /* *INDENT-OFF* */
      clib_bitmap_foreach (i, bitmap,
        ({
          vec_add1(d->workers, i);
        }));
      /* *INDENT-ON* */
    }

  vnet_feature_enable_disable ("device-input", "worker-handoff",
                               sw_if_index, enable_disable, 0, 0);
  return rv;
}

static clib_error_t *
set_interface_handoff_command_fn (vlib_main_t * vm,
                                  unformat_input_t * input,
                                  vlib_cli_command_t * cmd)
{
  handoff_main_t *hm = &handoff_main;
  u32 sw_if_index = ~0;
  int enable_disable = 1;
  uword *bitmap = 0;
  u32 sym = ~0;
  int rv = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "disable"))
        enable_disable = 0;
      else if (unformat (input, "workers %U", unformat_bitmap_list, &bitmap))
        ;
      else if (unformat (input, "%U", unformat_vnet_sw_interface,
                         vnet_get_main (), &sw_if_index))
        ;
      else if (unformat (input, "symmetrical"))
        sym = 1;
      else if (unformat (input, "asymmetrical"))
        sym = 0;
      else
        break;
    }

  if (sw_if_index == ~0)
    return clib_error_return (0, "Please specify an interface...");

  if (bitmap == 0)
    return clib_error_return (0, "Please specify list of workers...");

  rv =
    interface_handoff_enable_disable (vm, sw_if_index, bitmap,
                                      enable_disable);

  switch (rv)
    {
    case 0:
      break;

    case VNET_API_ERROR_INVALID_SW_IF_INDEX:
      return clib_error_return (0, "Invalid interface");

    case VNET_API_ERROR_INVALID_WORKER:
      return clib_error_return (0, "Invalid worker(s)");

    case VNET_API_ERROR_UNIMPLEMENTED:
      return clib_error_return (0,
                                "Device driver doesn't support redirection");

    default:
      return clib_error_return (0, "unknown return value %d", rv);
    }

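  /* A symmetrical hash maps both directions of a flow to the same
     worker; the default key is direction-sensitive. */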
  if (sym == 1)
    hm->hash_fn = eth_get_sym_key;
  else if (sym == 0)
    hm->hash_fn = eth_get_key;

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (set_interface_handoff_command, static) = {
  .path = "set interface handoff",
  .short_help =
  "set interface handoff <interface-name> workers <workers-list> [symmetrical|asymmetrical]",
  .function = set_interface_handoff_command_fn,
};
/* *INDENT-ON* */
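
/*
 * Example usage (interface name and worker list are illustrative):
 *   set interface handoff GigabitEthernet2/0/0 workers 0-2 symmetrical
 *   set interface handoff GigabitEthernet2/0/0 workers 0-2 disable
 */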

typedef struct
{
  u32 buffer_index;
  u32 next_index;
  u32 sw_if_index;
} handoff_dispatch_trace_t;

/* packet trace format function */
static u8 *
format_handoff_dispatch_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  handoff_dispatch_trace_t *t = va_arg (*args, handoff_dispatch_trace_t *);

  s = format (s, "handoff-dispatch: sw_if_index %d next_index %d buffer 0x%x",
              t->sw_if_index, t->next_index, t->buffer_index);
  return s;
}

#define foreach_handoff_dispatch_error \
_(EXAMPLE, "example packets")

typedef enum
{
#define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
  foreach_handoff_dispatch_error
#undef _
    HANDOFF_DISPATCH_N_ERROR,
} handoff_dispatch_error_t;

static char *handoff_dispatch_error_strings[] = {
#define _(sym,string) string,
  foreach_handoff_dispatch_error
#undef _
};

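/*
 * handoff-dispatch runs on the worker thread. It forwards each
 * handed-off buffer to the next node recorded by worker-handoff in
 * the buffer's opaque handoff.next_index field.
 */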
static uword
handoff_dispatch_node_fn (vlib_main_t * vm,
                          vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  handoff_dispatch_next_t next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

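      /* Dual-loop: process two buffers per iteration while prefetching
         the headers of the next two; a single-loop handles leftovers. */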
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          next0 = vnet_buffer (b0)->handoff.next_index;
          next1 = vnet_buffer (b1)->handoff.next_index;

          if (PREDICT_FALSE (vm->trace_main.trace_active_hint))
            {
              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */
                                     0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  t->buffer_index = bi0;
                }
              if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next1, b1, /* follow_chain */
                                     0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index1;
                  t->next_index = next1;
                  t->buffer_index = bi1;
                }
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0;
          u32 sw_if_index0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          next0 = vnet_buffer (b0)->handoff.next_index;

          if (PREDICT_FALSE (vm->trace_main.trace_active_hint))
            {
              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */
                                     0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  t->buffer_index = bi0;
                }
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (handoff_dispatch_node) = {
  .function = handoff_dispatch_node_fn,
  .name = "handoff-dispatch",
  .vector_size = sizeof (u32),
  .format_trace = format_handoff_dispatch_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .flags = VLIB_NODE_FLAG_IS_HANDOFF,

  .n_errors = ARRAY_LEN(handoff_dispatch_error_strings),
  .error_strings = handoff_dispatch_error_strings,

  .n_next_nodes = HANDOFF_DISPATCH_N_NEXT,

  .next_nodes = {
        [HANDOFF_DISPATCH_NEXT_DROP] = "error-drop",
        [HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT] = "ethernet-input",
        [HANDOFF_DISPATCH_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
        [HANDOFF_DISPATCH_NEXT_IP6_INPUT] = "ip6-input",
        [HANDOFF_DISPATCH_NEXT_MPLS_INPUT] = "mpls-input",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (handoff_dispatch_node, handoff_dispatch_node_fn)
/* *INDENT-ON* */

clib_error_t *
handoff_init (vlib_main_t * vm)
{
  handoff_main_t *hm = &handoff_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  uword *p;

  if ((error = vlib_call_init_function (vm, threads_init)))
    return error;

  vlib_thread_registration_t *tr;
  /* Only the standard vnet worker threads are supported */
  p = hash_get_mem (tm->thread_registrations_by_name, "workers");
  if (p)
    {
      tr = (vlib_thread_registration_t *) p[0];
      if (tr)
        {
          hm->num_workers = tr->count;
          hm->first_worker_index = tr->first_index;
        }
    }

  hm->hash_fn = eth_get_key;

  hm->vlib_main = vm;
  hm->vnet_main = &vnet_main;

  hm->frame_queue_index = ~0;

  return 0;
}

VLIB_INIT_FUNCTION (handoff_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */