threads: add support for multiple worker handoff queues
[vpp.git] / vnet / vnet / handoff.c

/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vppinfra/xxhash.h>
#include <vlib/threads.h>
#include <vnet/handoff.h>
#include <vnet/feature/feature.h>

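/*
 * Worker handoff works in two stages:
 *
 *  - worker-handoff runs on the thread that received the packet. It
 *    computes a flow hash over the ethernet header, picks a worker
 *    from the interface's configured worker set, and enqueues the
 *    buffer index onto that worker's frame queue.
 *
 *  - handoff-dispatch runs on the target worker and forwards each
 *    buffer to the next node recorded in vnet_buffer()->handoff,
 *    preserving flow affinity across threads.
 */
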
typedef struct
{
  uword *workers_bitmap;
  u32 *workers;
} per_interface_handoff_data_t;

typedef struct
{
  u32 cached_next_index;
  u32 num_workers;
  u32 first_worker_index;

  per_interface_handoff_data_t *if_data;

  /* Worker handoff frame queue index */
  u32 frame_queue_index;

  /* convenience variables */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
} handoff_main_t;

handoff_main_t handoff_main;
vlib_node_registration_t handoff_dispatch_node;

typedef struct
{
  u32 sw_if_index;
  u32 next_worker_index;
  u32 buffer_index;
} worker_handoff_trace_t;

/* packet trace format function */
static u8 *
format_worker_handoff_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  worker_handoff_trace_t *t = va_arg (*args, worker_handoff_trace_t *);

  s =
    format (s, "worker-handoff: sw_if_index %d, next_worker %d, buffer 0x%x",
            t->sw_if_index, t->next_worker_index, t->buffer_index);
  return s;
}

vlib_node_registration_t handoff_node;

static uword
worker_handoff_node_fn (vlib_main_t * vm,
                        vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  handoff_main_t *hm = &handoff_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 n_left_from, *from;
  static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index;
  static __thread vlib_frame_queue_t **congested_handoff_queue_by_worker_index
    = 0;
  vlib_frame_queue_elt_t *hf = 0;
  int i;
  u32 n_left_to_next_worker = 0, *to_next_worker = 0;
  u32 next_worker_index = 0;
  u32 current_worker_index = ~0;

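  /*
   * First call on this thread: allocate the per-thread cache of
   * frame queue elements (one slot per vlib_main) and mark all
   * congestion slots empty.
   */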
  if (PREDICT_FALSE (handoff_queue_elt_by_worker_index == 0))
    {
      vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);

      vec_validate_init_empty (congested_handoff_queue_by_worker_index,
                               hm->first_worker_index + hm->num_workers - 1,
                               (vlib_frame_queue_t *) (~0));
    }

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      u32 sw_if_index0;
      u32 hash;
      u64 hash_key;
      per_interface_handoff_data_t *ihd0;
      u32 index0;

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
      ASSERT (hm->if_data);
      ihd0 = vec_elt_at_index (hm->if_data, sw_if_index0);

      next_worker_index = hm->first_worker_index;

      /*
       * Force unknown traffic onto worker 0,
       * and into ethernet-input. $$$$ add more hashes.
       */

      /* Compute ingress LB hash */
      hash_key = eth_get_key ((ethernet_header_t *) b0->data);
      hash = (u32) clib_xxhash (hash_key);
      /* If the input node did not specify a next index, the packet
         should go to ethernet-input */
      if (PREDICT_FALSE ((b0->flags & VNET_BUFFER_HANDOFF_NEXT_VALID) == 0))
        vnet_buffer (b0)->handoff.next_index =
          HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT;
      else if (vnet_buffer (b0)->handoff.next_index ==
               HANDOFF_DISPATCH_NEXT_IP4_INPUT
               || vnet_buffer (b0)->handoff.next_index ==
               HANDOFF_DISPATCH_NEXT_IP6_INPUT
               || vnet_buffer (b0)->handoff.next_index ==
               HANDOFF_DISPATCH_NEXT_MPLS_INPUT)
        vlib_buffer_advance (b0, (sizeof (ethernet_header_t)));

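      /*
       * Map the hash onto the interface's worker set: a power-of-two
       * set size permits a cheap mask (e.g. 4 workers: hash & 3);
       * otherwise fall back to a modulo.
       */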
      if (PREDICT_TRUE (is_pow2 (vec_len (ihd0->workers))))
        index0 = hash & (vec_len (ihd0->workers) - 1);
      else
        index0 = hash % vec_len (ihd0->workers);

      next_worker_index += ihd0->workers[index0];

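      /*
       * Target worker changed: record how full the previous frame
       * queue element is, then fetch (or reuse a cached) element
       * for the new worker and continue filling from its tail.
       */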
      if (next_worker_index != current_worker_index)
        {
          if (hf)
            hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;

          hf = vlib_get_worker_handoff_queue_elt (hm->frame_queue_index,
                                                  next_worker_index,
                                                  handoff_queue_elt_by_worker_index);

          n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
          to_next_worker = &hf->buffer_index[hf->n_vectors];
          current_worker_index = next_worker_index;
        }

      /* enqueue to correct worker thread */
      to_next_worker[0] = bi0;
      to_next_worker++;
      n_left_to_next_worker--;

      if (n_left_to_next_worker == 0)
        {
          hf->n_vectors = VLIB_FRAME_SIZE;
          vlib_put_frame_queue_elt (hf);
          current_worker_index = ~0;
          handoff_queue_elt_by_worker_index[next_worker_index] = 0;
          hf = 0;
        }

      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                         && (b0->flags & VLIB_BUFFER_IS_TRACED)))
        {
          worker_handoff_trace_t *t =
            vlib_add_trace (vm, node, b0, sizeof (*t));
          t->sw_if_index = sw_if_index0;
          t->next_worker_index = next_worker_index - hm->first_worker_index;
          t->buffer_index = bi0;
        }
    }

  if (hf)
    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;

  /* Ship frames to the worker nodes */
  for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
    {
      if (handoff_queue_elt_by_worker_index[i])
        {
          hf = handoff_queue_elt_by_worker_index[i];
          /*
           * It works better to let the handoff node
           * rate-adapt, always ship the handoff queue element.
           */
          if (1 || hf->n_vectors == hf->last_n_vectors)
            {
              vlib_put_frame_queue_elt (hf);
              handoff_queue_elt_by_worker_index[i] = 0;
            }
          else
            hf->last_n_vectors = hf->n_vectors;
        }
      congested_handoff_queue_by_worker_index[i] =
        (vlib_frame_queue_t *) (~0);
    }
  hf = 0;
  current_worker_index = ~0;
  return frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (worker_handoff_node) = {
  .function = worker_handoff_node_fn,
  .name = "worker-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_worker_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (worker_handoff_node, worker_handoff_node_fn)
/* *INDENT-ON* */

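/*
 * Enable or disable worker handoff on a hardware interface. 'bitmap'
 * selects the workers eligible to receive this interface's traffic
 * and is validated against the number of configured workers. The
 * shared frame queue is allocated on first use, and the
 * "worker-handoff" feature is toggled on the device-input arc.
 */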
int
interface_handoff_enable_disable (vlib_main_t * vm, u32 sw_if_index,
                                  uword * bitmap, int enable_disable)
{
  handoff_main_t *hm = &handoff_main;
  vnet_sw_interface_t *sw;
  vnet_main_t *vnm = vnet_get_main ();
  per_interface_handoff_data_t *d;
  int i, rv = 0;

  if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
    return VNET_API_ERROR_INVALID_SW_IF_INDEX;

  sw = vnet_get_sw_interface (vnm, sw_if_index);
  if (sw->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
    return VNET_API_ERROR_INVALID_SW_IF_INDEX;

  if (clib_bitmap_last_set (bitmap) >= hm->num_workers)
    return VNET_API_ERROR_INVALID_WORKER;

  if (hm->frame_queue_index == ~0)
    hm->frame_queue_index =
      vlib_frame_queue_main_init (handoff_dispatch_node.index, 0);

  vec_validate (hm->if_data, sw_if_index);
  d = vec_elt_at_index (hm->if_data, sw_if_index);

  vec_free (d->workers);
  vec_free (d->workers_bitmap);

  if (enable_disable)
    {
      d->workers_bitmap = bitmap;
      /* *INDENT-OFF* */
      clib_bitmap_foreach (i, bitmap,
        ({
          vec_add1(d->workers, i);
        }));
      /* *INDENT-ON* */
    }

  vnet_feature_enable_disable ("device-input", "worker-handoff",
                               sw_if_index, enable_disable, 0, 0);
  return rv;
}

static clib_error_t *
set_interface_handoff_command_fn (vlib_main_t * vm,
                                  unformat_input_t * input,
                                  vlib_cli_command_t * cmd)
{
  u32 sw_if_index = ~0;
  int enable_disable = 1;
  uword *bitmap = 0;

  int rv = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "disable"))
        enable_disable = 0;
      else if (unformat (input, "workers %U", unformat_bitmap_list, &bitmap))
        ;
      else if (unformat (input, "%U", unformat_vnet_sw_interface,
                         vnet_get_main (), &sw_if_index))
        ;
      else
        break;
    }

  if (sw_if_index == ~0)
    return clib_error_return (0, "Please specify an interface...");

  if (bitmap == 0)
    return clib_error_return (0, "Please specify list of workers...");

  rv =
    interface_handoff_enable_disable (vm, sw_if_index, bitmap,
                                      enable_disable);

  switch (rv)
    {
    case 0:
      break;

    case VNET_API_ERROR_INVALID_SW_IF_INDEX:
      return clib_error_return (0, "Invalid interface");
      break;

    case VNET_API_ERROR_INVALID_WORKER:
      return clib_error_return (0, "Invalid worker(s)");
      break;

    case VNET_API_ERROR_UNIMPLEMENTED:
      return clib_error_return (0,
                                "Device driver doesn't support redirection");
      break;

    default:
      return clib_error_return (0, "unknown return value %d", rv);
    }
  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (set_interface_handoff_command, static) = {
  .path = "set interface handoff",
  .short_help =
  "set interface handoff <interface-name> workers <workers-list> [disable]",
  .function = set_interface_handoff_command_fn,
};
/* *INDENT-ON* */
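
/*
 * Example use (the interface name is illustrative); the worker list
 * uses unformat_bitmap_list syntax, e.g. "0-2,4":
 *
 *   set interface handoff GigabitEthernet0/8/0 workers 0-2,4
 *   set interface handoff GigabitEthernet0/8/0 workers 0-2,4 disable
 */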

typedef struct
{
  u32 buffer_index;
  u32 next_index;
  u32 sw_if_index;
} handoff_dispatch_trace_t;

/* packet trace format function */
static u8 *
format_handoff_dispatch_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  handoff_dispatch_trace_t *t = va_arg (*args, handoff_dispatch_trace_t *);

  s = format (s, "handoff-dispatch: sw_if_index %d next_index %d buffer 0x%x",
              t->sw_if_index, t->next_index, t->buffer_index);
  return s;
}

#define foreach_handoff_dispatch_error \
_(EXAMPLE, "example packets")

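/*
 * X-macro: foreach_handoff_dispatch_error expands once into the error
 * enum below and once into the matching counter strings, keeping the
 * two in sync.
 */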
typedef enum
{
#define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
  foreach_handoff_dispatch_error
#undef _
    HANDOFF_DISPATCH_N_ERROR,
} handoff_dispatch_error_t;

static char *handoff_dispatch_error_strings[] = {
#define _(sym,string) string,
  foreach_handoff_dispatch_error
#undef _
};

static uword
handoff_dispatch_node_fn (vlib_main_t * vm,
                          vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  handoff_dispatch_next_t next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

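  /*
   * Standard vlib dual/single loop: process buffers two at a time
   * while prefetching the next pair's headers, then drain the
   * remainder one at a time. Each buffer's next node was stored by
   * the sending thread in vnet_buffer()->handoff.next_index.
   */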
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          next0 = vnet_buffer (b0)->handoff.next_index;
          next1 = vnet_buffer (b1)->handoff.next_index;

          if (PREDICT_FALSE (vm->trace_main.trace_active_hint))
            {
              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next0, b0,       /* follow_chain */
                                     0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  t->buffer_index = bi0;
                }
              if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next1, b1,       /* follow_chain */
                                     0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index1;
                  t->next_index = next1;
                  t->buffer_index = bi1;
                }
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0;
          u32 sw_if_index0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          next0 = vnet_buffer (b0)->handoff.next_index;

          if (PREDICT_FALSE (vm->trace_main.trace_active_hint))
            {
              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next0, b0,       /* follow_chain */
                                     0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  t->buffer_index = bi0;
                }
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (handoff_dispatch_node) = {
  .function = handoff_dispatch_node_fn,
  .name = "handoff-dispatch",
  .vector_size = sizeof (u32),
  .format_trace = format_handoff_dispatch_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .flags = VLIB_NODE_FLAG_IS_HANDOFF,

  .n_errors = ARRAY_LEN(handoff_dispatch_error_strings),
  .error_strings = handoff_dispatch_error_strings,

  .n_next_nodes = HANDOFF_DISPATCH_N_NEXT,

  .next_nodes = {
        [HANDOFF_DISPATCH_NEXT_DROP] = "error-drop",
        [HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT] = "ethernet-input",
        [HANDOFF_DISPATCH_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
        [HANDOFF_DISPATCH_NEXT_IP6_INPUT] = "ip6-input",
        [HANDOFF_DISPATCH_NEXT_MPLS_INPUT] = "mpls-input",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (handoff_dispatch_node, handoff_dispatch_node_fn)
/* *INDENT-ON* */

clib_error_t *
handoff_init (vlib_main_t * vm)
{
  handoff_main_t *hm = &handoff_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  uword *p;

  if ((error = vlib_call_init_function (vm, threads_init)))
    return error;

  vlib_thread_registration_t *tr;
  /* Only the standard vnet worker threads are supported */
  p = hash_get_mem (tm->thread_registrations_by_name, "workers");
  if (p)
    {
      tr = (vlib_thread_registration_t *) p[0];
      if (tr)
        {
          hm->num_workers = tr->count;
          hm->first_worker_index = tr->first_index;
        }
    }

  hm->vlib_main = vm;
  hm->vnet_main = &vnet_main;

  hm->frame_queue_index = ~0;

  return 0;
}

VLIB_INIT_FUNCTION (handoff_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */