/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vppinfra/xxhash.h>
#include <vlib/threads.h>
#include <vnet/handoff.h>
#include <vnet/feature/feature.h>

typedef struct
{
  uword *workers_bitmap;
  u32 *workers;
} per_inteface_handoff_data_t;

typedef struct
{
  u32 cached_next_index;
  u32 num_workers;
  u32 first_worker_index;

  per_inteface_handoff_data_t *if_data;

  /* convenience variables */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
} handoff_main_t;

handoff_main_t handoff_main;

typedef struct
{
  u32 sw_if_index;
  u32 next_worker_index;
  u32 buffer_index;
} worker_handoff_trace_t;

/* packet trace format function */
static u8 *
format_worker_handoff_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  worker_handoff_trace_t *t = va_arg (*args, worker_handoff_trace_t *);

  s =
    format (s, "worker-handoff: sw_if_index %d, next_worker %d, buffer 0x%x",
            t->sw_if_index, t->next_worker_index, t->buffer_index);
  return s;
}

vlib_node_registration_t handoff_node;

static uword
worker_handoff_node_fn (vlib_main_t * vm,
                        vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  handoff_main_t *hm = &handoff_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 n_left_from, *from;
  static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index;
  static __thread vlib_frame_queue_t **congested_handoff_queue_by_worker_index
    = 0;
  vlib_frame_queue_elt_t *hf = 0;
  int i;
  u32 n_left_to_next_worker = 0, *to_next_worker = 0;
  u32 next_worker_index = 0;
  u32 current_worker_index = ~0;

  if (PREDICT_FALSE (handoff_queue_elt_by_worker_index == 0))
    {
      vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);

      vec_validate_init_empty (congested_handoff_queue_by_worker_index,
                               hm->first_worker_index + hm->num_workers - 1,
                               (vlib_frame_queue_t *) (~0));
    }

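  /*
   * Note: the per-thread caches above are sized once on first use: one
   * pending frame-queue element slot per vlib main, plus one congestion
   * marker per worker, initialized to the empty sentinel ~0.
   */
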
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      u32 sw_if_index0;
      u32 hash;
      u64 hash_key;
      per_inteface_handoff_data_t *ihd0;
      u32 index0;

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
      ASSERT (hm->if_data);
      ihd0 = vec_elt_at_index (hm->if_data, sw_if_index0);

      next_worker_index = hm->first_worker_index;

      /*
       * Force unknown traffic onto worker 0,
       * and into ethernet-input. $$$$ add more hashes.
       */

      /* Compute ingress LB hash */
      hash_key = eth_get_key ((ethernet_header_t *) b0->data);
      hash = (u32) clib_xxhash (hash_key);

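      /*
       * Since the hash is a pure function of the header fields extracted
       * by eth_get_key (), buffers belonging to the same flow always pick
       * the same worker, preserving per-flow packet order.
       */
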
      /* if input node did not specify next index, then packet
         should go to ethernet-input */
      if (PREDICT_FALSE ((b0->flags & BUFFER_HANDOFF_NEXT_VALID) == 0))
        vnet_buffer (b0)->handoff.next_index =
          HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT;
      else if (vnet_buffer (b0)->handoff.next_index ==
               HANDOFF_DISPATCH_NEXT_IP4_INPUT
               || vnet_buffer (b0)->handoff.next_index ==
               HANDOFF_DISPATCH_NEXT_IP6_INPUT
               || vnet_buffer (b0)->handoff.next_index ==
               HANDOFF_DISPATCH_NEXT_MPLS_INPUT)
        vlib_buffer_advance (b0, (sizeof (ethernet_header_t)));

      if (PREDICT_TRUE (is_pow2 (vec_len (ihd0->workers))))
        index0 = hash & (vec_len (ihd0->workers) - 1);
      else
        index0 = hash % vec_len (ihd0->workers);

      next_worker_index += ihd0->workers[index0];

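      /*
       * Note on the selection above: for a power-of-two worker set,
       * e.g. vec_len (ihd0->workers) == 4, "hash & 3" picks the same
       * slot as "hash % 4" while avoiding an integer divide on the
       * per-packet path.
       */
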
      if (next_worker_index != current_worker_index)
        {
          if (hf)
            hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;

          hf = dpdk_get_handoff_queue_elt (next_worker_index,
                                           handoff_queue_elt_by_worker_index);

          n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
          to_next_worker = &hf->buffer_index[hf->n_vectors];
          current_worker_index = next_worker_index;
        }

      /* enqueue to correct worker thread */
      to_next_worker[0] = bi0;
      to_next_worker++;
      n_left_to_next_worker--;

      if (n_left_to_next_worker == 0)
        {
          hf->n_vectors = VLIB_FRAME_SIZE;
          vlib_put_handoff_queue_elt (hf);
          current_worker_index = ~0;
          handoff_queue_elt_by_worker_index[next_worker_index] = 0;
          hf = 0;
        }

      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                         && (b0->flags & VLIB_BUFFER_IS_TRACED)))
        {
          worker_handoff_trace_t *t =
            vlib_add_trace (vm, node, b0, sizeof (*t));
          t->sw_if_index = sw_if_index0;
          t->next_worker_index = next_worker_index - hm->first_worker_index;
          t->buffer_index = bi0;
        }
    }

  if (hf)
    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;

  /* Ship frames to the worker nodes */
  for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
    {
      if (handoff_queue_elt_by_worker_index[i])
        {
          hf = handoff_queue_elt_by_worker_index[i];
          /*
           * It works better to let the handoff node
           * rate-adapt, always ship the handoff queue element.
           */
          if (1 || hf->n_vectors == hf->last_n_vectors)
            {
              vlib_put_handoff_queue_elt (hf);
              handoff_queue_elt_by_worker_index[i] = 0;
            }
          else
            hf->last_n_vectors = hf->n_vectors;
        }
      congested_handoff_queue_by_worker_index[i] =
        (vlib_frame_queue_t *) (~0);
    }
  hf = 0;
  current_worker_index = ~0;
  return frame->n_vectors;
}

VLIB_REGISTER_NODE (worker_handoff_node) = {
  .function = worker_handoff_node_fn,
  .name = "worker-handoff",
  .vector_size = sizeof (u32),
  .format_trace = format_worker_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
};

VLIB_NODE_FUNCTION_MULTIARCH (worker_handoff_node, worker_handoff_node_fn)

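/*
 * VLIB_NODE_FUNCTION_MULTIARCH generates additional CPU-specific
 * variants of the node function and selects the best match at runtime,
 * so the dispatch loop can use newer vector instructions when the host
 * CPU supports them.
 */
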
int
interface_handoff_enable_disable (vlib_main_t * vm, u32 sw_if_index,
                                  uword * bitmap, int enable_disable)
{
  handoff_main_t *hm = &handoff_main;
  vnet_sw_interface_t *sw;
  vnet_main_t *vnm = vnet_get_main ();
  per_inteface_handoff_data_t *d;
  int i, rv = 0;

  if (pool_is_free_index (vnm->interface_main.sw_interfaces, sw_if_index))
    return VNET_API_ERROR_INVALID_SW_IF_INDEX;

  sw = vnet_get_sw_interface (vnm, sw_if_index);
  if (sw->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
    return VNET_API_ERROR_INVALID_SW_IF_INDEX;

  if (clib_bitmap_last_set (bitmap) >= hm->num_workers)
    return VNET_API_ERROR_INVALID_WORKER;

  vec_validate (hm->if_data, sw_if_index);
  d = vec_elt_at_index (hm->if_data, sw_if_index);

  vec_free (d->workers);
  vec_free (d->workers_bitmap);

  if (enable_disable)
    {
      d->workers_bitmap = bitmap;
      clib_bitmap_foreach (i, bitmap,
      ({
        vec_add1 (d->workers, i);
      }));
    }

  vnet_feature_enable_disable ("device-input", "worker-handoff",
                               sw_if_index, enable_disable, 0, 0);

  return rv;
}

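/*
 * Hypothetical usage sketch (compiled out): steer an assumed sw_if_index
 * onto workers 0 and 1. Bitmap bits are worker indices relative to the
 * first worker thread; the bitmap is retained by the per-interface
 * handoff data on enable, so it must not be freed by the caller.
 */
#if 0
static void
handoff_usage_example (vlib_main_t * vm)
{
  uword *bitmap = 0;
  u32 example_sw_if_index = 1;  /* assumed interface index */

  bitmap = clib_bitmap_set (bitmap, 0, 1);  /* worker 0 */
  bitmap = clib_bitmap_set (bitmap, 1, 1);  /* worker 1 */

  interface_handoff_enable_disable (vm, example_sw_if_index, bitmap,
                                    1 /* enable */);
}
#endif
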
static clib_error_t *
set_interface_handoff_command_fn (vlib_main_t * vm,
                                  unformat_input_t * input,
                                  vlib_cli_command_t * cmd)
{
  u32 sw_if_index = ~0;
  int enable_disable = 1;
  uword *bitmap = 0;
  int rv = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "disable"))
        enable_disable = 0;
      else if (unformat (input, "workers %U", unformat_bitmap_list, &bitmap))
        ;
      else if (unformat (input, "%U", unformat_vnet_sw_interface,
                         vnet_get_main (), &sw_if_index))
        ;
      else
        break;
    }

  if (sw_if_index == ~0)
    return clib_error_return (0, "Please specify an interface...");

  if (bitmap == 0)
    return clib_error_return (0, "Please specify list of workers...");

  rv = interface_handoff_enable_disable (vm, sw_if_index, bitmap,
                                         enable_disable);

  switch (rv)
    {
    case 0:
      break;

    case VNET_API_ERROR_INVALID_SW_IF_INDEX:
      return clib_error_return (0, "Invalid interface");

    case VNET_API_ERROR_INVALID_WORKER:
      return clib_error_return (0, "Invalid worker(s)");

    case VNET_API_ERROR_UNIMPLEMENTED:
      return clib_error_return (0,
                                "Device driver doesn't support redirection");

    default:
      return clib_error_return (0, "unknown return value %d", rv);
    }

  return 0;
}

VLIB_CLI_COMMAND (set_interface_handoff_command, static) = {
  .path = "set interface handoff",
  .short_help =
    "set interface handoff <interface-name> workers <workers-list>",
  .function = set_interface_handoff_command_fn,
};

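/*
 * Example CLI usage (interface name is hypothetical); the workers list
 * is parsed by unformat_bitmap_list, which accepts comma-separated
 * values and ranges:
 *   set interface handoff GigabitEthernet0/8/0 workers 0-2,4
 *   set interface handoff GigabitEthernet0/8/0 workers 0-2,4 disable
 */
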
typedef struct
{
  u32 sw_if_index;
  u32 next_index;
  u32 buffer_index;
} handoff_dispatch_trace_t;

/* packet trace format function */
static u8 *
format_handoff_dispatch_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  handoff_dispatch_trace_t *t = va_arg (*args, handoff_dispatch_trace_t *);

  s = format (s, "handoff-dispatch: sw_if_index %d next_index %d buffer 0x%x",
              t->sw_if_index, t->next_index, t->buffer_index);
  return s;
}

vlib_node_registration_t handoff_dispatch_node;

#define foreach_handoff_dispatch_error \
_(EXAMPLE, "example packets")

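/*
 * The X-macro expands once per error counter; with the single entry
 * above it yields HANDOFF_DISPATCH_ERROR_EXAMPLE in the enum below and
 * "example packets" in the corresponding string table.
 */
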
typedef enum
{
#define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
  foreach_handoff_dispatch_error
#undef _
    HANDOFF_DISPATCH_N_ERROR,
} handoff_dispatch_error_t;

static char *handoff_dispatch_error_strings[] = {
#define _(sym,string) string,
  foreach_handoff_dispatch_error
#undef _
};

static uword
handoff_dispatch_node_fn (vlib_main_t * vm,
                          vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  handoff_dispatch_next_t next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          next0 = vnet_buffer (b0)->handoff.next_index;
          next1 = vnet_buffer (b1)->handoff.next_index;

          if (PREDICT_FALSE (vm->trace_main.trace_active_hint))
            {
              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next0, b0,
                                     /* follow_chain */ 0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  t->buffer_index = bi0;
                }
              if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next1, b1,
                                     /* follow_chain */ 0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index1;
                  t->next_index = next1;
                  t->buffer_index = bi1;
                }
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0;
          u32 sw_if_index0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          next0 = vnet_buffer (b0)->handoff.next_index;

          if (PREDICT_FALSE (vm->trace_main.trace_active_hint))
            {
              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                  vlib_trace_buffer (vm, node, next0, b0,
                                     /* follow_chain */ 0);
                  handoff_dispatch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
                  t->sw_if_index = sw_if_index0;
                  t->next_index = next0;
                  t->buffer_index = bi0;
                }
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_REGISTER_NODE (handoff_dispatch_node) = {
  .function = handoff_dispatch_node_fn,
  .name = "handoff-dispatch",
  .vector_size = sizeof (u32),
  .format_trace = format_handoff_dispatch_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .flags = VLIB_NODE_FLAG_IS_HANDOFF,

  .n_errors = ARRAY_LEN(handoff_dispatch_error_strings),
  .error_strings = handoff_dispatch_error_strings,

  .n_next_nodes = HANDOFF_DISPATCH_N_NEXT,

  .next_nodes = {
    [HANDOFF_DISPATCH_NEXT_DROP] = "error-drop",
    [HANDOFF_DISPATCH_NEXT_ETHERNET_INPUT] = "ethernet-input",
    [HANDOFF_DISPATCH_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [HANDOFF_DISPATCH_NEXT_IP6_INPUT] = "ip6-input",
    [HANDOFF_DISPATCH_NEXT_MPLS_INPUT] = "mpls-input",
  },
};

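/*
 * The .next_nodes table maps the HANDOFF_DISPATCH_NEXT_* indices that
 * worker-handoff stored in vnet_buffer (b)->handoff.next_index to the
 * named graph nodes, so each handed-off buffer resumes processing at
 * the arc chosen on the originating thread.
 */
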
VLIB_NODE_FUNCTION_MULTIARCH (handoff_dispatch_node, handoff_dispatch_node_fn)

clib_error_t *
handoff_init (vlib_main_t * vm)
{
  handoff_main_t *hm = &handoff_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error;
  uword *p;

  if ((error = vlib_call_init_function (vm, threads_init)))
    return error;

  vlib_thread_registration_t *tr;
  /* Only the standard vnet worker threads are supported */
  p = hash_get_mem (tm->thread_registrations_by_name, "workers");
  if (p)
    {
      tr = (vlib_thread_registration_t *) p[0];
      if (tr)
        {
          hm->num_workers = tr->count;
          hm->first_worker_index = tr->first_index;
        }
    }

  hm->vlib_main = vm;
  hm->vnet_main = &vnet_main;

  ASSERT (tm->handoff_dispatch_node_index == ~0);
  tm->handoff_dispatch_node_index = handoff_dispatch_node.index;

  return 0;
}

VLIB_INIT_FUNCTION (handoff_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */