2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/dpo/dvr_dpo.h>
17 #include <vnet/fib/fib_node.h>
18 #include <vnet/ip/ip.h>
19 #include <vnet/ethernet/ethernet.h>
/*
 * NOTE(review): this view of the file is sampled — comment delimiters,
 * braces and whole statements are elided throughout. Code tokens below
 * are left byte-identical; only review comments are added.
 */
22 * The 'DB' of DVR DPOs.
23 * There is one per-interface per-L3 proto, so this is a per-interface vector
/* Per-L3-protocol vector, indexed by sw_if_index, of DVR DPO pool indices. */
25 static index_t *dvr_dpo_db[DPO_PROTO_NUM];
/* Fragment of the allocator: take a fresh element from the DPO pool. */
32 pool_get(dvr_dpo_pool, dd);
/*
 * Resolve a generic dpo_id_t to its dvr_dpo_t pool element.
 * Asserts the DPO is of DVR type before indexing the pool.
 * NOTE(review): brace/blank lines elided in this view.
 */
37 static inline dvr_dpo_t *
38 dvr_dpo_get_from_dpo (const dpo_id_t *dpo)
40 ASSERT(DPO_DVR == dpo->dpoi_type);
42 return (dvr_dpo_get(dpo->dpoi_index));
/*
 * Return the pool index of a DVR DPO — pointer arithmetic against the
 * pool base, the standard VPP pool-index idiom.
 */
46 dvr_dpo_get_index (dvr_dpo_t *dd)
48 return (dd - dvr_dpo_pool);
/*
 * dpo_vft_t lock callback: resolve the DPO and take a reference.
 * NOTE(review): the lock-count increment line (presumably dd->dd_locks++)
 * is elided in this view — confirm against the full source.
 */
52 dvr_dpo_lock (dpo_id_t *dpo)
56 dd = dvr_dpo_get_from_dpo(dpo);
/*
 * dpo_vft_t unlock callback: drop a reference; when the count reaches
 * zero, disable the per-protocol dvr-reinject output feature on the
 * interface, invalidate the DB slot and free the pool element.
 * NOTE(review): the dd_locks decrement line is elided in this view.
 */
61 dvr_dpo_unlock (dpo_id_t *dpo)
65 dd = dvr_dpo_get_from_dpo(dpo);
/* Last reference gone: tear everything down. */
68 if (0 == dd->dd_locks)
70 if (DPO_PROTO_IP4 == dd->dd_proto)
72 vnet_feature_enable_disable ("ip4-output", "ip4-dvr-reinject",
73 dd->dd_sw_if_index, 0, 0, 0);
77 vnet_feature_enable_disable ("ip6-output", "ip6-dvr-reinject",
78 dd->dd_sw_if_index, 0, 0, 0);
/* Clear the per-interface DB entry before returning the element. */
81 dvr_dpo_db[dd->dd_proto][dd->dd_sw_if_index] = INDEX_INVALID;
82 pool_put(dvr_dpo_pool, dd);
/*
 * Find-or-create the DVR DPO for (dproto, sw_if_index) and stack 'dpo'
 * on it (dpo_set also takes the lock). On first creation, the
 * per-protocol dvr-reinject feature is enabled on the interface's
 * output arc so DVR packets are diverted back into the L2 path.
 * NOTE(review): allocation call and several braces elided in this view.
 */
87 dvr_dpo_add_or_lock (u32 sw_if_index,
/* Grow the per-interface DB vector on demand, new slots = INDEX_INVALID. */
93 vec_validate_init_empty(dvr_dpo_db[dproto],
97 if (INDEX_INVALID == dvr_dpo_db[dproto][sw_if_index])
101 dd->dd_sw_if_index = sw_if_index;
102 dd->dd_proto = dproto;
104 dvr_dpo_db[dproto][sw_if_index] = dvr_dpo_get_index(dd);
107 * enable the reinject into L2 path feature on the interface
109 if (DPO_PROTO_IP4 == dproto)
110 vnet_feature_enable_disable ("ip4-output", "ip4-dvr-reinject",
111 dd->dd_sw_if_index, 1, 0, 0);
112 else if (DPO_PROTO_IP6 == dproto)
113 vnet_feature_enable_disable ("ip6-output", "ip6-dvr-reinject",
114 dd->dd_sw_if_index, 1, 0, 0);
/* Existing entry: just fetch it. */
120 dd = dvr_dpo_get(dvr_dpo_db[dproto][sw_if_index]);
123 dpo_set(dpo, DPO_DVR, dproto, dvr_dpo_get_index(dd));
/*
 * Interface event callbacks registered with vnet. Bodies are elided in
 * this view — from the registrations: admin up/down, HW link up/down,
 * and sw-interface add/del are each hooked.
 */
127 static clib_error_t *
128 dvr_dpo_interface_state_change (vnet_main_t * vnm,
137 VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(
138 dvr_dpo_interface_state_change);
141 * @brief Registered callback for HW interface state changes
143 static clib_error_t *
144 dvr_dpo_hw_interface_state_change (vnet_main_t * vnm,
151 VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(
152 dvr_dpo_hw_interface_state_change);
/* Callback for sw-interface add/delete events. */
154 static clib_error_t *
155 dvr_dpo_interface_delete (vnet_main_t * vnm,
162 VNET_SW_INTERFACE_ADD_DEL_FUNCTION(
163 dvr_dpo_interface_delete);
/*
 * format() callback for a DVR DPO: "dvr-<interface-name>-dpo".
 * va_args: index_t (pool index of the DPO), u32 indent (unused).
 */
166 format_dvr_dpo (u8* s, va_list *ap)
168 index_t index = va_arg(*ap, index_t);
169 CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
170 vnet_main_t * vnm = vnet_get_main();
171 dvr_dpo_t *dd = dvr_dpo_get(index);
173 return (format(s, "dvr-%U-dpo",
174 format_vnet_sw_interface_name,
176 vnet_get_sw_interface(vnm, dd->dd_sw_if_index)));
/*
 * Report DVR pool usage for 'show fib memory'.
 * NOTE(review): the per-element size argument line is elided in this view.
 */
180 dvr_dpo_mem_show (void)
182 fib_show_memory_usage("DVR",
183 pool_elts(dvr_dpo_pool),
184 pool_len(dvr_dpo_pool),
/* Virtual function table binding the DVR DPO into the generic DPO layer. */
189 const static dpo_vft_t dvr_dpo_vft = {
190 .dv_lock = dvr_dpo_lock,
191 .dv_unlock = dvr_dpo_unlock,
192 .dv_format = format_dvr_dpo,
193 .dv_mem_show = dvr_dpo_mem_show,
/*
 * Per-protocol graph-node name tables handed to dpo_register(); these
 * are the nodes a DVR DPO's children are linked to. (The original
 * comment below says "glean" — presumably copy-paste from the glean
 * DPO; these are the DVR nodes. TODO confirm against full source.)
 */
197 * @brief The per-protocol VLIB graph nodes that are assigned to a glean
200 * this means that these graph nodes are ones from which a glean is the
201 * parent object in the DPO-graph.
203 const static char* const dvr_dpo_ip4_nodes[] =
208 const static char* const dvr_dpo_ip6_nodes[] =
214 const static char* const * const dvr_dpo_nodes[DPO_PROTO_NUM] =
216 [DPO_PROTO_IP4] = dvr_dpo_ip4_nodes,
217 [DPO_PROTO_IP6] = dvr_dpo_ip6_nodes,
/* Module init: register the DVR DPO type, its vft and graph nodes. */
221 dvr_dpo_module_init (void)
223 dpo_register(DPO_DVR,
/*
 * Packet trace record for the DVR nodes (sw_if_index only, per the
 * trace-writing code below).
 */
229 * @brief Interface DPO trace data
231 typedef struct dvr_dpo_trace_t_
/*
 * Shared ip4/ip6 DVR DPO node function. For each buffer: resolve the
 * DVR DPO from the TX adjacency index, set the TX sw_if_index, rewind
 * the buffer to the ethernet header (recording l2_len), mark it
 * VNET_BUFFER_F_IS_DVR, and start the ipX-output feature arc so output
 * features still run before the reinject node sends it to L2.
 * NOTE(review): many lines (buffer-index loads, prefetches, braces)
 * are elided in this sampled view; code tokens are untouched.
 */
237 dvr_dpo_inline (vlib_main_t * vm,
238 vlib_node_runtime_t * node,
239 vlib_frame_t * from_frame,
242 u32 n_left_from, next_index, * from, * to_next;
/* Pick the v4 or v6 lookup main for its output feature arc index. */
243 ip_lookup_main_t *lm = (is_ip6?
244 &ip6_main.lookup_main:
245 &ip4_main.lookup_main);
247 from = vlib_frame_vector_args (from_frame);
248 n_left_from = from_frame->n_vectors;
250 next_index = node->cached_next_index;
252 while (n_left_from > 0)
256 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
/* Dual-loop: process two buffers per iteration. */
258 while (n_left_from >= 4 && n_left_to_next > 2)
260 const dvr_dpo_t *dd0, *dd1;
261 u32 bi0, ddi0, bi1, ddi1;
262 vlib_buffer_t *b0, *b1;
276 b0 = vlib_get_buffer (vm, bi0);
277 b1 = vlib_get_buffer (vm, bi1);
/* The DVR DPO index was stored in the TX adjacency slot by the lookup. */
279 ddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
280 ddi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
281 dd0 = dvr_dpo_get(ddi0);
282 dd1 = dvr_dpo_get(ddi1);
284 vnet_buffer(b0)->sw_if_index[VLIB_TX] = dd0->dd_sw_if_index;
285 vnet_buffer(b1)->sw_if_index[VLIB_TX] = dd1->dd_sw_if_index;
/* Distance from current (L3) position back to the ethernet header. */
287 len0 = ((u8*)vlib_buffer_get_current(b0) -
288 (u8*)ethernet_buffer_get_header(b0));
289 len1 = ((u8*)vlib_buffer_get_current(b1) -
290 (u8*)ethernet_buffer_get_header(b1));
291 vnet_buffer(b0)->l2.l2_len = len0;
292 vnet_buffer(b1)->l2.l2_len = len1;
/* Flag so the reinject feature node recognises DVR packets. */
293 b0->flags |= VNET_BUFFER_F_IS_DVR;
294 b1->flags |= VNET_BUFFER_F_IS_DVR;
/* Rewind so the packet again starts at the ethernet header. */
296 vlib_buffer_advance(b0, -len0);
297 vlib_buffer_advance(b1, -len1);
/* Run the ipX-output feature arc on the egress interface. */
299 vnet_feature_arc_start (lm->output_feature_arc_index,
300 dd0->dd_sw_if_index, &next0, b0);
301 vnet_feature_arc_start (lm->output_feature_arc_index,
302 dd1->dd_sw_if_index, &next1, b1);
304 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
306 dvr_dpo_trace_t *tr0;
308 tr0 = vlib_add_trace (vm, node, b0, sizeof (*tr0));
309 tr0->sw_if_index = dd0->dd_sw_if_index;
311 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
313 dvr_dpo_trace_t *tr1;
315 tr1 = vlib_add_trace (vm, node, b1, sizeof (*tr1));
316 tr1->sw_if_index = dd1->dd_sw_if_index;
319 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
320 n_left_to_next, bi0, bi1,
/* Single-buffer tail loop: same steps as the dual loop above. */
324 while (n_left_from > 0 && n_left_to_next > 0)
326 const dvr_dpo_t * dd0;
340 b0 = vlib_get_buffer (vm, bi0);
342 ddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
343 dd0 = dvr_dpo_get(ddi0);
345 vnet_buffer(b0)->sw_if_index[VLIB_TX] = dd0->dd_sw_if_index;
348 * take that, rewind it back...
350 len0 = ((u8*)vlib_buffer_get_current(b0) -
351 (u8*)ethernet_buffer_get_header(b0));
352 vnet_buffer(b0)->l2.l2_len = len0;
353 b0->flags |= VNET_BUFFER_F_IS_DVR;
354 vlib_buffer_advance(b0, -len0);
357 * start processing the ipX output features
359 vnet_feature_arc_start(lm->output_feature_arc_index,
360 dd0->dd_sw_if_index, &next0, b0);
362 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
366 tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
367 tr->sw_if_index = dd0->dd_sw_if_index;
370 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
374 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
376 return from_frame->n_vectors;
/*
 * Trace formatter shared by all four DVR nodes: prints the recorded
 * sw_if_index. NOTE(review): the final format argument line is elided
 * in this view.
 */
380 format_dvr_dpo_trace (u8 * s, va_list * args)
382 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
383 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
384 dvr_dpo_trace_t * t = va_arg (*args, dvr_dpo_trace_t *);
385 u32 indent = format_get_indent (s);
386 s = format (s, "%U sw_if_index:%d",
387 format_white_space, indent,
/* v4 entry point: thin wrapper setting is_ip6 = 0. */
393 ip4_dvr_dpo (vlib_main_t * vm,
394 vlib_node_runtime_t * node,
395 vlib_frame_t * from_frame)
397 return (dvr_dpo_inline(vm, node, from_frame, 0));
/* v6 entry point: thin wrapper setting is_ip6 = 1. */
401 ip6_dvr_dpo (vlib_main_t * vm,
402 vlib_node_runtime_t * node,
403 vlib_frame_t * from_frame)
405 return (dvr_dpo_inline(vm, node, from_frame, 1));
/*
 * Node registrations: siblings of ipX-rewrite so they share the same
 * next-node arcs.
 */
408 VLIB_REGISTER_NODE (ip4_dvr_dpo_node) = {
409 .function = ip4_dvr_dpo,
410 .name = "ip4-dvr-dpo",
411 .vector_size = sizeof (u32),
412 .format_trace = format_dvr_dpo_trace,
413 .sibling_of = "ip4-rewrite",
415 VLIB_REGISTER_NODE (ip6_dvr_dpo_node) = {
416 .function = ip6_dvr_dpo,
417 .name = "ip6-dvr-dpo",
418 .vector_size = sizeof (u32),
419 .format_trace = format_dvr_dpo_trace,
420 .sibling_of = "ip6-rewrite",
423 VLIB_NODE_FUNCTION_MULTIARCH (ip4_dvr_dpo_node, ip4_dvr_dpo)
424 VLIB_NODE_FUNCTION_MULTIARCH (ip6_dvr_dpo_node, ip6_dvr_dpo)
/* Next-node indices for the reinject nodes (single arc: l2-output). */
426 typedef enum dvr_reinject_next_t_
428 DVR_REINJECT_OUTPUT = 0,
429 } dvr_reinject_next_t;
/*
 * Output-arc feature node: buffers marked VNET_BUFFER_F_IS_DVR are
 * diverted to l2-output; all others continue along the feature arc
 * via vnet_feature_next().
 * NOTE(review): buffer-index loads, prefetches and braces elided in
 * this sampled view; code tokens are untouched.
 */
432 dvr_reinject_inline (vlib_main_t * vm,
433 vlib_node_runtime_t * node,
434 vlib_frame_t * from_frame)
436 u32 n_left_from, next_index, * from, * to_next;
438 from = vlib_frame_vector_args (from_frame);
439 n_left_from = from_frame->n_vectors;
441 next_index = node->cached_next_index;
443 while (n_left_from > 0)
447 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
/* Dual-loop: two buffers per iteration. */
449 while (n_left_from >= 4 && n_left_to_next > 2)
451 dvr_reinject_next_t next0, next1;
452 vlib_buffer_t *b0, *b1;
464 b0 = vlib_get_buffer (vm, bi0);
465 b1 = vlib_get_buffer (vm, bi1);
/* DVR packets short-circuit to l2-output; others follow the arc. */
467 if (b0->flags & VNET_BUFFER_F_IS_DVR)
468 next0 = DVR_REINJECT_OUTPUT;
470 vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_TX],
473 if (b1->flags & VNET_BUFFER_F_IS_DVR)
474 next1 = DVR_REINJECT_OUTPUT;
476 vnet_feature_next(vnet_buffer(b1)->sw_if_index[VLIB_TX],
479 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
481 dvr_dpo_trace_t *tr0;
483 tr0 = vlib_add_trace (vm, node, b0, sizeof (*tr0));
484 tr0->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
486 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
488 dvr_dpo_trace_t *tr1;
490 tr1 = vlib_add_trace (vm, node, b1, sizeof (*tr1));
491 tr1->sw_if_index = vnet_buffer(b1)->sw_if_index[VLIB_TX];
494 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
495 n_left_to_next, bi0, bi1,
/* Single-buffer tail loop: same logic as above. */
499 while (n_left_from > 0 && n_left_to_next > 0)
501 dvr_reinject_next_t next0;
512 b0 = vlib_get_buffer (vm, bi0);
514 if (b0->flags & VNET_BUFFER_F_IS_DVR)
515 next0 = DVR_REINJECT_OUTPUT;
517 vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_TX],
520 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
524 tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
525 tr->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_TX];
528 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
529 n_left_to_next, bi0, next0);
531 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
533 return from_frame->n_vectors;
/* v4 reinject entry point: protocol-independent inline does the work. */
537 ip4_dvr_reinject (vlib_main_t * vm,
538 vlib_node_runtime_t * node,
539 vlib_frame_t * from_frame)
541 return (dvr_reinject_inline(vm, node, from_frame));
/* v6 reinject entry point: identical body, registered on the v6 arc. */
545 ip6_dvr_reinject (vlib_main_t * vm,
546 vlib_node_runtime_t * node,
547 vlib_frame_t * from_frame)
549 return (dvr_reinject_inline(vm, node, from_frame));
/* Reinject node registrations; sole next node is l2-output. */
552 VLIB_REGISTER_NODE (ip4_dvr_reinject_node) = {
553 .function = ip4_dvr_reinject,
554 .name = "ip4-dvr-reinject",
555 .vector_size = sizeof (u32),
556 .format_trace = format_dvr_dpo_trace,
560 [DVR_REINJECT_OUTPUT] = "l2-output",
564 VLIB_REGISTER_NODE (ip6_dvr_reinject_node) = {
565 .function = ip6_dvr_reinject,
566 .name = "ip6-dvr-reinject",
567 .vector_size = sizeof (u32),
568 .format_trace = format_dvr_dpo_trace,
572 [DVR_REINJECT_OUTPUT] = "l2-output",
/*
 * Feature registrations on the ipX-output arcs, ordered after NAT/ACL
 * output features so those still see DVR packets.
 */
576 VNET_FEATURE_INIT (ip4_dvr_reinject_feat_node, static) =
578 .arc_name = "ip4-output",
579 .node_name = "ip4-dvr-reinject",
580 .runs_after = VNET_FEATURES ("nat44-in2out-output",
581 "acl-plugin-out-ip4-fa"),
583 VNET_FEATURE_INIT (ip6_dvr_reinject_feat_node, static) =
585 .arc_name = "ip6-output",
586 .node_name = "ip6-dvr-reinject",
587 .runs_after = VNET_FEATURES ("acl-plugin-out-ip6-fa"),
590 VLIB_NODE_FUNCTION_MULTIARCH (ip4_dvr_reinject_node, ip4_dvr_reinject)
591 VLIB_NODE_FUNCTION_MULTIARCH (ip6_dvr_reinject_node, ip6_dvr_reinject)