2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vnet/l2/l2_xcrw.h>
19 * General L2 / L3 cross-connect, used to set up
20 * "L2 interface <--> your-favorite-tunnel-encap" tunnels.
22 * We set up a typical L2 cross-connect or (future) bridge
23 * to hook L2 interface(s) up to the L3 stack in arbitrary ways.
25 * Each l2_xcrw adjacency specifies 3 things:
27 * 1. The next graph node (presumably in the L3 stack) to
28 * process the (L2 -> L3) packet
30 * 2. A new value for vnet_buffer(b)->sw_if_index[VLIB_TX]
31 * (i.e. a lookup FIB index),
33 * 3. A rewrite string to apply.
35 * Example: to cross-connect an L2 interface or (future) bridge
36 * to an mpls-o-gre tunnel, set up the L2 rewrite string as shown in
37 * mpls_gre_rewrite, and use "mpls-post-rewrite" to fix the
38 * GRE IP header checksum and length fields.
47 /* packet trace format function */
/*
 * Packet trace formatter for the l2-xcrw node: prints the next index
 * chosen for the packet and the tx fib/sw_if_index written into the
 * buffer metadata.
 * NOTE(review): this chunk is missing lines around this function
 * (static u8 * return type, braces, trailing "return s;") -- confirm
 * against the full file before editing.
 */
49 format_l2_xcrw_trace (u8 * s, va_list * args)
51   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
52   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
53   l2_xcrw_trace_t *t = va_arg (*args, l2_xcrw_trace_t *);
55   s = format (s, "L2_XCRW: next index %d tx_fib_index %d",
56 	      t->next_index, t->tx_fib_index);
/*
 * Feature main struct: declared extern for all translation units,
 * defined exactly once (only in the non-march-variant build of this
 * file, to avoid duplicate definitions across CPU-variant objects).
 */
60 extern l2_xcrw_main_t l2_xcrw_main;
62 #ifndef CLIB_MARCH_VARIANT
63 l2_xcrw_main_t l2_xcrw_main;
64 #endif /* CLIB_MARCH_VARIANT */
/*
 * Human-readable strings for the node's error counters, expanded from
 * the error foreach macro.
 * NOTE(review): the foreach_l2_xcrw_error invocation, the #undef, and
 * the closing "};" are not visible in this chunk.
 */
66 static char *l2_xcrw_error_strings[] = {
67 #define _(sym,string) string,
/*
 * l2-xcrw dispatch function.  For each packet: look up the per-rx-
 * interface adjacency, copy the adjacency's sw_if_index into
 * vnet_buffer(b)->sw_if_index[VLIB_TX] (a lookup fib index, per the
 * header comment), apply the stored rewrite in front of the current
 * data, and enqueue to the adjacency's next index.  Standard vlib
 * dual-loop + single-loop pattern with speculative enqueue.
 *
 * NOTE(review): many interior lines are missing from this chunk
 * (braces, bi0/bi1/next0/next1 declarations, from/to_next pointer
 * bumps, n_left decrements).  Comments below describe only what the
 * visible lines show.
 */
72 VLIB_NODE_FN (l2_xcrw_node) (vlib_main_t * vm,
73 			     vlib_node_runtime_t * node, vlib_frame_t * frame)
75   u32 n_left_from, *from, *to_next;
76   l2_xcrw_next_t next_index;
77   l2_xcrw_main_t *xcm = &l2_xcrw_main;
   /* Base of this node's error counters in the global error heap,
      used for the direct em->counters[] bumps below. */
78   vlib_node_t *n = vlib_get_node (vm, l2_xcrw_node.index);
79   u32 node_counter_base_index = n->error_heap_index;
80   vlib_error_main_t *em = &vm->error_main;
82   from = vlib_frame_vector_args (frame);
83   n_left_from = frame->n_vectors;
84   next_index = node->cached_next_index;
86   while (n_left_from > 0)
90       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
       /* Dual loop: process two packets while prefetching the next two. */
92       while (n_left_from >= 4 && n_left_to_next >= 2)
95 	  vlib_buffer_t *b0, *b1;
97 	  u32 sw_if_index0, sw_if_index1;
98 	  l2_xcrw_adjacency_t *adj0, *adj1;
100 	  /* Prefetch next iteration. */
102 	    vlib_buffer_t *p2, *p3;
104 	    p2 = vlib_get_buffer (vm, from[2]);
105 	    p3 = vlib_get_buffer (vm, from[3]);
107 	    vlib_prefetch_buffer_header (p2, LOAD);
108 	    vlib_prefetch_buffer_header (p3, LOAD);
	    /* Store-prefetch: the rewrite below writes into pkt data. */
110 	    clib_prefetch_store (p2->data);
111 	    clib_prefetch_store (p3->data);
114 	  /* speculatively enqueue b0 and b1 to the current next frame */
115 	  to_next[0] = bi0 = from[0];
116 	  to_next[1] = bi1 = from[1];
122 	  b0 = vlib_get_buffer (vm, bi0);
123 	  b1 = vlib_get_buffer (vm, bi1);
	  /* Adjacency is indexed by the packet's RX interface. */
125 	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
126 	  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
128 	  adj0 = vec_elt_at_index (xcm->adj_by_sw_if_index, sw_if_index0);
129 	  adj1 = vec_elt_at_index (xcm->adj_by_sw_if_index, sw_if_index1);
131 	  next0 = adj0->rewrite_header.next_index;
132 	  vnet_buffer (b0)->sw_if_index[VLIB_TX] =
133 	    adj0->rewrite_header.sw_if_index;
135 	  next1 = adj1->rewrite_header.next_index;
136 	  vnet_buffer (b1)->sw_if_index[VLIB_TX] =
137 	    adj1->rewrite_header.sw_if_index;
	  /* NOTE(review): indexing error counters by next1 looks wrong --
	     everywhere else in this node counters are indexed by an
	     L2_XCRW_ERROR_* code (see the FWD bumps below).  An arbitrary
	     next index would read/write past the node's counter range.
	     Confirm against the upstream file; likely a corrupted or
	     misplaced line. */
139 	  em->counters[node_counter_base_index + next1]++;
	  /* next == 0 is presumably L2_XCRW_NEXT_DROP: skip rewrite and
	     the FWD counter for dropped packets -- TODO confirm enum. */
141 	  if (PREDICT_TRUE (next0 > 0))
143 	      u8 *h0 = vlib_buffer_get_current (b0);
144 	      vnet_rewrite_one_header (adj0[0], h0,
145 				       adj0->rewrite_header.data_bytes);
	      /* Negative advance exposes the newly written rewrite bytes. */
146 	      vlib_buffer_advance (b0, -adj0->rewrite_header.data_bytes);
147 	      em->counters[node_counter_base_index + L2_XCRW_ERROR_FWD]++;
150 	  if (PREDICT_TRUE (next1 > 0))
152 	      u8 *h1 = vlib_buffer_get_current (b1);
153 	      vnet_rewrite_one_header (adj1[0], h1,
154 				       adj1->rewrite_header.data_bytes);
155 	      vlib_buffer_advance (b1, -adj1->rewrite_header.data_bytes);
156 	      em->counters[node_counter_base_index + L2_XCRW_ERROR_FWD]++;
	  /* NOTE(review): outer trace-flag test duplicates the condition
	     in the per-buffer checks below -- redundant but harmless. */
160 	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
162 	      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
163 				 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
166 		    vlib_add_trace (vm, node, b0, sizeof (*t));
167 		  t->next_index = next0;
168 		  t->tx_fib_index = adj0->rewrite_header.sw_if_index;
170 	      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
171 				 && (b1->flags & VLIB_BUFFER_IS_TRACED)))
174 		    vlib_add_trace (vm, node, b1, sizeof (*t));
175 		  t->next_index = next1;
176 		  t->tx_fib_index = adj1->rewrite_header.sw_if_index;
180 	  /* verify speculative enqueues, maybe switch current next frame */
181 	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
182 					   to_next, n_left_to_next,
183 					   bi0, bi1, next0, next1);
       /* Single loop: same per-packet work as above, one buffer at a time. */
186       while (n_left_from > 0 && n_left_to_next > 0)
192 	  l2_xcrw_adjacency_t *adj0;
194 	  /* speculatively enqueue b0 to the current next frame */
202 	  b0 = vlib_get_buffer (vm, bi0);
204 	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
206 	  adj0 = vec_elt_at_index (xcm->adj_by_sw_if_index, sw_if_index0);
208 	  next0 = adj0->rewrite_header.next_index;
209 	  vnet_buffer (b0)->sw_if_index[VLIB_TX] =
210 	    adj0->rewrite_header.sw_if_index;
212 	  if (PREDICT_TRUE (next0 > 0))
214 	      u8 *h0 = vlib_buffer_get_current (b0);
215 	      vnet_rewrite_one_header (adj0[0], h0,
216 				       adj0->rewrite_header.data_bytes);
217 	      vlib_buffer_advance (b0, -adj0->rewrite_header.data_bytes);
218 	      em->counters[node_counter_base_index + L2_XCRW_ERROR_FWD]++;
221 	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
222 			     && (b0->flags & VLIB_BUFFER_IS_TRACED)))
224 	      l2_xcrw_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
225 	      t->next_index = next0;
226 	      t->tx_fib_index = adj0->rewrite_header.sw_if_index;
229 	  /* verify speculative enqueue, maybe switch current next frame */
230 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
231 					   to_next, n_left_to_next,
235       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
238   return frame->n_vectors;
/*
 * Node registration for l2-xcrw.
 * NOTE(review): the .name field, the error-strings terminator, and all
 * next-node entries except DROP are not visible in this chunk.
 */
241 VLIB_REGISTER_NODE (l2_xcrw_node) = {
243   .vector_size = sizeof (u32),
244   .format_trace = format_l2_xcrw_trace,
245   .type = VLIB_NODE_TYPE_INTERNAL,
247   .n_errors = ARRAY_LEN(l2_xcrw_error_strings),
248   .error_strings = l2_xcrw_error_strings,
250   .n_next_nodes = L2_XCRW_N_NEXT,
252   /* edit / add dispositions here */
254     [L2_XCRW_NEXT_DROP] = "error-drop",
258 #ifndef CLIB_MARCH_VARIANT
/*
 * One-time init: cache the vnet main pointer and create the hash that
 * maps an L2 sw_if_index to its tunnel pool index.
 * NOTE(review): taking the address of vnet_main directly is unusual --
 * most code uses vnet_get_main(); confirm `vnet_main` is visible here.
 */
260 l2_xcrw_init (vlib_main_t * vm)
262   l2_xcrw_main_t *mp = &l2_xcrw_main;
265   mp->vnet_main = &vnet_main;
266   mp->tunnel_index_by_l2_sw_if_index = hash_create (0, sizeof (uword));
271 VLIB_INIT_FUNCTION (l2_xcrw_init);
/* Interface-name formatter for the sham xcrw tunnel devices:
 * renders "xcrw<dev_instance>". */
274 format_xcrw_name (u8 * s, va_list * args)
276   u32 dev_instance = va_arg (*args, u32);
277   return format (s, "xcrw%d", dev_instance);
/* Device class for the sham tunnel interfaces; only the name
 * formatter is populated here (no real tx function is needed -- see
 * create_xcrw_interface, which points output at the l2-xcrw node). */
280 VNET_DEVICE_CLASS (xcrw_device_class,static) = {
282   .format_device_name = format_xcrw_name,
285 /* Create a sham tunnel interface and return its sw_if_index */
/*
 * Registers an ethernet interface in the xcrw device class with an
 * all-zero MAC, brings it admin-up, and redirects its output to the
 * l2-xcrw node so that anything cross-connected to it hits the
 * rewrite path.
 * NOTE(review): declarations of `address`, `instance`, `hw_if_index`,
 * `sw_if_index` and the return statement are not visible in this chunk.
 */
287 create_xcrw_interface (vlib_main_t * vm)
289   vnet_main_t *vnm = vnet_get_main ();
290   vnet_eth_interface_registration_t eir = {};
294   vnet_hw_interface_t *hi;
297   /* mac address doesn't really matter */
298   clib_memset (address, 0, sizeof (address));
301   eir.dev_class_index = xcrw_device_class.index;
   /* comma operator: bump the per-class instance counter and set MAC */
302   eir.dev_instance = instance++, eir.address = address;
303   hw_if_index = vnet_eth_register_interface (vnm, &eir);
305   hi = vnet_get_hw_interface (vnm, hw_if_index);
306   sw_if_index = hi->sw_if_index;
307   vnet_sw_interface_set_flags (vnm, sw_if_index,
308 			       VNET_SW_INTERFACE_FLAG_ADMIN_UP);
310   /* Output to the sham tunnel invokes the encap node */
311   hi->output_node_index = l2_xcrw_node.index;
/*
 * Add or delete an L2 -> L3 cross-connect rewrite.
 *
 * is_add != 0: allocate a tunnel, (re)use or create its sham interface,
 *   build the adjacency for the L2 interface (tx fib index, rewrite
 *   string, dynamic next index toward next_node_index), put the L2
 *   interface into xconnect mode toward the sham interface, and record
 *   the tunnel in the lookup hash.
 * is_add == 0: find the tunnel by l2_sw_if_index, zero the adjacency
 *   (traffic then drops), restore L3 mode, down the sham interface,
 *   and release the tunnel.
 *
 * Returns 0 or VNET_API_ERROR_INVALID_SW_IF_INDEX when deleting a
 * non-existent cross-connect.
 * NOTE(review): declarations of `t` and `p`, the is_add/else braces and
 * the return statements are not visible in this chunk.
 */
317 vnet_configure_l2_xcrw (vlib_main_t * vm, vnet_main_t * vnm,
318 			u32 l2_sw_if_index, u32 tx_fib_index,
319 			u8 * rewrite, u32 next_node_index, int is_add)
321   l2_xcrw_main_t *xcm = &l2_xcrw_main;
322   l2_xcrw_adjacency_t *a;
329       pool_get (xcm->tunnels, t);
331       /* No interface allocated? Do it. Otherwise, set admin up */
      /* NOTE(review): this relies on freshly allocated / recycled pool
	 elements having tunnel_sw_if_index == 0 -- confirm the pool is
	 zeroed on get or on put in the full file. */
332       if (t->tunnel_sw_if_index == 0)
333 	t->tunnel_sw_if_index = create_xcrw_interface (vm);
335 	vnet_sw_interface_set_flags (vnm, t->tunnel_sw_if_index,
336 				     VNET_SW_INTERFACE_FLAG_ADMIN_UP);
338       t->l2_sw_if_index = l2_sw_if_index;
340       vec_validate (xcm->adj_by_sw_if_index, t->l2_sw_if_index);
342       a = vec_elt_at_index (xcm->adj_by_sw_if_index, t->l2_sw_if_index);
343       clib_memset (a, 0, sizeof (*a));
      /* Per the node's trace formatter, this field carries the tx fib
	 index, despite being named sw_if_index. */
345       a->rewrite_header.sw_if_index = tx_fib_index;
348        * Add or find a dynamic disposition for the successor node,
349        * e.g. so we can ship pkts to mpls_post_rewrite...
351       a->rewrite_header.next_index =
352 	vlib_node_add_next (vm, l2_xcrw_node.index, next_node_index);
354       if (vec_len (rewrite))
355 	vnet_rewrite_set_data (a[0], rewrite, vec_len (rewrite));
      /* Cross-connect the L2 interface to the sham tunnel interface. */
357       set_int_l2_mode (vm, vnm, MODE_L2_XC, t->l2_sw_if_index, 0,
358 		       L2_BD_PORT_TYPE_NORMAL, 0, t->tunnel_sw_if_index);
359       hash_set (xcm->tunnel_index_by_l2_sw_if_index,
360 		t->l2_sw_if_index, t - xcm->tunnels);
      /* --- delete path --- */
365       p = hash_get (xcm->tunnel_index_by_l2_sw_if_index, l2_sw_if_index);
367 	return VNET_API_ERROR_INVALID_SW_IF_INDEX;
369       t = pool_elt_at_index (xcm->tunnels, p[0]);
371       a = vec_elt_at_index (xcm->adj_by_sw_if_index, t->l2_sw_if_index);
372       /* Reset adj to drop traffic */
373       clib_memset (a, 0, sizeof (*a));
375       set_int_l2_mode (vm, vnm, MODE_L3, t->l2_sw_if_index, 0,
376 		       L2_BD_PORT_TYPE_NORMAL, 0, 0);
378       vnet_sw_interface_set_flags (vnm, t->tunnel_sw_if_index, 0 /* down */ );
380       hash_unset (xcm->tunnel_index_by_l2_sw_if_index, l2_sw_if_index);
381       pool_put (xcm->tunnels, t);
387 static clib_error_t *
/*
 * CLI handler for "set interface l2 xcrw": parses the L2 interface,
 * the mandatory next node, and optional tx-fib-id / ipv6 / del / rw
 * hex-string arguments, maps the fib id to a fib index, and calls
 * vnet_configure_l2_xcrw.
 * NOTE(review): declarations (rw, tx_fib_id, l2_sw_if_index, is_add,
 * p, rv), several braces, goto done labels, the "done:" label and
 * vec_free(rw) cleanup are not visible in this chunk.
 */
388 set_l2_xcrw_command_fn (vlib_main_t * vm,
389 			unformat_input_t * input, vlib_cli_command_t * cmd)
391   unformat_input_t _line_input, *line_input = &_line_input;
393   int is_ipv6 = 0;		/* for fib id -> fib index mapping */
395   u32 tx_fib_index = ~0;
396   u32 next_node_index = ~0;
399   vnet_main_t *vnm = vnet_get_main ();
401   clib_error_t *error = NULL;
  /* Get a line of input. */
404   if (!unformat_user (input, unformat_line_input, line_input))
  /* First token must be the L2 interface to cross-connect. */
407   if (!unformat (line_input, "%U",
408 		 unformat_vnet_sw_interface, vnm, &l2_sw_if_index))
410       error = clib_error_return (0, "unknown input '%U'",
411 				 format_unformat_error, line_input);
415   while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
417       if (unformat (line_input, "next %U",
418 		    unformat_vlib_node, vm, &next_node_index))
420       else if (unformat (line_input, "tx-fib-id %d", &tx_fib_id))
422       else if (unformat (line_input, "del"))
424       else if (unformat (line_input, "ipv6"))
      /* NOTE(review): trailing ';' makes this clause a no-op statement;
	 that is fine only if the branch body is intentionally empty --
	 verify against the full file. */
426       else if (unformat (line_input, "rw %U", unformat_hex_string, &rw));
431   if (next_node_index == ~0)
433       error = clib_error_return (0, "next node not specified");
  /* Map tx-fib-id to a fib index in the v4 or v6 table space. */
442 	p = hash_get (ip6_main.fib_index_by_table_id, tx_fib_id);
444 	p = hash_get (ip4_main.fib_index_by_table_id, tx_fib_id);
449 	    clib_error_return (0, "nonexistent tx_fib_id %d", tx_fib_id);
456   rv = vnet_configure_l2_xcrw (vm, vnm, l2_sw_if_index, tx_fib_index,
457 			       rw, next_node_index, is_add);
  /* Translate the API error code to a CLI error message. */
465     case VNET_API_ERROR_INVALID_SW_IF_INDEX:
466       error = clib_error_return (0, "%U not cross-connected",
467 				 format_vnet_sw_if_index_name,
468 				 vnm, l2_sw_if_index);
472       error = clib_error_return (0, "vnet_configure_l2_xcrw returned %d", rv);
478   unformat_free (line_input);
484 * Add or delete a Layer 2 to Layer 3 rewrite cross-connect. This is
485 * used to hook Layer 2 interface(s) up to the Layer 3 stack in
486 * arbitrary ways. For example, cross-connect an L2 interface or
487 * (future) bridge to an mpls-o-gre tunnel. Set up the L2 rewrite
488 * string as shown in mpls_gre_rewrite, and use \"mpls-post-rewrite\"
489 * to fix the GRE IP header checksum and length fields.
492 * @todo This is incomplete. This needs a detailed description and a
/* CLI registration for "set interface l2 xcrw".
 * NOTE(review): the .short_help field assignment line itself is not
 * visible; the two quoted strings below appear to be its value. */
495 VLIB_CLI_COMMAND (set_l2_xcrw_command, static) = {
496   .path = "set interface l2 xcrw",
498     "set interface l2 xcrw <interface> next <node-name>\n"
499     "    [del] [tx-fib-id <id>] [ipv6] rw <hex-bytes>",
500   .function = set_l2_xcrw_command_fn,
503 #endif /* CLIB_MARCH_VARIANT */
/*
 * Formatter for one cross-connect tunnel row (or, when called with
 * null vnm/t as in show_l2xcrw_command_fn, presumably a column header
 * -- the guarding branch is not visible in this chunk).  Prints the
 * L2 and tunnel interface names, the next node, the tx fib index and
 * the raw rewrite bytes.
 */
506 format_l2xcrw (u8 * s, va_list * args)
508   vnet_main_t *vnm = va_arg (*args, vnet_main_t *);
509   l2_xcrw_tunnel_t *t = va_arg (*args, l2_xcrw_tunnel_t *);
510   l2_xcrw_main_t *xcm = &l2_xcrw_main;
511   vlib_main_t *vm = vlib_get_main ();
512   l2_xcrw_adjacency_t *a;
517       s = format (s, "%-25s%s", "L2 interface", "Tunnel Details");
521   s = format (s, "%-25U %U ",
522 	      format_vnet_sw_if_index_name, vnm, t->l2_sw_if_index,
523 	      format_vnet_sw_if_index_name, vnm, t->tunnel_sw_if_index);
525   a = vec_elt_at_index (xcm->adj_by_sw_if_index, t->l2_sw_if_index);
527   s = format (s, "next %U ",
528 	      format_vlib_next_node_name, vm, l2_xcrw_node.index,
529 	      a->rewrite_header.next_index);
531   if (a->rewrite_header.sw_if_index != ~0)
532     s = format (s, "tx fib index %d ", a->rewrite_header.sw_if_index);
534   if (a->rewrite_header.data_bytes)
      /* Rewrite data is stored at the end of the adjacency; back up
	 data_bytes from the byte just past the struct to find it. */
536       rewrite_string = (u8 *) (a + 1);
537       rewrite_string -= a->rewrite_header.data_bytes;
538       s = format (s, "rewrite data: %U ",
539 		  format_hex_bytes, rewrite_string,
540 		  a->rewrite_header.data_bytes);
543   s = format (s, "\n");
549 static clib_error_t *
/*
 * CLI handler for "show l2xcrw": prints a header row (format_l2xcrw
 * called with null args) followed by one row per configured tunnel,
 * or a "none configured" message when the pool is empty.
 * NOTE(review): braces, the early return, and the final return are
 * not visible in this chunk.
 */
550 show_l2xcrw_command_fn (vlib_main_t * vm,
551 			unformat_input_t * input, vlib_cli_command_t * cmd)
553   vnet_main_t *vnm = vnet_get_main ();
554   l2_xcrw_main_t *xcm = &l2_xcrw_main;
557   if (pool_elts (xcm->tunnels) == 0)
559       vlib_cli_output (vm, "No L2 / L3 rewrite cross-connects configured");
  /* Null args select the header line inside format_l2xcrw. */
563   vlib_cli_output (vm, "%U", format_l2xcrw, 0, 0);
565   pool_foreach (t, xcm->tunnels)
567       vlib_cli_output (vm, "%U", format_l2xcrw, vnm, t);
574 * Display a Layer 2 to Layer 3 rewrite cross-connect. This is used to
575 * hook Layer 2 interface(s) up to the Layer 3 stack in arbitrary ways.
577 * @todo This is incomplete. This needs a detailed description and a
/* CLI registration for "show l2xcrw". */
580 VLIB_CLI_COMMAND (show_l2xcrw_command, static) = {
581   .path = "show l2xcrw",
582   .short_help = "show l2xcrw",
583   .function = show_l2xcrw_command_fn,
587 * fd.io coding-style-patch-verification: ON
590 * eval: (c-set-style "gnu")