/*
 * l2_patch.c : layer 2 interface cross-connect (patch) module
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #include <vlib/vlib.h>
16 #include <vnet/vnet.h>
17 #include <vnet/pg/pg.h>
18 #include <vnet/ethernet/ethernet.h>
19 #include <vnet/feature/feature.h>
20 #include <vppinfra/error.h>
24 /* vector of dispositions, indexed by rx_sw_if_index */
25 u32 *tx_next_by_rx_sw_if_index;
26 u32 *tx_sw_if_index_by_rx_sw_if_index;
28 /* convenience variables */
29 vlib_main_t *vlib_main;
30 vnet_main_t *vnet_main;
39 /* packet trace format function */
41 format_l2_patch_trace (u8 * s, va_list * args)
43 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
44 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
45 l2_patch_trace_t *t = va_arg (*args, l2_patch_trace_t *);
47 s = format (s, "L2_PATCH: rx %d tx %d", t->rx_sw_if_index,
52 l2_patch_main_t l2_patch_main;
54 static vlib_node_registration_t l2_patch_node;
#define foreach_l2_patch_error                  \
_(PATCHED, "L2 patch packets")                  \
_(DROPPED, "L2 patch misconfigured drops")

typedef enum
{
#define _(sym,str) L2_PATCH_ERROR_##sym,
  foreach_l2_patch_error
#undef _
    L2_PATCH_N_ERROR,
} l2_patch_error_t;

static char *l2_patch_error_strings[] = {
#define _(sym,string) string,
  foreach_l2_patch_error
#undef _
};

/** Dispositions for patched packets; per-interface tx nexts are added
    dynamically after L2_PATCH_N_NEXT via vlib_node_add_next(). */
typedef enum
{
  L2_PATCH_NEXT_DROP,
  L2_PATCH_N_NEXT,
} l2_patch_next_t;
81 l2_patch_node_fn (vlib_main_t * vm,
82 vlib_node_runtime_t * node, vlib_frame_t * frame)
84 u32 n_left_from, *from, *to_next;
85 l2_patch_next_t next_index;
86 l2_patch_main_t *l2pm = &l2_patch_main;
87 vlib_node_t *n = vlib_get_node (vm, l2_patch_node.index);
88 u32 node_counter_base_index = n->error_heap_index;
89 vlib_error_main_t *em = &vm->error_main;
91 from = vlib_frame_vector_args (frame);
92 n_left_from = frame->n_vectors;
93 next_index = node->cached_next_index;
95 while (n_left_from > 0)
99 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
101 while (n_left_from >= 8 && n_left_to_next >= 4)
103 u32 bi0, bi1, bi2, bi3;
104 vlib_buffer_t *b0, *b1, *b2, *b3;
105 u32 next0, next1, next2, next3;
106 u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
108 /* Prefetch next iteration. */
110 vlib_buffer_t *p4, *p5, *p6, *p7;
112 p4 = vlib_get_buffer (vm, from[4]);
113 p5 = vlib_get_buffer (vm, from[5]);
114 p6 = vlib_get_buffer (vm, from[6]);
115 p7 = vlib_get_buffer (vm, from[7]);
117 vlib_prefetch_buffer_header (p4, LOAD);
118 vlib_prefetch_buffer_header (p5, LOAD);
119 vlib_prefetch_buffer_header (p6, LOAD);
120 vlib_prefetch_buffer_header (p7, LOAD);
123 /* speculatively enqueue b0 and b1 to the current next frame */
124 to_next[0] = bi0 = from[0];
125 to_next[1] = bi1 = from[1];
126 to_next[2] = bi2 = from[2];
127 to_next[3] = bi3 = from[3];
133 b0 = vlib_get_buffer (vm, bi0);
134 b1 = vlib_get_buffer (vm, bi1);
135 b2 = vlib_get_buffer (vm, bi2);
136 b3 = vlib_get_buffer (vm, bi3);
138 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
139 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
140 sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
141 sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
143 ASSERT (l2pm->tx_next_by_rx_sw_if_index[sw_if_index0] != ~0);
144 ASSERT (l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0] != ~0);
145 ASSERT (l2pm->tx_next_by_rx_sw_if_index[sw_if_index1] != ~0);
146 ASSERT (l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index1] != ~0);
147 ASSERT (l2pm->tx_next_by_rx_sw_if_index[sw_if_index2] != ~0);
148 ASSERT (l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index2] != ~0);
149 ASSERT (l2pm->tx_next_by_rx_sw_if_index[sw_if_index3] != ~0);
150 ASSERT (l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index3] != ~0);
152 next0 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index0];
153 next1 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index1];
154 next2 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index2];
155 next3 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index3];
157 vnet_buffer (b0)->sw_if_index[VLIB_TX] =
158 l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0];
159 vnet_buffer (b1)->sw_if_index[VLIB_TX] =
160 l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index1];
161 vnet_buffer (b2)->sw_if_index[VLIB_TX] =
162 l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index2];
163 vnet_buffer (b3)->sw_if_index[VLIB_TX] =
164 l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index3];
166 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
168 if (b0->flags & VLIB_BUFFER_IS_TRACED)
170 l2_patch_trace_t *t =
171 vlib_add_trace (vm, node, b0, sizeof (*t));
172 t->rx_sw_if_index = sw_if_index0;
174 l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0];
176 if (b1->flags & VLIB_BUFFER_IS_TRACED)
178 l2_patch_trace_t *t =
179 vlib_add_trace (vm, node, b1, sizeof (*t));
180 t->rx_sw_if_index = sw_if_index1;
182 l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index1];
184 if (b2->flags & VLIB_BUFFER_IS_TRACED)
186 l2_patch_trace_t *t =
187 vlib_add_trace (vm, node, b2, sizeof (*t));
188 t->rx_sw_if_index = sw_if_index2;
190 l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index2];
192 if (b3->flags & VLIB_BUFFER_IS_TRACED)
194 l2_patch_trace_t *t =
195 vlib_add_trace (vm, node, b3, sizeof (*t));
196 t->rx_sw_if_index = sw_if_index3;
198 l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index3];
202 /* verify speculative enqueues, maybe switch current next frame */
203 vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
204 to_next, n_left_to_next,
206 next0, next1, next2, next3);
209 while (n_left_from > 0 && n_left_to_next > 0)
216 /* speculatively enqueue b0 to the current next frame */
224 b0 = vlib_get_buffer (vm, bi0);
226 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
228 ASSERT (l2pm->tx_next_by_rx_sw_if_index[sw_if_index0] != ~0);
229 ASSERT (l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0] != ~0);
231 next0 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index0];
232 vnet_buffer (b0)->sw_if_index[VLIB_TX] =
233 l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0];
235 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
237 if (b0->flags & VLIB_BUFFER_IS_TRACED)
239 l2_patch_trace_t *t =
240 vlib_add_trace (vm, node, b0, sizeof (*t));
241 t->rx_sw_if_index = sw_if_index0;
243 l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0];
247 /* verify speculative enqueue, maybe switch current next frame */
248 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
249 to_next, n_left_to_next,
253 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
256 em->counters[node_counter_base_index + L2_PATCH_ERROR_PATCHED] +=
259 return frame->n_vectors;
263 VLIB_REGISTER_NODE (l2_patch_node, static) = {
264 .function = l2_patch_node_fn,
266 .vector_size = sizeof (u32),
267 .format_trace = format_l2_patch_trace,
268 .type = VLIB_NODE_TYPE_INTERNAL,
270 .n_errors = ARRAY_LEN(l2_patch_error_strings),
271 .error_strings = l2_patch_error_strings,
273 .n_next_nodes = L2_PATCH_N_NEXT,
275 /* edit / add dispositions here */
277 [L2_PATCH_NEXT_DROP] = "error-drop",
282 VLIB_NODE_FUNCTION_MULTIARCH (l2_patch_node, l2_patch_node_fn)
283 int vnet_l2_patch_add_del (u32 rx_sw_if_index, u32 tx_sw_if_index,
286 l2_patch_main_t *l2pm = &l2_patch_main;
287 vnet_hw_interface_t *rxhi, *txhi;
291 * We assume that the API msg handler has used 2x VALIDATE_SW_IF_INDEX
295 rxhi = vnet_get_sup_hw_interface (l2pm->vnet_main, rx_sw_if_index);
297 /* Make sure caller didn't pass a vlan subif, etc. */
298 if (rxhi->sw_if_index != rx_sw_if_index)
299 return VNET_API_ERROR_INVALID_SW_IF_INDEX;
301 txhi = vnet_get_sup_hw_interface (l2pm->vnet_main, tx_sw_if_index);
302 if (txhi->sw_if_index != tx_sw_if_index)
303 return VNET_API_ERROR_INVALID_SW_IF_INDEX_2;
307 tx_next_index = vlib_node_add_next (l2pm->vlib_main,
309 txhi->output_node_index);
311 vec_validate_init_empty (l2pm->tx_next_by_rx_sw_if_index,
314 l2pm->tx_next_by_rx_sw_if_index[rx_sw_if_index] = tx_next_index;
315 vec_validate_init_empty (l2pm->tx_sw_if_index_by_rx_sw_if_index,
317 l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index]
320 ethernet_set_flags (l2pm->vnet_main, rxhi->hw_if_index,
321 ETHERNET_INTERFACE_FLAG_ACCEPT_ALL);
323 vnet_feature_enable_disable ("device-input", "l2-patch",
324 rxhi->hw_if_index, 1, 0, 0);
328 ethernet_set_flags (l2pm->vnet_main, rxhi->hw_if_index,
329 0 /* disable promiscuous mode */ );
331 vnet_feature_enable_disable ("device-input", "l2-patch",
332 rxhi->hw_if_index, 0, 0, 0);
333 if (vec_len (l2pm->tx_next_by_rx_sw_if_index) > rx_sw_if_index)
335 l2pm->tx_next_by_rx_sw_if_index[rx_sw_if_index] = ~0;
336 l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index] = ~0;
343 static clib_error_t *
344 test_patch_command_fn (vlib_main_t * vm,
345 unformat_input_t * input, vlib_cli_command_t * cmd)
347 l2_patch_main_t *l2pm = &l2_patch_main;
348 unformat_input_t _line_input, *line_input = &_line_input;
349 u32 rx_sw_if_index, tx_sw_if_index;
354 clib_error_t *error = NULL;
356 /* Get a line of input. */
357 if (!unformat_user (input, unformat_line_input, line_input))
360 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
362 if (unformat (line_input, "rx %U", unformat_vnet_sw_interface,
363 l2pm->vnet_main, &rx_sw_if_index))
365 else if (unformat (line_input, "tx %U", unformat_vnet_sw_interface,
366 l2pm->vnet_main, &tx_sw_if_index))
368 else if (unformat (line_input, "del"))
376 error = clib_error_return (0, "rx interface not set");
382 error = clib_error_return (0, "tx interface not set");
386 rv = vnet_l2_patch_add_del (rx_sw_if_index, tx_sw_if_index, is_add);
393 case VNET_API_ERROR_INVALID_SW_IF_INDEX:
394 error = clib_error_return (0, "rx interface not a physical port");
397 case VNET_API_ERROR_INVALID_SW_IF_INDEX_2:
398 error = clib_error_return (0, "tx interface not a physical port");
402 error = clib_error_return
403 (0, "WARNING: vnet_l2_patch_add_del returned %d", rv);
409 unformat_free (line_input);
415 * Create or delete a Layer 2 patch.
418 * @cliexstart{test l2patch rx <intfc> tx <intfc> [del]}
420 * @todo This is incomplete. This needs a detailed description and a
424 VLIB_CLI_COMMAND (test_patch_command, static) = {
425 .path = "test l2patch",
426 .short_help = "test l2patch rx <intfc> tx <intfc> [del]",
427 .function = test_patch_command_fn,
431 /** Display the contents of the l2patch table. */
432 static clib_error_t *
433 show_l2patch (vlib_main_t * vm,
434 unformat_input_t * input, vlib_cli_command_t * cmd)
436 l2_patch_main_t *l2pm = &l2_patch_main;
440 ASSERT (vec_len (l2pm->tx_next_by_rx_sw_if_index) ==
441 vec_len (l2pm->tx_sw_if_index_by_rx_sw_if_index));
443 for (rx_sw_if_index = 0;
444 rx_sw_if_index < vec_len (l2pm->tx_sw_if_index_by_rx_sw_if_index);
448 l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index];
449 if (tx_sw_if_index != ~0)
452 vlib_cli_output (vm, "%26U -> %U",
453 format_vnet_sw_if_index_name,
454 l2pm->vnet_main, rx_sw_if_index,
455 format_vnet_sw_if_index_name,
456 l2pm->vnet_main, tx_sw_if_index);
461 vlib_cli_output (vm, "no l2patch entries");
467 * Show Layer 2 patch entries.
470 * @cliexstart{show l2patch}
472 * @todo This is incomplete. This needs a detailed description and a
476 VLIB_CLI_COMMAND (show_l2patch_cli, static) = {
477 .path = "show l2patch",
478 .short_help = "Show l2 interface cross-connect entries",
479 .function = show_l2patch,
484 l2_patch_init (vlib_main_t * vm)
486 l2_patch_main_t *mp = &l2_patch_main;
489 mp->vnet_main = vnet_get_main ();
494 VLIB_INIT_FUNCTION (l2_patch_init);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */