/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vppinfra/error.h>
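/*
 * l2-patch: a unidirectional L2 cross-connect ("patch panel").
 * Packets received on a configured rx interface are handed directly
 * to the tx interface's output node, bypassing the normal L2/L3 paths.
 */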
typedef struct {
  u32 cached_next_index;
  u32 cached_rx_sw_if_index;

  /* vector of dispositions, indexed by rx_sw_if_index */
  u32 *tx_next_by_rx_sw_if_index;
  u32 *tx_sw_if_index_by_rx_sw_if_index;

  /* convenience variables */
  vlib_main_t * vlib_main;
  vnet_main_t * vnet_main;
} l2_patch_main_t;
typedef struct {
  u32 rx_sw_if_index;
  u32 tx_sw_if_index;
} l2_patch_trace_t;

/* packet trace format function */
static u8 * format_l2_patch_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  l2_patch_trace_t * t = va_arg (*args, l2_patch_trace_t *);

  s = format (s, "L2_PATCH: rx %d tx %d", t->rx_sw_if_index,
              t->tx_sw_if_index);
  return s;
}
l2_patch_main_t l2_patch_main;

static vlib_node_registration_t l2_patch_node;
#define foreach_l2_patch_error                  \
_(PATCHED, "L2 patch packets")                  \
_(DROPPED, "L2 patch misconfigured drops")

typedef enum {
#define _(sym,str) L2_PATCH_ERROR_##sym,
  foreach_l2_patch_error
#undef _
  L2_PATCH_N_ERROR,
} l2_patch_error_t;

static char * l2_patch_error_strings[] = {
#define _(sym,string) string,
  foreach_l2_patch_error
#undef _
};

typedef enum {
  L2_PATCH_NEXT_DROP,
  L2_PATCH_N_NEXT,
} l2_patch_next_t;
static uword
l2_patch_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  l2_patch_next_t next_index;
  l2_patch_main_t * l2pm = &l2_patch_main;
  vlib_node_t *n = vlib_get_node (vm, l2_patch_node.index);
  u32 node_counter_base_index = n->error_heap_index;
  vlib_error_main_t * em = &vm->error_main;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
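  /*
   * Standard vlib dispatch pattern: a dual loop handles two buffers per
   * iteration (prefetching headers two buffers ahead), then a single-buffer
   * loop drains the remainder. Buffers are enqueued speculatively to the
   * last-used next frame and validated afterwards.
   */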
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            /* So stupid / simple, we don't need to prefetch data */
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];

          ASSERT(l2pm->tx_next_by_rx_sw_if_index[sw_if_index0] != ~0);
          ASSERT(l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0] != ~0);
          ASSERT(l2pm->tx_next_by_rx_sw_if_index[sw_if_index1] != ~0);
          ASSERT(l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index1] != ~0);

          if (PREDICT_TRUE (sw_if_index0 == l2pm->cached_rx_sw_if_index))
            next0 = l2pm->cached_next_index;
          else
            {
              next0 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index0];
              l2pm->cached_rx_sw_if_index = sw_if_index0;
              l2pm->cached_next_index = next0;
            }

          if (PREDICT_TRUE (sw_if_index1 == l2pm->cached_rx_sw_if_index))
            next1 = l2pm->cached_next_index;
          else
            next1 = l2pm->tx_next_by_rx_sw_if_index [sw_if_index1];

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  l2_patch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->rx_sw_if_index = sw_if_index0;
                  t->tx_sw_if_index =
                    l2pm->tx_sw_if_index_by_rx_sw_if_index [sw_if_index0];
                }
              if (b1->flags & VLIB_BUFFER_IS_TRACED)
                {
                  l2_patch_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  t->rx_sw_if_index = sw_if_index1;
                  t->tx_sw_if_index =
                    l2pm->tx_sw_if_index_by_rx_sw_if_index [sw_if_index1];
                }
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          u32 sw_if_index0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];

          ASSERT(l2pm->tx_next_by_rx_sw_if_index[sw_if_index0] != ~0);
          ASSERT(l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0] != ~0);

          if (PREDICT_TRUE (sw_if_index0 == l2pm->cached_rx_sw_if_index))
            next0 = l2pm->cached_next_index;
          else
            {
              next0 = l2pm->tx_next_by_rx_sw_if_index [sw_if_index0];
              l2pm->cached_rx_sw_if_index = sw_if_index0;
              l2pm->cached_next_index = next0;
            }

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  l2_patch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->rx_sw_if_index = sw_if_index0;
                  t->tx_sw_if_index =
                    l2pm->tx_sw_if_index_by_rx_sw_if_index [sw_if_index0];
                }
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  em->counters[node_counter_base_index + L2_PATCH_ERROR_PATCHED] +=
    frame->n_vectors;

  return frame->n_vectors;
}
VLIB_REGISTER_NODE (l2_patch_node, static) = {
  .function = l2_patch_node_fn,
  .name = "l2-patch",
  .vector_size = sizeof (u32),
  .format_trace = format_l2_patch_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2_patch_error_strings),
  .error_strings = l2_patch_error_strings,

  .n_next_nodes = L2_PATCH_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
    [L2_PATCH_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (l2_patch_node, l2_patch_node_fn)
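/*
 * Configuration: to create a patch, add a next arc from the l2-patch node
 * to the tx interface's output node, record the disposition by
 * rx sw_if_index, put the rx interface into promiscuous mode, and
 * redirect its input to the l2-patch node. Deletion reverses each step.
 */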
int vnet_l2_patch_add_del (u32 rx_sw_if_index, u32 tx_sw_if_index, int is_add)
{
  l2_patch_main_t * l2pm = &l2_patch_main;
  vnet_hw_interface_t * rxhi, *txhi;
  u32 tx_next_index;

  /*
   * We assume that the API msg handler has used 2x VALIDATE_SW_IF_INDEX
   * macros, so both indices are known to be valid.
   */
  rxhi = vnet_get_sup_hw_interface (l2pm->vnet_main, rx_sw_if_index);

  /* Make sure caller didn't pass a vlan subif, etc. */
  if (rxhi->sw_if_index != rx_sw_if_index)
    return VNET_API_ERROR_INVALID_SW_IF_INDEX;

  txhi = vnet_get_sup_hw_interface (l2pm->vnet_main, tx_sw_if_index);
  if (txhi->sw_if_index != tx_sw_if_index)
    return VNET_API_ERROR_INVALID_SW_IF_INDEX_2;

  if (is_add)
    {
      tx_next_index = vlib_node_add_next (l2pm->vlib_main,
                                          l2_patch_node.index,
                                          txhi->output_node_index);

      vec_validate_init_empty (l2pm->tx_next_by_rx_sw_if_index,
                               rx_sw_if_index, ~0);
      l2pm->tx_next_by_rx_sw_if_index[rx_sw_if_index] = tx_next_index;
      vec_validate_init_empty (l2pm->tx_sw_if_index_by_rx_sw_if_index,
                               rx_sw_if_index, ~0);
      l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index]
        = txhi->sw_if_index;

      ethernet_set_flags (l2pm->vnet_main, rxhi->hw_if_index,
                          ETHERNET_INTERFACE_FLAG_ACCEPT_ALL);
      vnet_hw_interface_rx_redirect_to_node (l2pm->vnet_main,
                                             rxhi->hw_if_index,
                                             l2_patch_node.index);
    }
  else
    {
      ethernet_set_flags (l2pm->vnet_main, rxhi->hw_if_index,
                          0 /* disable promiscuous mode */);
      vnet_hw_interface_rx_redirect_to_node (l2pm->vnet_main,
                                             rxhi->hw_if_index,
                                             ~0 /* disable */);
      if (vec_len (l2pm->tx_next_by_rx_sw_if_index) > rx_sw_if_index)
        {
          l2pm->tx_next_by_rx_sw_if_index[rx_sw_if_index] = ~0;
          l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index] = ~0;
        }
    }

  return 0;
}
static clib_error_t *
test_patch_command_fn (vlib_main_t * vm,
                       unformat_input_t * input,
                       vlib_cli_command_t * cmd)
{
  l2_patch_main_t * l2pm = &l2_patch_main;
  unformat_input_t _line_input, * line_input = &_line_input;
  u32 rx_sw_if_index, tx_sw_if_index;
  int rv, rx_set = 0, tx_set = 0, is_add = 1;

  /* Get a line of input. */
  if (! unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "rx %U", unformat_vnet_sw_interface,
                    l2pm->vnet_main, &rx_sw_if_index))
        rx_set = 1;
      else if (unformat (line_input, "tx %U", unformat_vnet_sw_interface,
                         l2pm->vnet_main, &tx_sw_if_index))
        tx_set = 1;
      else if (unformat (line_input, "del"))
        is_add = 0;
      else
        break;
    }

  if (rx_set == 0)
    return clib_error_return (0, "rx interface not set");
  if (tx_set == 0)
    return clib_error_return (0, "tx interface not set");

  rv = vnet_l2_patch_add_del (rx_sw_if_index, tx_sw_if_index, is_add);

  switch (rv)
    {
    case 0:
      break;
    case VNET_API_ERROR_INVALID_SW_IF_INDEX:
      return clib_error_return (0, "rx interface not a physical port");
    case VNET_API_ERROR_INVALID_SW_IF_INDEX_2:
      return clib_error_return (0, "tx interface not a physical port");
    default:
      return clib_error_return
        (0, "WARNING: vnet_l2_patch_add_del returned %d", rv);
    }

  return 0;
}
VLIB_CLI_COMMAND (test_patch_command, static) = {
  .path = "test l2patch",
  .short_help = "rx <intfc> tx <intfc> [del]",
  .function = test_patch_command_fn,
};
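/*
 * Example usage (interface names are illustrative):
 *   test l2patch rx GigabitEthernet2/0/0 tx GigabitEthernet2/0/1
 *   test l2patch rx GigabitEthernet2/0/0 tx GigabitEthernet2/0/1 del
 */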
/* Display the contents of the l2patch table. */
static clib_error_t *
show_l2patch (vlib_main_t * vm,
              unformat_input_t * input,
              vlib_cli_command_t * cmd)
{
  l2_patch_main_t * l2pm = &l2_patch_main;
  u32 rx_sw_if_index;
  u32 no_entries = 1;

  ASSERT(vec_len(l2pm->tx_next_by_rx_sw_if_index) ==
         vec_len(l2pm->tx_sw_if_index_by_rx_sw_if_index));

  for (rx_sw_if_index = 0;
       rx_sw_if_index < vec_len (l2pm->tx_sw_if_index_by_rx_sw_if_index);
       rx_sw_if_index++)
    {
      u32 tx_sw_if_index =
        l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index];
      if (tx_sw_if_index != ~0)
        {
          no_entries = 0;
          vlib_cli_output (vm, "%26U -> %U",
                           format_vnet_sw_if_index_name,
                           l2pm->vnet_main, rx_sw_if_index,
                           format_vnet_sw_if_index_name,
                           l2pm->vnet_main, tx_sw_if_index);
        }
    }

  if (no_entries)
    vlib_cli_output (vm, "no l2patch entries");

  return 0;
}
VLIB_CLI_COMMAND (show_l2patch_cli, static) = {
  .path = "show l2patch",
  .short_help = "Show l2 interface cross-connect entries",
  .function = show_l2patch,
};
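/*
 * Example output (interface names are illustrative):
 *         GigabitEthernet2/0/0 -> GigabitEthernet2/0/1
 */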
clib_error_t *l2_patch_init (vlib_main_t *vm)
{
  l2_patch_main_t * mp = &l2_patch_main;

  mp->vlib_main = vm;
  mp->vnet_main = vnet_get_main();

  return 0;
}

VLIB_INIT_FUNCTION (l2_patch_init);