Add support for multiple microarchitectures in single binary
[vpp.git] / vnet / vnet / l2 / l2_patch.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vppinfra/error.h>

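/*
 * Main state for the L2 patch feature.  The two vectors are indexed by
 * RX sw_if_index and hold, for each patched RX interface, the next-node
 * index to use and the TX sw_if_index it is patched to.
 * cached_rx_sw_if_index / cached_next_index remember the last lookup so
 * the common single-patch case avoids a vector dereference per packet.
 */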
typedef struct {
  u32 cached_next_index;
  u32 cached_rx_sw_if_index;

  /* vector of dispositions, indexed by rx_sw_if_index */
  u32 *tx_next_by_rx_sw_if_index;
  u32 *tx_sw_if_index_by_rx_sw_if_index;

  /* convenience variables */
  vlib_main_t * vlib_main;
  vnet_main_t * vnet_main;
} l2_patch_main_t;

typedef struct {
  u32 rx_sw_if_index;
  u32 tx_sw_if_index;
} l2_patch_trace_t;

/* packet trace format function */
static u8 * format_l2_patch_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  l2_patch_trace_t * t = va_arg (*args, l2_patch_trace_t *);

  s = format (s, "L2_PATCH: rx %d tx %d", t->rx_sw_if_index,
              t->tx_sw_if_index);
  return s;
}

l2_patch_main_t l2_patch_main;

static vlib_node_registration_t l2_patch_node;

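/*
 * Per-node error counters: each _() entry expands below into both the
 * L2_PATCH_ERROR_* enum member and the matching counter string.
 */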
#define foreach_l2_patch_error                  \
_(PATCHED, "L2 patch packets")                  \
_(DROPPED, "L2 patch misconfigured drops")

typedef enum {
#define _(sym,str) L2_PATCH_ERROR_##sym,
  foreach_l2_patch_error
#undef _
  L2_PATCH_N_ERROR,
} l2_patch_error_t;

static char * l2_patch_error_strings[] = {
#define _(sym,string) string,
  foreach_l2_patch_error
#undef _
};

typedef enum {
  L2_PATCH_NEXT_DROP,
  L2_PATCH_N_NEXT,
} l2_patch_next_t;

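/*
 * Forwarding node: packets arriving on a patched RX interface are sent
 * straight to the TX interface's output node.  The disposition is looked
 * up by RX sw_if_index, with a one-entry cache for the common case of a
 * single patched interface.  Uses the usual dual/single loop pattern
 * with buffer-header prefetch.
 */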
static uword
l2_patch_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  l2_patch_next_t next_index;
  l2_patch_main_t * l2pm = &l2_patch_main;
  vlib_node_t *n = vlib_get_node (vm, l2_patch_node.index);
  u32 node_counter_base_index = n->error_heap_index;
  vlib_error_main_t * em = &vm->error_main;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            /* So stupid / simple, we don't need to prefetch data */
          }

          /* speculatively enqueue b0 and b1 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next[1] = bi1 = from[1];
          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];

          ASSERT(l2pm->tx_next_by_rx_sw_if_index[sw_if_index0] != ~0);
          ASSERT(l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0] != ~0);
          ASSERT(l2pm->tx_next_by_rx_sw_if_index[sw_if_index1] != ~0);
          ASSERT(l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index1] != ~0);

          if (PREDICT_TRUE (sw_if_index0 == l2pm->cached_rx_sw_if_index))
            next0 = l2pm->cached_next_index;
          else
            {
              next0 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index0];
              l2pm->cached_rx_sw_if_index = sw_if_index0;
              l2pm->cached_next_index = next0;
            }

          if (PREDICT_TRUE (sw_if_index1 == l2pm->cached_rx_sw_if_index))
            next1 = l2pm->cached_next_index;
          else
            next1 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index1];

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  l2_patch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->rx_sw_if_index = sw_if_index0;
                  t->tx_sw_if_index =
                    l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0];
                }
              if (b1->flags & VLIB_BUFFER_IS_TRACED)
                {
                  l2_patch_trace_t *t =
                    vlib_add_trace (vm, node, b1, sizeof (*t));
                  t->rx_sw_if_index = sw_if_index1;
                  t->tx_sw_if_index =
                    l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index1];
                }
            }

          /* verify speculative enqueues, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          u32 sw_if_index0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];

          ASSERT(l2pm->tx_next_by_rx_sw_if_index[sw_if_index0] != ~0);
          ASSERT(l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0] != ~0);

          if (PREDICT_TRUE (sw_if_index0 == l2pm->cached_rx_sw_if_index))
            next0 = l2pm->cached_next_index;
          else
            {
              next0 = l2pm->tx_next_by_rx_sw_if_index[sw_if_index0];
              l2pm->cached_rx_sw_if_index = sw_if_index0;
              l2pm->cached_next_index = next0;
            }

          if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
            {
              if (b0->flags & VLIB_BUFFER_IS_TRACED)
                {
                  l2_patch_trace_t *t =
                    vlib_add_trace (vm, node, b0, sizeof (*t));
                  t->rx_sw_if_index = sw_if_index0;
                  t->tx_sw_if_index =
                    l2pm->tx_sw_if_index_by_rx_sw_if_index[sw_if_index0];
                }
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  em->counters[node_counter_base_index + L2_PATCH_ERROR_PATCHED] +=
    frame->n_vectors;

  return frame->n_vectors;
}

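/*
 * Node registration: an internal node traced via format_l2_patch_trace,
 * using the error counters defined above and a single drop disposition.
 */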
VLIB_REGISTER_NODE (l2_patch_node, static) = {
  .function = l2_patch_node_fn,
  .name = "l2_patch",
  .vector_size = sizeof (u32),
  .format_trace = format_l2_patch_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2_patch_error_strings),
  .error_strings = l2_patch_error_strings,

  .n_next_nodes = L2_PATCH_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
        [L2_PATCH_NEXT_DROP] = "error-drop",
  },
};

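/*
 * As part of the multi-microarchitecture support referenced in the
 * commit subject, this macro generates CPU-specific variants of
 * l2_patch_node_fn and selects the best one at runtime, so a single
 * binary can serve multiple CPU generations.
 */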
VLIB_NODE_FUNCTION_MULTIARCH (l2_patch_node, l2_patch_node_fn)

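/*
 * Create or delete an L2 patch (cross-connect) between two physical
 * interfaces.  On add: create a next-node arc from l2_patch to the TX
 * interface's output node, record the disposition by RX sw_if_index,
 * put the RX interface into promiscuous mode, and redirect its RX
 * traffic to the l2_patch node.  On delete: undo the redirect and the
 * promiscuous setting and invalidate the table entries.
 * Returns 0 on success or a VNET_API_ERROR_* code.
 */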
int vnet_l2_patch_add_del (u32 rx_sw_if_index, u32 tx_sw_if_index, int is_add)
{
  l2_patch_main_t * l2pm = &l2_patch_main;
  vnet_hw_interface_t * rxhi, *txhi;
  u32 tx_next_index;

  /*
   * We assume that the API msg handler has used 2x VALIDATE_SW_IF_INDEX
   * macros...
   */

  rxhi = vnet_get_sup_hw_interface (l2pm->vnet_main, rx_sw_if_index);

  /* Make sure caller didn't pass a vlan subif, etc. */
  if (rxhi->sw_if_index != rx_sw_if_index)
    return VNET_API_ERROR_INVALID_SW_IF_INDEX;

  txhi = vnet_get_sup_hw_interface (l2pm->vnet_main, tx_sw_if_index);
  if (txhi->sw_if_index != tx_sw_if_index)
    return VNET_API_ERROR_INVALID_SW_IF_INDEX_2;

  if (is_add)
    {
      tx_next_index = vlib_node_add_next (l2pm->vlib_main,
                                          l2_patch_node.index,
                                          txhi->output_node_index);

      vec_validate_init_empty (l2pm->tx_next_by_rx_sw_if_index,
                               rx_sw_if_index, ~0);

      l2pm->tx_next_by_rx_sw_if_index[rx_sw_if_index] = tx_next_index;
      vec_validate_init_empty (l2pm->tx_sw_if_index_by_rx_sw_if_index,
                               rx_sw_if_index, ~0);
      l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index]
        = txhi->sw_if_index;

      ethernet_set_flags (l2pm->vnet_main, rxhi->hw_if_index,
                          ETHERNET_INTERFACE_FLAG_ACCEPT_ALL);

      vnet_hw_interface_rx_redirect_to_node (l2pm->vnet_main,
                                             rxhi->hw_if_index,
                                             l2_patch_node.index);
    }
  else
    {
      ethernet_set_flags (l2pm->vnet_main, rxhi->hw_if_index,
                          0 /* disable promiscuous mode */);

      vnet_hw_interface_rx_redirect_to_node (l2pm->vnet_main,
                                             rxhi->hw_if_index,
                                             ~0 /* disable */);
      if (vec_len (l2pm->tx_next_by_rx_sw_if_index) > rx_sw_if_index)
        {
          l2pm->tx_next_by_rx_sw_if_index[rx_sw_if_index] = ~0;
          l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index] = ~0;
        }
    }

  return 0;
}

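/*
 * CLI handler for "test l2patch".  Parses the rx/tx interfaces and an
 * optional "del" keyword, then calls vnet_l2_patch_add_del.
 *
 * Example usage (interface names are illustrative):
 *   test l2patch rx GigabitEthernet2/0/0 tx GigabitEthernet2/0/1
 *   test l2patch rx GigabitEthernet2/0/0 tx GigabitEthernet2/0/1 del
 */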
static clib_error_t *
test_patch_command_fn (vlib_main_t * vm,
                       unformat_input_t * input,
                       vlib_cli_command_t * cmd)
{
  l2_patch_main_t * l2pm = &l2_patch_main;
  unformat_input_t _line_input, * line_input = &_line_input;
  u32 rx_sw_if_index, tx_sw_if_index;
  int rv;
  int rx_set = 0;
  int tx_set = 0;
  int is_add = 1;

  /* Get a line of input. */
  if (! unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "rx %U", unformat_vnet_sw_interface,
                    l2pm->vnet_main, &rx_sw_if_index))
        rx_set = 1;
      else if (unformat (line_input, "tx %U", unformat_vnet_sw_interface,
                         l2pm->vnet_main, &tx_sw_if_index))
        tx_set = 1;
      else if (unformat (line_input, "del"))
        is_add = 0;
      else
        break;
    }

  if (rx_set == 0)
    return clib_error_return (0, "rx interface not set");

  if (tx_set == 0)
    return clib_error_return (0, "tx interface not set");

  rv = vnet_l2_patch_add_del (rx_sw_if_index, tx_sw_if_index, is_add);

  switch (rv)
    {
    case 0:
      break;

    case VNET_API_ERROR_INVALID_SW_IF_INDEX:
      return clib_error_return (0, "rx interface not a physical port");

    case VNET_API_ERROR_INVALID_SW_IF_INDEX_2:
      return clib_error_return (0, "tx interface not a physical port");

    default:
      return clib_error_return
        (0, "WARNING: vnet_l2_patch_add_del returned %d", rv);
    }

  return 0;
}

VLIB_CLI_COMMAND (test_patch_command, static) = {
    .path = "test l2patch",
    .short_help =
    "test l2patch rx <intfc> tx <intfc> [del]",
    .function = test_patch_command_fn,
};

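/*
 * Example "show l2patch" output, with illustrative interface names:
 *     GigabitEthernet2/0/0 -> GigabitEthernet2/0/1
 */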
/* Display the contents of the l2patch table. */
static clib_error_t *
show_l2patch (vlib_main_t * vm,
              unformat_input_t * input,
              vlib_cli_command_t * cmd)
{
  l2_patch_main_t * l2pm = &l2_patch_main;
  u32 rx_sw_if_index;
  u32 no_entries = 1;

  ASSERT(vec_len(l2pm->tx_next_by_rx_sw_if_index) ==
         vec_len(l2pm->tx_sw_if_index_by_rx_sw_if_index));

  for (rx_sw_if_index = 0;
       rx_sw_if_index < vec_len (l2pm->tx_sw_if_index_by_rx_sw_if_index);
       rx_sw_if_index++)
    {
      u32 tx_sw_if_index =
        l2pm->tx_sw_if_index_by_rx_sw_if_index[rx_sw_if_index];
      if (tx_sw_if_index != ~0)
        {
          no_entries = 0;
          vlib_cli_output (vm, "%26U -> %U",
                           format_vnet_sw_if_index_name,
                           l2pm->vnet_main, rx_sw_if_index,
                           format_vnet_sw_if_index_name,
                           l2pm->vnet_main, tx_sw_if_index);
        }
    }

  if (no_entries)
    vlib_cli_output (vm, "no l2patch entries");

  return 0;
}

VLIB_CLI_COMMAND (show_l2patch_cli, static) = {
  .path = "show l2patch",
  .short_help = "Show l2 interface cross-connect entries",
  .function = show_l2patch,
};

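/* Feature init: cache the vlib and vnet main pointers for later use. */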
clib_error_t *l2_patch_init (vlib_main_t *vm)
{
  l2_patch_main_t * mp = &l2_patch_main;

  mp->vlib_main = vm;
  mp->vnet_main = vnet_get_main();

  return 0;
}

VLIB_INIT_FUNCTION (l2_patch_init);