/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #include <vlib/vlib.h>
16 #include <vnet/pg/pg.h>
17 #include <vnet/ethernet/ethernet.h>
18 #include <vppinfra/error.h>
19 #include <vnet/devices/pci/ige.h>
20 #include <vnet/devices/pci/ixge.h>
21 #include <vnet/devices/pci/ixgev.h>
24 u32 cached_next_index;
25 u32 cached_sw_if_index;
27 /* Hash table to map sw_if_index to next node index */
28 uword * next_node_index_by_sw_if_index;
31 vlib_main_t * vlib_main;
32 vnet_main_t * vnet_main;
42 /* packet trace format function */
43 static u8 * format_swap_trace (u8 * s, va_list * args)
45 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
46 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
47 swap_trace_t * t = va_arg (*args, swap_trace_t *);
49 s = format (s, "SWAP: dst now %U src now %U sw_if_index %d next_index %d",
50 format_ethernet_address, t->dst,
51 format_ethernet_address, t->src,
57 #define foreach_hw_driver_next \
62 mac_swap_main_t mac_swap_main;
64 static vlib_node_registration_t mac_swap_node;
/* Per-node error (counter) definitions: one _(symbol, "description") pair
 * per counter; expanded below to build the enum and the string table. */
#define foreach_mac_swap_error                  \
_(SWAPS, "mac addresses swapped")

typedef enum {
#define _(sym,str) MAC_SWAP_ERROR_##sym,
  foreach_mac_swap_error
#undef _
  MAC_SWAP_N_ERROR,
} mac_swap_error_t;
76 static char * mac_swap_error_strings[] = {
77 #define _(sym,string) string,
78 foreach_mac_swap_error
/*
 * To drop a pkt and increment one of the previous counters:
 *
 *   set b0->error = error_node->errors[RANDOM_ERROR_SAMPLE];
 *   set next0 to a disposition index bound to "error-drop".
 *
 * To manually increment the specific counter MAC_SWAP_ERROR_SAMPLE:
 *
 *   vlib_node_t *n = vlib_get_node (vm, mac_swap.index);
 *   u32 node_counter_base_index = n->error_heap_index;
 *   vlib_error_main_t * em = &vm->error_main;
 *   em->counters[node_counter_base_index + MAC_SWAP_ERROR_SAMPLE] += 1;
 */
103 mac_swap_node_fn (vlib_main_t * vm,
104 vlib_node_runtime_t * node,
105 vlib_frame_t * frame)
107 u32 n_left_from, * from, * to_next;
108 mac_swap_next_t next_index;
109 mac_swap_main_t * msm = &mac_swap_main;
110 vlib_node_t *n = vlib_get_node (vm, mac_swap_node.index);
111 u32 node_counter_base_index = n->error_heap_index;
112 vlib_error_main_t * em = &vm->error_main;
114 from = vlib_frame_vector_args (frame);
115 n_left_from = frame->n_vectors;
116 next_index = node->cached_next_index;
118 while (n_left_from > 0)
122 vlib_get_next_frame (vm, node, next_index,
123 to_next, n_left_to_next);
125 while (n_left_from >= 4 && n_left_to_next >= 2)
128 vlib_buffer_t * b0, * b1;
130 u32 sw_if_index0, sw_if_index1;
134 ethernet_header_t * h0, *h1;
137 /* Prefetch next iteration. */
139 vlib_buffer_t * p2, * p3;
141 p2 = vlib_get_buffer (vm, from[2]);
142 p3 = vlib_get_buffer (vm, from[3]);
144 vlib_prefetch_buffer_header (p2, LOAD);
145 vlib_prefetch_buffer_header (p3, LOAD);
147 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
148 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
151 to_next[0] = bi0 = from[0];
152 to_next[1] = bi1 = from[1];
158 b0 = vlib_get_buffer (vm, bi0);
159 b1 = vlib_get_buffer (vm, bi1);
161 sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
162 next0 = msm->cached_next_index;
163 sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
164 next1 = msm->cached_next_index;
166 if (PREDICT_FALSE (msm->cached_sw_if_index != sw_if_index0))
168 p0 = hash_get (msm->next_node_index_by_sw_if_index, sw_if_index0);
171 vnet_hw_interface_t *hw0;
173 hw0 = vnet_get_sup_hw_interface (msm->vnet_main,
176 next0 = vlib_node_add_next (msm->vlib_main,
178 hw0->output_node_index);
179 hash_set (msm->next_node_index_by_sw_if_index,
180 sw_if_index0, next0);
184 msm->cached_sw_if_index = sw_if_index0;
185 msm->cached_next_index = next0;
188 if (PREDICT_FALSE (msm->cached_sw_if_index != sw_if_index1))
190 p1 = hash_get (msm->next_node_index_by_sw_if_index, sw_if_index1);
193 vnet_hw_interface_t *hw1;
195 hw1 = vnet_get_sup_hw_interface (msm->vnet_main,
198 next1 = vlib_node_add_next (msm->vlib_main,
200 hw1->output_node_index);
201 hash_set (msm->next_node_index_by_sw_if_index,
202 sw_if_index1, next1);
206 msm->cached_sw_if_index = sw_if_index1;
207 msm->cached_next_index = next1;
210 em->counters[node_counter_base_index + MAC_SWAP_ERROR_SWAPS] += 2;
212 /* reset buffer so we always point at the MAC hdr */
213 vlib_buffer_reset (b0);
214 vlib_buffer_reset (b1);
215 h0 = vlib_buffer_get_current (b0);
216 h1 = vlib_buffer_get_current (b1);
218 /* Swap 2 x src and dst mac addresses using 8-byte load/stores */
219 tmp0a = clib_net_to_host_u64(((u64 *)(h0->dst_address))[0]);
220 tmp1a = clib_net_to_host_u64(((u64 *)(h1->dst_address))[0]);
221 tmp0b = clib_net_to_host_u64(((u64 *)(h0->src_address))[0]);
222 tmp1b = clib_net_to_host_u64(((u64 *)(h1->src_address))[0]);
223 ((u64 *)(h0->dst_address))[0] = clib_host_to_net_u64(tmp0b);
224 ((u64 *)(h1->dst_address))[0] = clib_host_to_net_u64(tmp1b);
225 /* Move the ethertype from "b" to "a" */
228 tmp0a |= tmp0b & 0xFFFF;
229 ((u64 *)(h0->src_address))[0] = clib_host_to_net_u64(tmp0a);
230 tmp1a |= tmp1b & 0xFFFF;
231 ((u64 *)(h1->src_address))[0] = clib_host_to_net_u64(tmp1a);
233 if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)))
235 if (b0->flags & VLIB_BUFFER_IS_TRACED)
237 swap_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
238 memcpy (t->src, h0->src_address, 6);
239 memcpy (t->dst, h0->dst_address, 6);
240 t->sw_if_index = sw_if_index0;
241 t->next_index = next0;
243 if (b1->flags & VLIB_BUFFER_IS_TRACED)
245 swap_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
246 memcpy (t->src, h1->src_address, 6);
247 memcpy (t->dst, h1->dst_address, 6);
248 t->sw_if_index = sw_if_index1;
249 t->next_index = next1;
253 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
254 to_next, n_left_to_next,
255 bi0, bi1, next0, next1);
258 while (n_left_from > 0 && n_left_to_next > 0)
266 ethernet_header_t * h0;
275 b0 = vlib_get_buffer (vm, bi0);
277 sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
278 next0 = msm->cached_next_index;
280 if (PREDICT_FALSE (msm->cached_sw_if_index != sw_if_index0))
282 p0 = hash_get (msm->next_node_index_by_sw_if_index, sw_if_index0);
285 vnet_hw_interface_t *hw0;
287 hw0 = vnet_get_sup_hw_interface (msm->vnet_main,
290 next0 = vlib_node_add_next (msm->vlib_main,
292 hw0->output_node_index);
293 hash_set (msm->next_node_index_by_sw_if_index,
294 sw_if_index0, next0);
298 msm->cached_sw_if_index = sw_if_index0;
299 msm->cached_next_index = next0;
302 em->counters[node_counter_base_index + MAC_SWAP_ERROR_SWAPS] += 1;
304 /* reset buffer so we always point at the MAC hdr */
305 vlib_buffer_reset (b0);
306 h0 = vlib_buffer_get_current (b0);
308 /* Exchange src and dst, preserve the ethertype */
309 tmp0a = clib_net_to_host_u64(((u64 *)(h0->dst_address))[0]);
310 tmp0b = clib_net_to_host_u64(((u64 *)(h0->src_address))[0]);
311 ((u64 *)(h0->dst_address))[0] = clib_host_to_net_u64(tmp0b);
313 tmp0a |= tmp0b & 0xFFFF;
314 ((u64 *)(h0->src_address))[0] = clib_host_to_net_u64(tmp0a);
317 if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
318 && (b0->flags & VLIB_BUFFER_IS_TRACED))) {
319 swap_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
320 memcpy (t->src, h0->src_address, 6);
321 memcpy (t->dst, h0->dst_address, 6);
322 t->sw_if_index = sw_if_index0;
323 t->next_index = next0;
326 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
327 to_next, n_left_to_next,
331 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
334 return frame->n_vectors;
337 VLIB_REGISTER_NODE (mac_swap_node,static) = {
338 .function = mac_swap_node_fn,
340 .vector_size = sizeof (u32),
341 .format_trace = format_swap_trace,
342 .type = VLIB_NODE_TYPE_INTERNAL,
344 .n_errors = ARRAY_LEN(mac_swap_error_strings),
345 .error_strings = mac_swap_error_strings,
347 .n_next_nodes = MAC_SWAP_N_NEXT,
349 /* edit / add dispositions here */
351 [MAC_SWAP_NEXT_DROP] = "error-drop",
355 clib_error_t *mac_swap_init (vlib_main_t *vm)
357 mac_swap_main_t * msm = &mac_swap_main;
359 msm->next_node_index_by_sw_if_index = hash_create (0, sizeof (uword));
360 msm->cached_next_index = (u32)~0;
361 msm->cached_sw_if_index = (u32)~0;
363 msm->vnet_main = vnet_get_main();
365 /* Driver RX nodes send pkts here... */
366 #define _(a) ixge_set_next_node (IXGE_RX_NEXT_##a##_INPUT, "mac-swap");
367 foreach_hw_driver_next
369 #define _(a) ixgev_set_next_node (IXGEV_RX_NEXT_##a##_INPUT, "mac-swap");
370 foreach_hw_driver_next
372 #define _(a) ige_set_next_node (IGE_RX_NEXT_##a##_INPUT, "mac-swap");
373 foreach_hw_driver_next
379 VLIB_INIT_FUNCTION (mac_swap_init);