/*
 * decap.c: vxlan tunnel decap packet processing
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vxlan/vxlan.h>
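/* vxlan.h (directly or via its own includes) supplies what this node uses:
 * vxlan_main with the tunnel pool and per-key hash tables, the
 * vxlan4/vxlan6 tunnel key types, the vxlan header layout, and the
 * VXLAN_INPUT_NEXT_* / VXLAN_ERROR_* identifiers. */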
vlib_node_registration_t vxlan_input_node;
typedef struct {
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 vni;
} vxlan_rx_trace_t;

static u8 * format_vxlan_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_rx_trace_t * t = va_arg (*args, vxlan_rx_trace_t *);

  if (t->tunnel_index != ~0)
    {
      s = format (s, "VXLAN: tunnel %d vni %d next %d error %d",
                  t->tunnel_index, t->vni, t->next_index, t->error);
    }
  else
    {
      s = format (s, "VXLAN: no tunnel for vni %d next %d error %d",
                  t->vni, t->next_index, t->error);
    }
  return s;
}
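/*
 * vxlan_input: shared decap worker for the vxlan4-input and vxlan6-input
 * graph nodes.  Buffers arrive from udp-local dispatch with current_data
 * at the VXLAN header; the node rewinds to the outer IP header to build a
 * (source address, VNI) key, resolves the tunnel, strips the outer
 * ip/udp/vxlan headers and hands the inner frame to the tunnel's
 * configured decap next node (l2-input in the common case).
 */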
always_inline uword
vxlan_input (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame,
             u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 last_tunnel_index = ~0;
  vxlan4_tunnel_key_t last_key4;
  vxlan6_tunnel_key_t last_key6;
  u32 pkts_decapsulated = 0;
  u32 cpu_index = os_get_cpu_number();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  if (is_ip4)
    last_key4.as_u64 = ~0;
  else
    memset (&last_key6, 0xff, sizeof (last_key6));
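  /* last_key4/last_key6 plus last_tunnel_index form a one-entry lookup
   * cache: when consecutive packets carry the same outer source address
   * and VNI, the hash lookup below is skipped and the cached tunnel index
   * is reused. */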
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
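  /* stats_sw_if_index is carried across dispatches in runtime_data[0] so
   * that RX packet/byte counters can be batched per tunnel interface
   * instead of being bumped once per packet. */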
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          vxlan_header_t * vxlan0, * vxlan1;
          uword * p0, * p1;
          u32 tunnel_index0, tunnel_index1;
          vxlan_tunnel_t * t0, * t1;
          vxlan4_tunnel_key_t key4_0, key4_1;
          vxlan6_tunnel_key_t key6_0, key6_1;
          u32 error0, error1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }
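          /* Prefetching buffers 2 and 3 (headers plus the first data cache
           * lines) while buffers 0 and 1 are processed hides memory
           * latency for the next loop iteration. */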
          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          /* udp leaves current_data pointing at the vxlan header */
          vxlan0 = vlib_buffer_get_current (b0);
          vxlan1 = vlib_buffer_get_current (b1);
          if (is_ip4) {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            vlib_buffer_advance
              (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            ip4_0 = vlib_buffer_get_current (b0);
            ip4_1 = vlib_buffer_get_current (b1);
          } else {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            vlib_buffer_advance
              (b1, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            ip6_0 = vlib_buffer_get_current (b0);
            ip6_1 = vlib_buffer_get_current (b1);
          }
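          /* The negative advance above backs current_data up over the udp
           * and outer ip headers, so the outer source address is visible
           * for the tunnel key computed further down. */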
          /* pop (ip, udp, vxlan) */
          if (is_ip4) {
            vlib_buffer_advance
              (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
            vlib_buffer_advance
              (b1, sizeof(*ip4_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
          } else {
            vlib_buffer_advance
              (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
            vlib_buffer_advance
              (b1, sizeof(*ip6_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
          }
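          /* After the pop, current_data points at the start of the inner
           * frame (typically an Ethernet header when the decap next node
           * is l2-input), which is what the next node expects. */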
          tunnel_index0 = ~0;
          error0 = 0;

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.vni = vxlan0->vni_reserved;

            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);

                if (p0 == 0)
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace0;
                  }

                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.vni = vxlan0->vni_reserved;

            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get (vxm->vxlan6_tunnel_by_key, pointer_to_uword(&key6_0));

                if (p0 == 0)
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace0;
                  }

                last_key6 = key6_0;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
          }
          t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set input sw_if_index to VXLAN tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
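          /* With VLIB_RX rewritten to the tunnel's sw_if_index, downstream
           * l2 learning and bridging treat the packet as if it arrived on
           * the VXLAN tunnel interface itself. */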
          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   cpu_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
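          /* The running batch is flushed only when the packet's tunnel
           * interface differs from the batched one; any remainder is
           * flushed once after the main dispatch loop. */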
        trace0:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->vni = vnet_get_vni (vxlan0);
            }
          tunnel_index1 = ~0;
          error1 = 0;

          if (is_ip4) {
            key4_1.src = ip4_1->src_address.as_u32;
            key4_1.vni = vxlan1->vni_reserved;

            if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
              {
                p1 = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);

                if (p1 == 0)
                  {
                    error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next1 = VXLAN_INPUT_NEXT_DROP;
                    goto trace1;
                  }

                last_key4.as_u64 = key4_1.as_u64;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
          } else /* !is_ip4 */ {
            key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
            key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
            key6_1.vni = vxlan1->vni_reserved;

            if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
              {
                p1 = hash_get (vxm->vxlan6_tunnel_by_key, pointer_to_uword(&key6_1));

                if (p1 == 0)
                  {
                    error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next1 = VXLAN_INPUT_NEXT_DROP;
                    goto trace1;
                  }

                last_key6 = key6_1;
                tunnel_index1 = last_tunnel_index = p1[0];
              }
            else
              tunnel_index1 = last_tunnel_index;
          }
          t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);

          next1 = t1->decap_next_index;
          sw_if_index1 = t1->sw_if_index;
          len1 = vlib_buffer_length_in_chain (vm, b1);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next1 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b1);

          /* Set input sw_if_index to VXLAN tunnel for learning */
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len1;

          /* Batch stats increment on the same vxlan tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len1;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   cpu_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len1;
              stats_sw_if_index = sw_if_index1;
            }
        trace1:
          b1->error = error1 ? node->errors[error1] : 0;

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->next_index = next1;
              tr->error = error1;
              tr->tunnel_index = tunnel_index1;
              tr->vni = vnet_get_vni (vxlan1);
            }
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
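      /* vlib_validate_buffer_enqueue_x2 patches up the speculative enqueue
       * above if next0/next1 differ from the frame's cached next index.
       * The scalar loop below handles whatever the dual loop left over. */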
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          vxlan_header_t * vxlan0;
          uword * p0;
          u32 tunnel_index0;
          vxlan_tunnel_t * t0;
          vxlan4_tunnel_key_t key4_0;
          vxlan6_tunnel_key_t key6_0;
          u32 error0;
          u32 sw_if_index0, len0;
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* udp leaves current_data pointing at the vxlan header */
          vxlan0 = vlib_buffer_get_current (b0);
          if (is_ip4) {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
            ip4_0 = vlib_buffer_get_current (b0);
          } else {
            vlib_buffer_advance
              (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
            ip6_0 = vlib_buffer_get_current (b0);
          }
          /* pop (ip, udp, vxlan) */
          if (is_ip4) {
            vlib_buffer_advance
              (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
          } else {
            vlib_buffer_advance
              (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
          }
          tunnel_index0 = ~0;
          error0 = 0;

          if (is_ip4) {
            key4_0.src = ip4_0->src_address.as_u32;
            key4_0.vni = vxlan0->vni_reserved;

            if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
              {
                p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);

                if (p0 == 0)
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace00;
                  }

                last_key4.as_u64 = key4_0.as_u64;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
          } else /* !is_ip4 */ {
            key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
            key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
            key6_0.vni = vxlan0->vni_reserved;

            if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
              {
                p0 = hash_get (vxm->vxlan6_tunnel_by_key, pointer_to_uword(&key6_0));

                if (p0 == 0)
                  {
                    error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
                    next0 = VXLAN_INPUT_NEXT_DROP;
                    goto trace00;
                  }

                last_key6 = key6_0;
                tunnel_index0 = last_tunnel_index = p0[0];
              }
            else
              tunnel_index0 = last_tunnel_index;
          }
          t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);

          next0 = t0->decap_next_index;
          sw_if_index0 = t0->sw_if_index;
          len0 = vlib_buffer_length_in_chain (vm, b0);

          /* Required to make the l2 tag push / pop code work on l2 subifs */
          if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
            vnet_update_l2_len (b0);

          /* Set input sw_if_index to VXLAN tunnel for learning */
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          pkts_decapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so counter
             is not incremented per packet */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
                   cpu_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
        trace00:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_rx_trace_t *tr
                = vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->next_index = next0;
              tr->error = error0;
              tr->tunnel_index = tunnel_index0;
              tr->vni = vnet_get_vni (vxlan0);
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, vxlan_input_node.index,
                               VXLAN_ERROR_DECAPSULATED,
                               pkts_decapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
static uword
vxlan4_input (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_input(vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
vxlan6_input (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_input(vm, node, from_frame, /* is_ip4 */ 0);
}
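/* Thin per-address-family wrappers: with vxlan_input declared
 * always_inline above, the constant is_ip4 argument lets the compiler
 * emit separate IPv4 and IPv6 decap paths with the branches folded away. */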
static char * vxlan_error_strings[] = {
#define vxlan_error(n,s) s,
#include <vnet/vxlan/vxlan_error.def>
#undef vxlan_error
};
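/* vxlan_error.def is an X-macro list of vxlan_error(NAME, "string")
 * entries; expanding it here builds the counter name table, and the same
 * file is expanded elsewhere to produce the matching VXLAN_ERROR_* enum
 * and the VXLAN_N_ERROR count used by the registrations below. */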
VLIB_REGISTER_NODE (vxlan4_input_node) = {
  .function = vxlan4_input,
  .name = "vxlan4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,

  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },

//temp  .format_buffer = format_vxlan_header,
  .format_trace = format_vxlan_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_input_node, vxlan4_input)
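/* VLIB_NODE_FUNCTION_MULTIARCH arranges for CPU-specific builds of the
 * node function (e.g. different SIMD levels) to be selected at startup. */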
VLIB_REGISTER_NODE (vxlan6_input_node) = {
  .function = vxlan6_input,
  .name = "vxlan6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),

  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,

  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },

//temp  .format_buffer = format_vxlan_header,
  .format_trace = format_vxlan_rx_trace,
  // $$$$ .unformat_buffer = unformat_vxlan_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_input_node, vxlan6_input)