/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief Functions for encapsulating VXLAN GPE tunnels
 */

#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>
/** Statistics (not really errors) */
#define foreach_vxlan_gpe_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")
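
/*
 * foreach_vxlan_gpe_encap_error is an X-macro: each consumer defines _()
 * to emit what it needs, then invokes the list. For example, the error
 * enum below expands to:
 *
 *   VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
 *   VXLAN_GPE_ENCAP_N_ERROR,
 */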
/**
 * @brief VXLAN GPE encap error strings
 */
static char * vxlan_gpe_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gpe_encap_error
#undef _
};

/**
 * @brief Enum of VXLAN GPE errors/counters
 */
typedef enum {
#define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym,
  foreach_vxlan_gpe_encap_error
#undef _
  VXLAN_GPE_ENCAP_N_ERROR,
} vxlan_gpe_encap_error_t;
/**
 * @brief Enum defining VXLAN GPE next nodes
 */
typedef enum {
  VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP,
  VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP,
  VXLAN_GPE_ENCAP_NEXT_DROP,
  VXLAN_GPE_ENCAP_N_NEXT
} vxlan_gpe_encap_next_t;
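
/*
 * These indices are relative to this node and resolve to the graph nodes
 * named in vxlan_gpe_encap_node.next_nodes at the bottom of this file:
 * "ip4-lookup", "ip6-lookup" and "error-drop".
 */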
/**
 * @brief Struct for tracing VXLAN GPE encapsulated packets
 */
typedef struct {
  u32 tunnel_index;
} vxlan_gpe_encap_trace_t;
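
/* The tunnel index recorded here is rendered by
   format_vxlan_gpe_encap_trace below, e.g. "VXLAN-GPE-ENCAP: tunnel 3". */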
/**
 * @brief Trace of packets encapsulated in VXLAN GPE
 *
 * @param *s
 * @param *args
 *
 * @return *s
 */
u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gpe_encap_trace_t * t
      = va_arg (*args, vxlan_gpe_encap_trace_t *);

  s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index);
  return s;
}
/**
 * @brief Instantiate the UDP + VXLAN-GPE header, then set the next node to IP4|6 lookup
 *
 * @param *ngm
 * @param *b0
 * @param *t0 contains rewrite header
 * @param *next0 relative index of next dispatch function (next node)
 * @param is_v4 Is this IPv4? (or IPv6)
 */
always_inline void
vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
                            vxlan_gpe_tunnel_t * t0, u32 * next0, u8 is_v4)
{
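  /* Rewrite sizes match the ASSERTs below: outer IPv4 (20) + UDP (8) +
     VXLAN-GPE (8) = 36 bytes; outer IPv6 (40) + UDP (8) + VXLAN-GPE (8)
     = 56 bytes. */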
  ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
  ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);

  if (is_v4)
    {
      ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 36, 1);
      next0[0] = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
    }
  else
    {
      ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 56, 0);
      next0[0] = VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP;
    }
}
/**
 * @brief Instantiate the UDP + VXLAN-GPE headers, then set the next node to IP4|6 lookup for two packets
 *
 * @param *ngm
 * @param *b0 Packet0
 * @param *b1 Packet1
 * @param *t0 contains rewrite header for Packet0
 * @param *t1 contains rewrite header for Packet1
 * @param *next0 relative index of next dispatch function (next node) for Packet0
 * @param *next1 relative index of next dispatch function (next node) for Packet1
 * @param is_v4 Is this IPv4? (or IPv6)
 */
always_inline void
vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0, vlib_buffer_t * b1,
                            vxlan_gpe_tunnel_t * t0, vxlan_gpe_tunnel_t * t1, u32 * next0,
                            u32 * next1, u8 is_v4)
{
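  /* Both buffers must share an address family: the caller takes this path
     only when is_ip4_0 == is_ip4_1, so a single is_v4 branch covers the
     pair; mixed pairs fall back to two vxlan_gpe_encap_one_inline calls. */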
  ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
  ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);

  if (is_v4)
    {
      ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 36, 1);
      ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, 36, 1);
      next0[0] = next1[0] = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
    }
  else
    {
      ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, 56, 0);
      ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, 56, 0);
      next0[0] = next1[0] = VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP;
    }
}
/**
 * @brief Common processing for IPv4 and IPv6 VXLAN GPE encap dispatch functions
 *
 * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
 * tunnels are "establish local". This means that we don't have a TX interface yet,
 * as we need to look up where the outer-header destination is. By setting the TX
 * index in the buffer metadata to the encap FIB, we can do a lookup to get the
 * adjacency and real TX.
 *
 *      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
 *
 * @node vxlan-gpe-encap
 * @param *vm
 * @param *node
 * @param *from_frame
 *
 * @return from_frame->n_vectors
 */
static uword
vxlan_gpe_encap (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
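  /* The ip4-lookup/ip6-lookup next nodes interpret the VLIB_TX metadata set
     below as a FIB index rather than a TX interface, which is how the
     "establish local" lookup described above is realized. */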
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
  vnet_main_t * vnm = ngm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u32 cpu_index = os_get_cpu_number ();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          vnet_hw_interface_t * hi0, * hi1;
          vxlan_gpe_tunnel_t * t0, * t1;
          u8 is_ip4_0, is_ip4_1;

          next0 = next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }
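          /* Headers and the first two data cache lines of buffers 2 and 3
             are fetched while buffers 0 and 1 are processed below, hiding
             memory latency behind useful work. */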
          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
          hi1 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);

          t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
          t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);

          is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
          is_ip4_1 = (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
          if (PREDICT_TRUE (is_ip4_0 == is_ip4_1))
            {
              vxlan_gpe_encap_two_inline (ngm, b0, b1, t0, t1, &next0, &next1, is_ip4_0);
            }
          else
            {
              vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
              vxlan_gpe_encap_one_inline (ngm, b1, t1, &next1, is_ip4_1);
            }
          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          pkts_encapsulated += 2;

          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;
          /* Batch stats increment on the same vxlan tunnel so the counter is
             not incremented per packet. Note stats are still incremented for
             deleted and admin-down tunnels where packets are dropped. It is
             not worthwhile to check for this rare case and affect normal
             path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index)
              || (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  vlib_increment_combined_counter (
                      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                      cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter (
                      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                      cpu_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter (
                      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                      cpu_index, sw_if_index1, 1, len1);
                }
            }
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                  vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                  vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - ngm->tunnels;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
          u32 sw_if_index0, len0;
          vnet_hw_interface_t * hi0;
          vxlan_gpe_tunnel_t * t0;
          u8 is_ip4_0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);

          t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);

          is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);

          vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          pkts_encapsulated += 1;

          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;
          /* Batch stats increment on the same vxlan tunnel so the counter is
           * not incremented per packet. Note stats are still incremented for
           * deleted and admin-down tunnels where packets are dropped. It is
           * not worthwhile to check for this rare case and affect normal
           * path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (
                    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                    cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                  vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);
  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter (
          im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index,
          stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
  .function = vxlan_gpe_encap,
  .name = "vxlan-gpe-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gpe_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (vxlan_gpe_encap_error_strings),
  .error_strings = vxlan_gpe_encap_error_strings,

  .n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT,

  .next_nodes = {
    [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
  },
};