/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief Functions for encapsulating VXLAN GPE tunnels
 */

#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>

/** Statistics (not really errors) */
#define foreach_vxlan_gpe_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

/**
 * @brief VXLAN GPE encap error strings
 */
static char * vxlan_gpe_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gpe_encap_error
#undef _
};

/**
 * @brief Enum of VXLAN GPE encap errors/counters
 */
typedef enum {
#define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym,
  foreach_vxlan_gpe_encap_error
#undef _
  VXLAN_GPE_ENCAP_N_ERROR,
} vxlan_gpe_encap_error_t;
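
/*
 * Illustration only (not part of the build): with the single entry above,
 * the foreach macro expands the enum to
 *
 *   VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED = 0,
 *   VXLAN_GPE_ENCAP_N_ERROR,
 *
 * and the string table to { "good packets encapsulated" }, keeping error
 * codes and counter names in sync from a single definition.
 */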

/**
 * @brief Struct for tracing VXLAN GPE encapsulated packets
 */
typedef struct {
  u32 tunnel_index;
} vxlan_gpe_encap_trace_t;

/**
 * @brief Trace of packets encapsulated in VXLAN GPE
 *
 * @param *s format string to append to
 * @param *args va_list carrying the trace record
 *
 * @return *s the formatted string
 */
u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gpe_encap_trace_t * t
      = va_arg (*args, vxlan_gpe_encap_trace_t *);

  s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index);
  return s;
}

/**
 * @brief Instantiates a UDP + VXLAN-GPE header, then sets the next node to
 * IP4 or IP6 lookup
 *
 * @param *ngm VXLAN GPE main struct
 * @param *b0 buffer to encapsulate
 * @param *t0 contains the rewrite header
 * @param *next0 relative index of the next dispatch function (next node)
 * @param is_v4 1 for an IPv4 outer header, 0 for IPv6
 *
 */
always_inline void
vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
                            vxlan_gpe_tunnel_t * t0, u32 * next0,
                            u8 is_v4)
{
  ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
  ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);
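
  /* ip_udp_encap_one() prepends the tunnel's precomputed rewrite (outer
     IP + UDP + VXLAN-GPE headers) to the buffer and fixes up the outer IP
     length and checksum fields. */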
  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
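  /* The next node was chosen when the tunnel was created; it is normally
     the IP4 or IP6 lookup matching the tunnel's outer address family. */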
  next0[0] = t0->encap_next_node;
}

/**
 * @brief Instantiates a UDP + VXLAN-GPE header, then sets the next node to
 * IP4 or IP6 lookup, for two packets
 *
 * @param *ngm VXLAN GPE main struct
 * @param *b0 buffer for Packet0
 * @param *b1 buffer for Packet1
 * @param *t0 contains rewrite header for Packet0
 * @param *t1 contains rewrite header for Packet1
 * @param *next0 relative index of next dispatch function (next node) for Packet0
 * @param *next1 relative index of next dispatch function (next node) for Packet1
 * @param is_v4 1 for IPv4 outer headers, 0 for IPv6 (must hold for both packets)
 *
 */
always_inline void
vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
                            vlib_buffer_t * b1, vxlan_gpe_tunnel_t * t0,
                            vxlan_gpe_tunnel_t * t1, u32 * next0,
                            u32 * next1, u8 is_v4)
{
  ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
  ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);

  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
  ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, is_v4);
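  /* Both packets take t0's next node; the caller only uses this path when
     t0 and t1 share the same outer address family, so their next nodes
     should be identical. */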
  next0[0] = next1[0] = t0->encap_next_node;
}

/**
 * @brief Common processing for IPv4 and IPv6 VXLAN GPE encap dispatch functions
 *
 * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
 * tunnels are "establish local". This means that we don't have a TX interface yet,
 * as we still need to look up where the outer-header destination is. By setting
 * the TX index in the buffer metadata to the encap FIB, we can do a lookup to get
 * the adjacency and the real TX:
 *
 *      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
 *
 * @node vxlan-gpe-encap
 * @param *vm the vlib_main_t
 * @param *node the vlib_node_runtime_t
 * @param *from_frame the frame of buffers to process
 *
 * @return from_frame->n_vectors
 *
 */
static uword
vxlan_gpe_encap (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
  vnet_main_t * vnm = ngm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
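
  /* stats_sw_if_index, loaded from node->runtime_data[0], is the interface
     whose TX counters were being batched when the previous frame ended;
     batching resumes here and any remainder is flushed at frame end. */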
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
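          /* Dual loop: consume two packets per iteration and prefetch the
             two after them, hence the requirement for at least four
             packets on input. */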
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          vnet_hw_interface_t * hi0, * hi1;
          vxlan_gpe_tunnel_t * t0, * t1;
          u8 is_ip4_0, is_ip4_1;

          next0 = next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
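
          /* VLIB_TX still holds the tunnel interface the packet was queued
             to; use it to locate the tunnel state. */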
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
          hi1 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);

          t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
          t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);

          is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
          is_ip4_1 = (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
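
          /* The two tunnels may differ in outer address family; the paired
             encap is only valid when both rewrites share one family. */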
          if (PREDICT_TRUE (is_ip4_0 == is_ip4_1))
            {
              vxlan_gpe_encap_two_inline (ngm, b0, b1, t0, t1,
                                          &next0, &next1, is_ip4_0);
            }
          else
            {
              vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);
              vxlan_gpe_encap_one_inline (ngm, b1, t1, &next1, is_ip4_1);
            }

          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
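          /* Preserve the original tunnel sw_if_index in RX metadata, since
             VLIB_TX was just overwritten with the encap FIB index. */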
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          pkts_encapsulated += 2;

          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same vxlan tunnel so the counter is
             not incremented per packet. Note stats are still incremented for
             deleted and admin-down tunnels where packets are dropped. It is
             not worthwhile to check for this rare case and affect normal
             path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index)
                             || (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  vlib_increment_combined_counter (
                      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                      thread_index, stats_sw_if_index,
                      stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter (
                      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                      thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter (
                      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                      thread_index, sw_if_index1, 1, len1);
                }
            }
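
          /* In the mixed-interface case above the batch cannot be restarted,
             so both packets were counted individually and the running batch
             was left as it was. */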

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                  vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                  vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - ngm->tunnels;
            }
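
          /* The validating enqueue fixes up the frame if next0 or next1
             differs from the node's cached next index. */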
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1,
                                           next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
          u32 sw_if_index0, len0;
          vnet_hw_interface_t * hi0;
          vxlan_gpe_tunnel_t * t0;
          u8 is_ip4_0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);

          t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);

          is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);

          vxlan_gpe_encap_one_inline (ngm, b0, t0, &next0, is_ip4_0);

          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          pkts_encapsulated += 1;

          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so the counter is
           * not incremented per packet. Note stats are still incremented for
           * deleted and admin-down tunnels where packets are dropped. It is
           * not worthwhile to check for this rare case and affect normal
           * path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;

              vlib_increment_combined_counter (
                  im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                  thread_index, stats_sw_if_index,
                  stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                  vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);
  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter (
          im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
          thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
  .function = vxlan_gpe_encap,
  .name = "vxlan-gpe-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gpe_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (vxlan_gpe_encap_error_strings),
  .error_strings = vxlan_gpe_encap_error_strings,

  .n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT,

  .next_nodes = {
    [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
  },
};