/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief Functions for encapsulating VXLAN GPE tunnels
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>
/** Statistics (not really errors) */
#define foreach_vxlan_gpe_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")
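
/*
 * The list above is an X-macro: each consumer defines `_' to extract the
 * field it needs, expands foreach_vxlan_gpe_encap_error, then undefines
 * `_' again. The error string table and the error enum below are both
 * generated this way from the single list.
 */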
/**
 * @brief VXLAN GPE encap error strings
 */
static char *vxlan_gpe_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gpe_encap_error
#undef _
};
/**
 * @brief Enum of VXLAN GPE errors/counters
 */
typedef enum
{
#define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym,
  foreach_vxlan_gpe_encap_error
#undef _
    VXLAN_GPE_ENCAP_N_ERROR,
} vxlan_gpe_encap_error_t;
/**
 * @brief Struct for tracing VXLAN GPE encapsulated packets
 */
typedef struct
{
  u32 tunnel_index;
} vxlan_gpe_encap_trace_t;
/**
 * @brief Trace of packets encapsulated in VXLAN GPE
 *
 * @param *s the formatted string being built
 * @param *args vlib_main_t, vlib_node_t, and the trace record
 *
 * @return *s
 *
 */
u8 *
format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gpe_encap_trace_t *t = va_arg (*args, vxlan_gpe_encap_trace_t *);

  s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index);
  return s;
}
/**
 * @brief Instantiates a UDP + VXLAN-GPE header, then sets the next node
 * to IP4|6 lookup
 *
 * @param *ngm the VXLAN GPE main struct
 * @param *b0 buffer to encapsulate
 * @param *t0 contains rewrite header
 * @param *next0 relative index of next dispatch function (next node)
 * @param is_v4 Is this IPv4? (or IPv6)
 *
 */
always_inline void
vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
                            vxlan_gpe_tunnel_t * t0, u32 * next0, u8 is_v4)
{
  ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
  ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);

  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
  next0[0] = t0->encap_next_node;
}
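
/*
 * ip_udp_encap_one () prepends the tunnel's precomputed rewrite in place.
 * Roughly (a sketch of the mechanism, not the exact implementation):
 *
 *   vlib_buffer_advance (b0, -(word) t0->rewrite_size);
 *   clib_memcpy_fast (vlib_buffer_get_current (b0), t0->rewrite,
 *                     t0->rewrite_size);
 *   // ...then the outer IP length (and UDP length) fields are fixed up
 *   // from the inner packet length.
 */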
/**
 * @brief Instantiates a UDP + VXLAN-GPE header, then sets the next node
 * to IP4|6 lookup for two packets
 *
 * @param *ngm the VXLAN GPE main struct
 * @param *b0 buffer for Packet0
 * @param *b1 buffer for Packet1
 * @param *t0 contains rewrite header for Packet0
 * @param *t1 contains rewrite header for Packet1
 * @param *next0 relative index of next dispatch function (next node) for Packet0
 * @param *next1 relative index of next dispatch function (next node) for Packet1
 * @param is_v4 Is this IPv4? (or IPv6)
 *
 */
always_inline void
vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
                            vlib_buffer_t * b1, vxlan_gpe_tunnel_t * t0,
                            vxlan_gpe_tunnel_t * t1, u32 * next0,
                            u32 * next1, u8 is_v4)
{
  ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
  ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);

  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
  ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, is_v4);
  next0[0] = next1[0] = t0->encap_next_node;
}
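
/*
 * Note: both packets are given t0's next node. This is safe only because
 * the caller dispatches here when is_ip4_0 == is_ip4_1, i.e. both tunnels
 * share an outer address family and therefore the same lookup next node.
 */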
/**
 * @brief Common processing for IPv4 and IPv6 VXLAN GPE encap dispatch functions
 *
 * It is worth noting that other than trivial UDP forwarding (transit), VXLAN GPE
 * tunnels are "establish local". This means that we don't yet have a TX interface,
 * since we still need to look up where the outer-header destination is. By setting
 * the TX index in the buffer metadata to the encap FIB, we can do a lookup to get
 * the adjacency and the real TX interface:
 *
 *      vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
 *
 * @node vxlan-gpe-encap
 *
 * @param *vm the vlib_main_t
 * @param *node the vlib_node_runtime_t
 * @param *from_frame the frame of buffers to encapsulate
 *
 * @return from_frame->n_vectors
 *
 */
static uword
vxlan_gpe_encap (vlib_main_t * vm,
                 vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gpe_main_t *ngm = &vxlan_gpe_main;
  vnet_main_t *vnm = ngm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  vlib_get_buffers (vm, from, bufs, n_left_from);
  while (n_left_from > 0)
    {
      u32 n_left_to_next;
      u32 sw_if_index0 = ~0, sw_if_index1 = ~0, len0, len1;
      vnet_hw_interface_t *hi0, *hi1;
      vxlan_gpe_tunnel_t *t0 = NULL, *t1 = NULL;
      u8 is_ip4_0 = 0, is_ip4_1 = 0;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          u32 next0, next1;

          next0 = next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);
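
            /*
             * Also prefetch one cache line *before* the packet data: the
             * rewrite is prepended there by ip_udp_encap_one (), so warming
             * that line avoids a miss when the next iteration writes it.
             */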
            CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          /* Resolve the tunnel for b[0] and get its "is_ip4" flag;
             cached across packets sharing the same TX interface */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 =
                vnet_get_sup_hw_interface (vnm,
                                           vnet_buffer (b[0])->sw_if_index
                                           [VLIB_TX]);
              t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
              is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
            }
          /* Resolve the tunnel for b[1] and get its "is_ip4" flag */
          if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b[1])->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  hi1 = hi0;
                  t1 = t0;
                  is_ip4_1 = is_ip4_0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
                  hi1 =
                    vnet_get_sup_hw_interface (vnm,
                                               vnet_buffer (b[1])->sw_if_index
                                               [VLIB_TX]);
                  t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);
                  is_ip4_1 = (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
                }
            }
          if (PREDICT_TRUE (is_ip4_0 == is_ip4_1))
            {
              vxlan_gpe_encap_two_inline (ngm, b[0], b[1], t0, t1, &next0,
                                          &next1, is_ip4_0);
            }
          else
            {
              vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, is_ip4_0);
              vxlan_gpe_encap_one_inline (ngm, b[1], t1, &next1, is_ip4_1);
            }
          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer (b[0])->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer (b[1])->sw_if_index[VLIB_TX] = t1->encap_fib_index;
          vnet_buffer (b[0])->sw_if_index[VLIB_RX] = sw_if_index0;
          vnet_buffer (b[1])->sw_if_index[VLIB_RX] = sw_if_index1;
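          /*
           * ip4-lookup/ip6-lookup treat a non-~0 sw_if_index[VLIB_TX] as a
           * FIB index override, so the outer header is looked up in the
           * tunnel's encap FIB rather than the input interface's FIB.
           */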
          pkts_encapsulated += 2;

          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          len1 = vlib_buffer_length_in_chain (vm, b[1]);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;
          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index)
                             || (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters +
                       VNET_INTERFACE_COUNTER_TX, thread_index,
                       stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter (im->combined_sw_if_counters
                                                   + VNET_INTERFACE_COUNTER_TX,
                                                   thread_index, sw_if_index0,
                                                   1, len0);
                  vlib_increment_combined_counter (im->combined_sw_if_counters
                                                   + VNET_INTERFACE_COUNTER_TX,
                                                   thread_index, sw_if_index1,
                                                   1, len1);
                }
            }
          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }

          if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b[1],
                                                            sizeof (*tr));
              tr->tunnel_index = t1 - ngm->tunnels;
            }
          b += 2;

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, next0,
                                           next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          /* Resolve the tunnel for b[0] and get its "is_ip4" flag */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 =
                vnet_get_sup_hw_interface (vnm,
                                           vnet_buffer (b[0])->sw_if_index
                                           [VLIB_TX]);
              t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
              is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
            }
          vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, is_ip4_0);

          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer (b[0])->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer (b[0])->sw_if_index[VLIB_RX] = sw_if_index0;
          pkts_encapsulated++;

          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          stats_n_packets += 1;
          stats_n_bytes += len0;
          /* Batch stats increment on the same vxlan tunnel so counter is not
           * incremented per packet. Note stats are still incremented for deleted
           * and admin-down tunnel where packets are dropped. It is not worthwhile
           * to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter (im->combined_sw_if_counters +
                                                 VNET_INTERFACE_COUNTER_TX,
                                                 thread_index,
                                                 stats_sw_if_index,
                                                 stats_n_packets,
                                                 stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                            sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }
          b += 1;

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);
  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter (im->combined_sw_if_counters +
                                       VNET_INTERFACE_COUNTER_TX,
                                       thread_index, stats_sw_if_index,
                                       stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
  .function = vxlan_gpe_encap,
  .name = "vxlan-gpe-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gpe_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (vxlan_gpe_encap_error_strings),
  .error_strings = vxlan_gpe_encap_error_strings,

  .n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT,

  .next_nodes = {
    [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [VXLAN_GPE_ENCAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
  },
};
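
/*
 * Usage sketch (a hypothetical debug CLI session; the exact keywords
 * depend on the vxlan-gpe CLI in this tree):
 *
 *   create vxlan-gpe tunnel local 192.168.1.1 remote 192.168.1.2 vni 13
 *   set interface state vxlan_gpe_tunnel0 up
 *   set interface ip address vxlan_gpe_tunnel0 10.1.1.1/24
 *
 * Packets routed out the tunnel interface arrive at this node, get the
 * rewrite prepended, and are handed to ip4-lookup or ip6-lookup.
 */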
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */