/*
 * decap.c - decapsulate VXLAN GPE
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include <vlib/vlib.h>
19 #include <vnet/pg/pg.h>
20 #include <vnet/vxlan-gpe/vxlan_gpe.h>
26 } vxlan_gpe_rx_trace_t;
28 static u8 * format_vxlan_gpe_rx_trace (u8 * s, va_list * args)
30 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
31 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
32 vxlan_gpe_rx_trace_t * t = va_arg (*args, vxlan_gpe_rx_trace_t *);
34 if (t->tunnel_index != ~0)
36 s = format (s, "VXLAN-GPE: tunnel %d next %d error %d", t->tunnel_index,
37 t->next_index, t->error);
41 s = format (s, "VXLAN-GPE: no tunnel next %d error %d\n", t->next_index,
48 static u8 * format_vxlan_gpe_with_length (u8 * s, va_list * args)
50 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
51 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
58 vxlan_gpe_input (vlib_main_t * vm,
59 vlib_node_runtime_t * node,
60 vlib_frame_t * from_frame)
62 u32 n_left_from, next_index, * from, * to_next;
63 vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
64 vnet_main_t * vnm = ngm->vnet_main;
65 vnet_interface_main_t * im = &vnm->interface_main;
66 u32 last_tunnel_index = ~0;
67 vxlan_gpe_tunnel_key_t last_key;
68 u32 pkts_decapsulated = 0;
69 u32 cpu_index = os_get_cpu_number();
70 u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
72 memset (&last_key, 0xff, sizeof (last_key));
74 from = vlib_frame_vector_args (from_frame);
75 n_left_from = from_frame->n_vectors;
77 next_index = node->cached_next_index;
78 stats_sw_if_index = node->runtime_data[0];
79 stats_n_packets = stats_n_bytes = 0;
81 while (n_left_from > 0)
85 vlib_get_next_frame (vm, node, next_index,
86 to_next, n_left_to_next);
88 while (n_left_from >= 4 && n_left_to_next >= 2)
91 vlib_buffer_t * b0, * b1;
93 ip4_vxlan_gpe_header_t * iuvn0, * iuvn1;
95 u32 tunnel_index0, tunnel_index1;
96 vxlan_gpe_tunnel_t * t0, * t1;
97 vxlan_gpe_tunnel_key_t key0, key1;
99 u32 sw_if_index0, sw_if_index1, len0, len1;
101 /* Prefetch next iteration. */
103 vlib_buffer_t * p2, * p3;
105 p2 = vlib_get_buffer (vm, from[2]);
106 p3 = vlib_get_buffer (vm, from[3]);
108 vlib_prefetch_buffer_header (p2, LOAD);
109 vlib_prefetch_buffer_header (p3, LOAD);
111 CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
112 CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
124 b0 = vlib_get_buffer (vm, bi0);
125 b1 = vlib_get_buffer (vm, bi1);
127 /* udp leaves current_data pointing at the vxlan header */
129 (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
131 (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
133 iuvn0 = vlib_buffer_get_current (b0);
134 iuvn1 = vlib_buffer_get_current (b1);
136 /* pop (ip, udp, vxlan) */
137 vlib_buffer_advance (b0, sizeof (*iuvn0));
138 vlib_buffer_advance (b1, sizeof (*iuvn1));
145 next0 = (iuvn0->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ? iuvn0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
146 next1 = (iuvn1->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ? iuvn1->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
151 key0.local = iuvn0->ip4.dst_address.as_u32;
152 key1.local = iuvn1->ip4.dst_address.as_u32;
154 key0.remote = iuvn0->ip4.src_address.as_u32;
155 key1.remote = iuvn1->ip4.src_address.as_u32;
157 key0.vni = iuvn0->vxlan.vni_res;
158 key1.vni = iuvn1->vxlan.vni_res;
163 /* Processing for key0 */
164 if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0])
165 || (key0.as_u64[1] != last_key.as_u64[1])))
167 p0 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key0);
171 error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
175 last_key.as_u64[0] = key0.as_u64[0];
176 last_key.as_u64[1] = key0.as_u64[1];
177 tunnel_index0 = last_tunnel_index = p0[0];
180 tunnel_index0 = last_tunnel_index;
182 t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
184 next0 = t0->protocol;
186 sw_if_index0 = t0->sw_if_index;
187 len0 = vlib_buffer_length_in_chain(vm, b0);
189 /* Required to make the l2 tag push / pop code work on l2 subifs */
190 vnet_update_l2_len (b0);
193 * ip[46] lookup in the configured FIB
195 vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
198 stats_n_packets += 1;
199 stats_n_bytes += len0;
201 if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
203 stats_n_packets -= 1;
204 stats_n_bytes -= len0;
206 vlib_increment_combined_counter(
207 im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
208 cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
210 stats_n_bytes = len0;
211 stats_sw_if_index = sw_if_index0;
215 b0->error = error0 ? node->errors[error0] : 0;
217 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
219 vxlan_gpe_rx_trace_t *tr
220 = vlib_add_trace (vm, node, b0, sizeof (*tr));
221 tr->next_index = next0;
223 tr->tunnel_index = tunnel_index0;
227 /* Processing for key1 */
228 if (PREDICT_FALSE ((key1.as_u64[0] != last_key.as_u64[0])
229 || (key1.as_u64[1] != last_key.as_u64[1])))
231 p1 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key1);
235 error1 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
239 last_key.as_u64[0] = key1.as_u64[0];
240 last_key.as_u64[1] = key1.as_u64[1];
241 tunnel_index1 = last_tunnel_index = p1[0];
244 tunnel_index1 = last_tunnel_index;
246 t1 = pool_elt_at_index (ngm->tunnels, tunnel_index1);
248 next1 = t1->protocol;
249 sw_if_index1 = t1->sw_if_index;
250 len1 = vlib_buffer_length_in_chain(vm, b1);
252 /* Required to make the l2 tag push / pop code work on l2 subifs */
253 vnet_update_l2_len (b1);
256 * ip[46] lookup in the configured FIB
258 vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
262 stats_n_packets += 1;
263 stats_n_bytes += len1;
265 /* Batch stats increment on the same vxlan tunnel so counter
266 is not incremented per packet */
267 if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index))
269 stats_n_packets -= 1;
270 stats_n_bytes -= len1;
272 vlib_increment_combined_counter(
273 im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
274 cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
276 stats_n_bytes = len1;
277 stats_sw_if_index = sw_if_index1;
279 vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
282 b1->error = error1 ? node->errors[error1] : 0;
284 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
286 vxlan_gpe_rx_trace_t *tr
287 = vlib_add_trace (vm, node, b1, sizeof (*tr));
288 tr->next_index = next1;
290 tr->tunnel_index = tunnel_index1;
293 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
294 to_next, n_left_to_next,
295 bi0, bi1, next0, next1);
298 while (n_left_from > 0 && n_left_to_next > 0)
303 ip4_vxlan_gpe_header_t * iuvn0;
306 vxlan_gpe_tunnel_t * t0;
307 vxlan_gpe_tunnel_key_t key0;
309 u32 sw_if_index0, len0;
318 b0 = vlib_get_buffer (vm, bi0);
320 /* udp leaves current_data pointing at the vxlan header */
322 (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
324 iuvn0 = vlib_buffer_get_current (b0);
326 /* pop (ip, udp, vxlan) */
327 vlib_buffer_advance (b0, sizeof (*iuvn0));
331 next0 = (iuvn0->vxlan.protocol < VXLAN_GPE_INPUT_N_NEXT) ? iuvn0->vxlan.protocol : VXLAN_GPE_INPUT_NEXT_DROP;
333 key0.local = iuvn0->ip4.dst_address.as_u32;
334 key0.remote = iuvn0->ip4.src_address.as_u32;
335 key0.vni = iuvn0->vxlan.vni_res;
338 if (PREDICT_FALSE ((key0.as_u64[0] != last_key.as_u64[0])
339 || (key0.as_u64[1] != last_key.as_u64[1])))
341 p0 = hash_get_mem (ngm->vxlan_gpe_tunnel_by_key, &key0);
345 error0 = VXLAN_GPE_ERROR_NO_SUCH_TUNNEL;
349 last_key.as_u64[0] = key0.as_u64[0];
350 last_key.as_u64[1] = key0.as_u64[1];
351 tunnel_index0 = last_tunnel_index = p0[0];
354 tunnel_index0 = last_tunnel_index;
356 t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
358 next0 = t0->protocol;
360 sw_if_index0 = t0->sw_if_index;
361 len0 = vlib_buffer_length_in_chain(vm, b0);
363 /* Required to make the l2 tag push / pop code work on l2 subifs */
364 vnet_update_l2_len (b0);
367 * ip[46] lookup in the configured FIB
369 vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
371 pkts_decapsulated ++;
372 stats_n_packets += 1;
373 stats_n_bytes += len0;
375 /* Batch stats increment on the same vxlan-gpe tunnel so counter
376 is not incremented per packet */
377 if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
379 stats_n_packets -= 1;
380 stats_n_bytes -= len0;
382 vlib_increment_combined_counter(
383 im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
384 cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
386 stats_n_bytes = len0;
387 stats_sw_if_index = sw_if_index0;
391 b0->error = error0 ? node->errors[error0] : 0;
393 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
395 vxlan_gpe_rx_trace_t *tr
396 = vlib_add_trace (vm, node, b0, sizeof (*tr));
397 tr->next_index = next0;
399 tr->tunnel_index = tunnel_index0;
401 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
402 to_next, n_left_to_next,
406 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
408 vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
409 VXLAN_GPE_ERROR_DECAPSULATED,
411 /* Increment any remaining batch stats */
414 vlib_increment_combined_counter(
415 im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, cpu_index,
416 stats_sw_if_index, stats_n_packets, stats_n_bytes);
417 node->runtime_data[0] = stats_sw_if_index;
419 return from_frame->n_vectors;
422 static char * vxlan_gpe_error_strings[] = {
423 #define vxlan_gpe_error(n,s) s,
424 #include <vnet/vxlan-gpe/vxlan_gpe_error.def>
425 #undef vxlan_gpe_error
429 VLIB_REGISTER_NODE (vxlan_gpe_input_node) = {
430 .function = vxlan_gpe_input,
431 .name = "vxlan-gpe-input",
432 /* Takes a vector of packets. */
433 .vector_size = sizeof (u32),
434 .type = VLIB_NODE_TYPE_INTERNAL,
435 .n_errors = ARRAY_LEN(vxlan_gpe_error_strings),
436 .error_strings = vxlan_gpe_error_strings,
438 .n_next_nodes = VXLAN_GPE_INPUT_N_NEXT,
440 #define _(s,n) [VXLAN_GPE_INPUT_NEXT_##s] = n,
441 foreach_vxlan_gpe_input_next
445 .format_buffer = format_vxlan_gpe_with_length,
446 .format_trace = format_vxlan_gpe_rx_trace,
447 // $$$$ .unformat_buffer = unformat_vxlan_gpe_header,