/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>
/* Statistics (not really errors) */
#define foreach_vxlan_gpe_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")
static char * vxlan_gpe_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gpe_encap_error
#undef _
};

typedef enum {
#define _(sym,str) VXLAN_GPE_ENCAP_ERROR_##sym,
  foreach_vxlan_gpe_encap_error
#undef _
  VXLAN_GPE_ENCAP_N_ERROR,
} vxlan_gpe_encap_error_t;
typedef enum {
  VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP,
  VXLAN_GPE_ENCAP_NEXT_DROP,
  VXLAN_GPE_ENCAP_N_NEXT
} vxlan_gpe_encap_next_t;

typedef struct {
  u32 tunnel_index;
} vxlan_gpe_encap_trace_t;
u8 * format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gpe_encap_trace_t * t
      = va_arg (*args, vxlan_gpe_encap_trace_t *);

  s = format (s, "VXLAN-GPE-ENCAP: tunnel %d", t->tunnel_index);
  return s;
}
#define foreach_fixed_header_offset             \
_(0) _(1) _(2) _(3) _(4) _(5) _(6)
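/* Each _(offs) expansion below copies one u64, so a single use of this
   macro moves 56 octets of the rewrite in seven 8-byte stores; the odd
   4-octet tail is copied separately in the loops that follow. */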
static uword
vxlan_gpe_encap (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_gpe_main_t * ngm = &vxlan_gpe_main;
  vnet_main_t * vnm = ngm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 cpu_index = os_get_cpu_number();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
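  /* node->runtime_data[0] persists across frames: it caches the sw_if_index
     whose TX counters were being batched when the previous frame ended, and
     the flush at the bottom of this function writes the current value back. */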
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
          u32 next1 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          vnet_hw_interface_t * hi0, * hi1;
          ip4_header_t * ip0, * ip1;
          udp_header_t * udp0, * udp1;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          vxlan_gpe_tunnel_t * t0, * t1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);
            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface
            (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
          hi1 = vnet_get_sup_hw_interface
            (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);

          t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
          t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);

          ASSERT(vec_len(t0->rewrite) >= 24);
          ASSERT(vec_len(t1->rewrite) >= 24);

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
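          /* A negative advance moves current_data back, opening
             vec_len(rewrite) octets of headroom in front of the inner
             packet for the encapsulation header written below. */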
          ip0 = vlib_buffer_get_current(b0);
          ip1 = vlib_buffer_get_current(b1);
          /* Copy the fixed header */
          copy_dst0 = (u64 *) ip0;
          copy_src0 = (u64 *) t0->rewrite;
          copy_dst1 = (u64 *) ip1;
          copy_src1 = (u64 *) t1->rewrite;

          ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);

          /* Copy first 56 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          foreach_fixed_header_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
          foreach_fixed_header_offset;
#undef _

          /* Last 4 octets. Hopefully gcc will be our friend */
          copy_dst_last0 = (u32 *)(&copy_dst0[7]);
          copy_src_last0 = (u32 *)(&copy_src0[7]);
          copy_dst_last1 = (u32 *)(&copy_dst1[7]);
          copy_src_last1 = (u32 *)(&copy_src1[7]);

          copy_dst_last0[0] = copy_src_last0[0];
          copy_dst_last1[0] = copy_src_last1[0];
          /* If there are TLVs to copy, do so */
          if (PREDICT_FALSE (_vec_len(t0->rewrite) > 64))
            clib_memcpy (&copy_dst0[3], t0->rewrite + 64,
                         _vec_len (t0->rewrite) - 64);

          if (PREDICT_FALSE (_vec_len(t1->rewrite) > 64))
            clib_memcpy (&copy_dst1[3], t1->rewrite + 64,
                         _vec_len (t1->rewrite) - 64);
          /* fix the <bleep>ing outer-IP checksum */
          sum0 = ip0->checksum;
          /* old_l0 always 0, see the rewrite setup */
          new_l0 =
            clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));

          sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                 length /* changed member */);
          ip0->checksum = ip_csum_fold (sum0);
          ip0->length = new_l0;

          sum1 = ip1->checksum;
          /* old_l1 always 0, see the rewrite setup */
          new_l1 =
            clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));

          sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                 length /* changed member */);
          ip1->checksum = ip_csum_fold (sum1);
          ip1->length = new_l1;
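          /* ip_csum_update() folds only the old/new length delta into the
             existing checksum (incremental update in the style of RFC 1624)
             rather than re-summing the whole IP header. */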
          /* Fix UDP length */
          udp0 = (udp_header_t *)(ip0+1);
          new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                         - sizeof (*ip0));
          udp1 = (udp_header_t *)(ip1+1);
          new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                         - sizeof (*ip1));

          udp0->length = new_l0;
          udp1->length = new_l1;
          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
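          /* sw_if_index[VLIB_TX] doubles as the FIB index consumed by the
             ip4-lookup next node; VLIB_RX keeps the tunnel sw_if_index so
             the packet remains attributable to the tunnel interface. */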
          pkts_encapsulated += 2;

          len0 = vlib_buffer_length_in_chain(vm, b0);
          len1 = vlib_buffer_length_in_chain(vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;
          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE((sw_if_index0 != stats_sw_if_index)
                            || (sw_if_index1 != stats_sw_if_index))) {
            stats_n_packets -= 2;
            stats_n_bytes -= len0 + len1;
            if (sw_if_index0 == sw_if_index1) {
              if (stats_n_packets)
                vlib_increment_combined_counter(
                    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                    cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
              stats_sw_if_index = sw_if_index0;
              stats_n_packets = 2;
              stats_n_bytes = len0 + len1;
            } else {
              vlib_increment_combined_counter(
                  im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                  cpu_index, sw_if_index0, 1, len0);
              vlib_increment_combined_counter(
                  im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                  cpu_index, sw_if_index1, 1, len1);
            }
          }
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                  vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                  vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - ngm->tunnels;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0 = VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
          u32 sw_if_index0, len0;
          vnet_hw_interface_t * hi0;
          ip4_header_t * ip0;
          udp_header_t * udp0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          vxlan_gpe_tunnel_t * t0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface
            (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);

          t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);

          ASSERT(vec_len(t0->rewrite) >= 24);

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          ip0 = vlib_buffer_get_current(b0);
          /* Copy the fixed header */
          copy_dst0 = (u64 *) ip0;
          copy_src0 = (u64 *) t0->rewrite;

          ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);

          /* Copy first 56 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          foreach_fixed_header_offset;
#undef _

          /* Last 4 octets. Hopefully gcc will be our friend */
          copy_dst_last0 = (u32 *)(&copy_dst0[7]);
          copy_src_last0 = (u32 *)(&copy_src0[7]);

          copy_dst_last0[0] = copy_src_last0[0];
          /* If there are TLVs to copy, do so */
          if (PREDICT_FALSE (_vec_len(t0->rewrite) > 64))
            clib_memcpy (&copy_dst0[3], t0->rewrite + 64,
                         _vec_len (t0->rewrite) - 64);
          /* fix the <bleep>ing outer-IP checksum */
          sum0 = ip0->checksum;
          /* old_l0 always 0, see the rewrite setup */
          new_l0 =
            clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));

          sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                 length /* changed member */);
          ip0->checksum = ip_csum_fold (sum0);
          ip0->length = new_l0;

          /* Fix UDP length */
          udp0 = (udp_header_t *)(ip0+1);
          new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                         - sizeof (*ip0));

          udp0->length = new_l0;
          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          pkts_encapsulated++;

          len0 = vlib_buffer_length_in_chain(vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;
          /* Batch stats increment on the same vxlan tunnel so counter is not
           * incremented per packet. Note stats are still incremented for deleted
           * and admin-down tunnel where packets are dropped. It is not worthwhile
           * to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter(
                    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                    cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gpe_encap_trace_t *tr =
                  vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - ngm->tunnels;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);
  /* Increment any remaining batch stats */
  if (stats_n_packets) {
    vlib_increment_combined_counter(
        im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index,
        stats_sw_if_index, stats_n_packets, stats_n_bytes);
    node->runtime_data[0] = stats_sw_if_index;
  }

  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (vxlan_gpe_encap_node) = {
  .function = vxlan_gpe_encap,
  .name = "vxlan-gpe-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gpe_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(vxlan_gpe_encap_error_strings),
  .error_strings = vxlan_gpe_encap_error_strings,

  .n_next_nodes = VXLAN_GPE_ENCAP_N_NEXT,

  .next_nodes = {
    [VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [VXLAN_GPE_ENCAP_NEXT_DROP] = "error-drop",
  },
};