/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

/* Statistics (not all errors) */
#define foreach_vxlan_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char * vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};

typedef enum {
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
  VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;

typedef enum {
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;

typedef struct {
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;

#ifndef CLIB_MARCH_VARIANT
u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t * t
      = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
              t->tunnel_index, t->vni);
  return s;
}
#endif

always_inline uword
vxlan_encap_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
                    vlib_frame_t * from_frame,
                    u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  vlib_combined_counter_main_t * tx_counter =
      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_tunnel_t * t0 = NULL, * t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

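  /* underlay header sizes: ip4 (20) or ip6 (40), plus udp (8) and vxlan (8) */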
  STATIC_ASSERT_SIZEOF (ip6_vxlan_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_header_t, 36);

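  /* the precomputed tunnel rewrite is stored right-aligned within
   * rewrite_data, so the underlay header starts rw_hdr_offset bytes in */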
  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_header_t) : sizeof (ip6_vxlan_header_t);
  u8 const rw_hdr_offset = sizeof t0->rewrite_data - underlay_hdr_len;
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
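  /* with offload, ip4 flags both the ip header and udp checksums for the
   * driver; ip6 has no l3 checksum, so only udp is flagged */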
  u32 const csum_flags = is_ip4 ?
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM :
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
          vlib_buffer_t * b1 = vlib_get_buffer (vm, bi1);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);

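          /* the l2 flow hash becomes the udp source port below, giving the
           * underlay per-flow entropy for ECMP/LAG hashing */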
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                  vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer(b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer(b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                      vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }

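          /* hand the tunnel's DPO index to the next node via buffer metadata */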
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);

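          /* advancing by a negative length moves b->current_data back into
           * headroom, opening space to prepend the underlay header */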
          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void * underlay0 = vlib_buffer_get_current (b0);
          void * underlay1 = vlib_buffer_get_current (b1);

          /* vnet_rewrite_two_header writes only in 8-byte (uword) chunks
           * and discards the first 4 bytes of the (36-byte) ip4 underlay
           * rewrite; use memcpy as a workaround */
          clib_memcpy_fast (underlay0, t0->rewrite_header.data + rw_hdr_offset,
                            underlay_hdr_len);
          clib_memcpy_fast (underlay1, t1->rewrite_header.data + rw_hdr_offset,
                            underlay_hdr_len);

          ip4_header_t * ip4_0, * ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t * ip6_0, * ip6_1;
          udp_header_t * udp0, * udp1;
          u8 * l3_0, * l3_1;
          if (is_ip4)
            {
              ip4_vxlan_header_t * hdr0 = underlay0;
              ip4_vxlan_header_t * hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_header_t * hdr0 = underlay0;
              ip6_vxlan_header_t * hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
              b1->flags |= csum_flags;
              vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
            }
          /* IPv4 UDP checksum only if checksum offload is used */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                  length /* changed member */);
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                      tos /* changed member */);
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                  length /* changed member */);
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                      tos /* changed member */);
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              /* 0 means "no checksum"; RFC 2460 requires 0xffff instead */
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          if (sw_if_index0 == sw_if_index1)
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                  sw_if_index0, 2, len0 + len1);
            }
          else
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                  sw_if_index0, 1, len0);
              vlib_increment_combined_counter (tx_counter, thread_index,
                  sw_if_index1, 1, len1);
            }
          pkts_encapsulated += 2;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

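      /* process any remaining buffers one at a time */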
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                  vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void * underlay0 = vlib_buffer_get_current (b0);

          /* vnet_rewrite_one_header writes only in 8-byte (uword) chunks
           * and discards the first 4 bytes of the (36-byte) ip4 underlay
           * rewrite; use memcpy as a workaround */
          clib_memcpy_fast (underlay0, t0->rewrite_header.data + rw_hdr_offset,
                            underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

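          /* payload_l0 counts everything after the l3 header: the udp and
           * vxlan headers plus the inner frame */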
          udp_header_t * udp0;
          ip4_header_t * ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t * ip6_0;
          u8 * l3_0;
          if (is_ip4)
            {
              ip4_vxlan_header_t * hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_header_t * hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
            }
          /* IPv4 UDP checksum only if checksum offload is used */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                  length /* changed member */);
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                      tos /* changed member */);
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              /* 0 means "no checksum"; RFC 2460 requires 0xffff instead */
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (vxlan4_encap_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  /* Disable checksum offload, as the setup overhead in the tx node is not
   * worthwhile for the ip4 header checksum alone, unless the udp checksum
   * is also required */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                             /* csum_offload */ 0);
}

VLIB_NODE_FN (vxlan6_encap_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6, as the udp checksum is mandatory */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                             /* csum_offload */ 1);
}

VLIB_REGISTER_NODE (vxlan4_encap_node) = {
  .name = "vxlan4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (vxlan6_encap_node) = {
  .name = "vxlan6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};