/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>

/* Statistics (not all errors) */
#define foreach_vxlan_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char *vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
  VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;

typedef enum
{
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;

typedef struct
{
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;

#ifndef CLIB_MARCH_VARIANT
static u8 *
format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t *t = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
              t->tunnel_index, t->vni);
  return s;
}
#endif

always_inline uword
vxlan_encap_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
                    vlib_frame_t * from_frame, u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_main_t *vxm = &vxlan_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
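
  /* The rewrite below relies on fixed-size underlay headers:
     IP4 (20) + UDP (8) + VXLAN (8) = 36, IP6 (40) + UDP (8) + VXLAN (8) = 56 */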
  STATIC_ASSERT_SIZEOF (ip6_vxlan_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_header_t, 36);

  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_header_t) : sizeof (ip6_vxlan_header_t);
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
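
  /* Offload flags applied when csum_offload is set: tag the buffer's address
     family and request UDP (and, for IP4, IP header) checksum computation */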
  u32 const csum_flags = is_ip4 ? VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
    VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM :
    VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
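
  /* Translate the frame's buffer indices into buffer pointers up front */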
  vlib_get_buffers (vm, from, bufs, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
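
      /* Dual-buffer loop: encapsulate two packets per iteration while at
         least four buffers remain, so the next pair can be prefetched */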
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t *b0 = b[0];
          vlib_buffer_t *b1 = b[1];
          b += 2;
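
          /* Hash the inner packet; used below as the UDP source port so the
             underlay can spread tunnel traffic across ECMP/LAG paths */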
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }
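
          /* Stash each tunnel's DPO index so the next node in the graph can
             steer the now-encapsulated packet */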
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;
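
          /* Prepend the tunnel's precomputed IP/UDP/VXLAN (rewrite) header */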
          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b0),
                                    vlib_buffer_get_current (b1),
                                    underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void *underlay0 = vlib_buffer_get_current (b0);
          void *underlay1 = vlib_buffer_get_current (b1);

          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          u8 *l3_0, *l3_1;
          if (is_ip4)
            {
              ip4_vxlan_header_t *hdr0 = underlay0;
              ip4_vxlan_header_t *hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }
          else                  /* ipv6 */
            {
              ip6_vxlan_header_t *hdr0 = underlay0;
              ip6_vxlan_header_t *hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
              b1->flags |= csum_flags;
              vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
            }
          /* The IPv4 UDP checksum is optional and is only filled in via
             checksum offload above; here just patch the IP4 header checksum */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }

          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          /* save inner packet flow_hash for load-balance node */
          vnet_buffer (b0)->ip.flow_hash = flow_hash0;
          vnet_buffer (b1)->ip.flow_hash = flow_hash1;

          if (sw_if_index0 == sw_if_index1)
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index0, 2, len0 + len1);
            }
          else
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index0, 1, len0);
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index1, 1, len1);
            }
          pkts_encapsulated += 2;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
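
      /* Single-buffer loop for any remaining packets */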
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = b[0];
          b += 1;

          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b0),
                                   underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void *underlay0 = vlib_buffer_get_current (b0);

          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          udp_header_t *udp0;
          ip4_header_t *ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t *ip6_0;
          u8 *l3_0;
          if (is_ip4)
            {
              ip4_vxlan_header_t *hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
            }
          else                  /* ip6 */
            {
              ip6_vxlan_header_t *hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
            }

          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          if (csum_offload)
            {
              b0->flags |= csum_flags;
              vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
            }
          /* The IPv4 UDP checksum is optional and is only filled in via
             checksum offload above; here just patch the IP4 header checksum */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          /* reuse inner packet flow_hash for load-balance node */
          vnet_buffer (b0)->ip.flow_hash = flow_hash0;

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (vxlan4_encap_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  /* Disable checksum offload as the setup overhead in the tx node is not
     worthwhile for the ip4 header checksum alone, unless the udp checksum
     is also required */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                             /* csum_offload */ 0);
}

VLIB_NODE_FN (vxlan6_encap_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6 as the udp checksum is mandatory */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                             /* csum_offload */ 1);
}

/* *INDENT-OFF* */
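/* Graph node registrations for the IPv4 and IPv6 encap nodes */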
VLIB_REGISTER_NODE (vxlan4_encap_node) = {
  .name = "vxlan4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (vxlan6_encap_node) = {
  .name = "vxlan6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */