/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface_output.h>
#include <vxlan/vxlan.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>
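
/*
 * vxlan-encap: take packets queued to a VXLAN tunnel interface, prepend
 * the tunnel's precomputed IP/UDP/VXLAN underlay header (the adjacency
 * rewrite), fix up lengths and checksums, and hand the result to the
 * tunnel's next DPO.
 */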
/* Statistics (not all errors) */
#define foreach_vxlan_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char *vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
  VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;
typedef enum
{
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;

typedef struct
{
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;
#ifndef CLIB_MARCH_VARIANT
static u8 *
format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t *t = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
              t->tunnel_index, t->vni);
  return s;
}
#endif
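
/*
 * Shared encap worker. The is_ip4 argument is a compile-time constant at
 * both call sites below, so with always_inline the v4/v6 branches and the
 * header-size constants fold away and each node gets a specialized body.
 */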
always_inline uword
vxlan_encap_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *from_frame, u8 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_main_t *vxm = &vxlan_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  STATIC_ASSERT_SIZEOF (ip6_vxlan_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_header_t, 36);
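  /* Underlay header sizes: ip4 (20) + udp (8) + vxlan (8) = 36 bytes,
     ip6 (40) + udp (8) + vxlan (8) = 56 bytes. */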
  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_header_t) : sizeof (ip6_vxlan_header_t);
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
  u32 const outer_packet_csum_offload_flags =
    is_ip4 ? (VNET_BUFFER_OFFLOAD_F_OUTER_IP_CKSUM |
              VNET_BUFFER_OFFLOAD_F_TNL_VXLAN) :
             (VNET_BUFFER_OFFLOAD_F_OUTER_UDP_CKSUM |
              VNET_BUFFER_OFFLOAD_F_TNL_VXLAN);
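  /* For an ip4 underlay only the outer IP header checksum is offloaded;
     ip6 has no IP header checksum, but its UDP checksum is mandatory, so
     the outer UDP checksum is offloaded instead. */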
  vlib_get_buffers (vm, from, bufs, n_left_from);
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration: buffer headers plus the cache line
             just before the packet data, where the underlay header will
             be prepended. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }
          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          vlib_buffer_t *b0 = b[0];
          vlib_buffer_t *b1 = b[1];
          b += 2;
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);
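
          /* Tunnel lookups are cached across packets: sw_if_index0/t0 and
             sw_if_index1/t1 persist across loop iterations, so back-to-back
             packets bound for the same tunnel skip the interface lookup. */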
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;
          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b0),
                                    vlib_buffer_get_current (b1),
                                    underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          vlib_buffer_advance (b1, -underlay_hdr_len);
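          /* The rewrite copies the precomputed underlay header into the
             space in front of the current data; advancing by a negative
             offset then moves current_data back to expose it. */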
          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u32 len1 = vlib_buffer_length_in_chain (vm, b1);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void *underlay0 = vlib_buffer_get_current (b0);
          void *underlay1 = vlib_buffer_get_current (b1);
          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          u8 *l3_0, *l3_1;
          if (is_ip4)
            {
              ip4_vxlan_header_t *hdr0 = underlay0;
              ip4_vxlan_header_t *hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_header_t *hdr0 = underlay0;
              ip6_vxlan_header_t *hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
            }
          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;
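
          /* Per RFC 7348 the UDP source port carries a hash of the inner
             packet so underlay ECMP/RSS can spread flows; the UDP length
             equals the IP payload length (UDP header included). */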
          if (b0->flags & VNET_BUFFER_F_OFFLOAD)
            {
              vnet_buffer2 (b0)->outer_l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer2 (b0)->outer_l4_hdr_offset = (u8 *) udp0 - b0->data;
              vnet_buffer_offload_flags_set (b0,
                                             outer_packet_csum_offload_flags);
            }
          /* IPv4 checksum only */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */);
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */);
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum =
                ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }
          if (b1->flags & VNET_BUFFER_F_OFFLOAD)
            {
              vnet_buffer2 (b1)->outer_l3_hdr_offset = l3_1 - b1->data;
              vnet_buffer2 (b1)->outer_l4_hdr_offset = (u8 *) udp1 - b1->data;
              vnet_buffer_offload_flags_set (b1,
                                             outer_packet_csum_offload_flags);
            }
          /* IPv4 checksum only */
          else if (is_ip4)
            {
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */);
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */);
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b1, ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }
          /* save inner packet flow_hash for load-balance node */
          vnet_buffer (b0)->ip.flow_hash = flow_hash0;
          vnet_buffer (b1)->ip.flow_hash = flow_hash1;
          if (sw_if_index0 == sw_if_index1)
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index0, 2, len0 + len1);
            }
          else
            {
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index0, 1, len0);
              vlib_increment_combined_counter (tx_counter, thread_index,
                                               sw_if_index1, 1, len1);
            }
          pkts_encapsulated += 2;
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
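
      /* Scalar tail: the same encap steps as above, one packet at a time,
         for whatever the quad loop left over. */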
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          vlib_buffer_t *b0 = b[0];
          b += 1;
          u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b0),
                                   underlay_hdr_len);

          vlib_buffer_advance (b0, -underlay_hdr_len);
          void *underlay0 = vlib_buffer_get_current (b0);
          u32 len0 = vlib_buffer_length_in_chain (vm, b0);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          udp_header_t *udp0;
          ip4_header_t *ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t *ip6_0;
          u8 *l3_0;

          if (is_ip4)
            {
              ip4_vxlan_header_t *hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
            }
          else /* ipv6 */
            {
              ip6_vxlan_header_t *hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
            }
          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          if (b0->flags & VNET_BUFFER_F_OFFLOAD)
            {
              vnet_buffer2 (b0)->outer_l3_hdr_offset = l3_0 - b0->data;
              vnet_buffer2 (b0)->outer_l4_hdr_offset = (u8 *) udp0 - b0->data;
              vnet_buffer_offload_flags_set (b0,
                                             outer_packet_csum_offload_flags);
            }
          /* IPv4 checksum only */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */);
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */);
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b0, ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }
          /* reuse inner packet flow_hash for load-balance node */
          vnet_buffer (b0)->ip.flow_hash = flow_hash0;

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}
VLIB_NODE_FN (vxlan4_encap_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  /* Disable checksum offload as the setup overhead in the tx node is not
     worthwhile for the ip4 header checksum alone, unless the udp checksum
     is also required */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}
VLIB_NODE_FN (vxlan6_encap_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6 as the udp checksum is mandatory. */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}
VLIB_REGISTER_NODE (vxlan4_encap_node) = {
  .name = "vxlan4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};
VLIB_REGISTER_NODE (vxlan6_encap_node) = {
  .name = "vxlan6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */