/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan.h>

/* Statistics (not all errors) */
#define foreach_vxlan_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")
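
/* The foreach_ list above is an X-macro: it is expanded twice below,
 * once into the counter string table and once into the enum symbols,
 * so the two stay in sync from a single definition. */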

static char * vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};

typedef enum {
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
  VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;

typedef enum {
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;

typedef struct {
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;

u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t * t = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
	      t->tunnel_index, t->vni);
  return s;
}

always_inline uword
vxlan_encap_inline (vlib_main_t * vm,
		    vlib_node_runtime_t * node,
		    vlib_frame_t * from_frame,
		    u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  vlib_combined_counter_main_t * tx_counter =
      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_tunnel_t * t0 = NULL, * t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;
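
  /* The sw_if_index/tunnel/next/dpo state above is carried across
   * packets, so back-to-back packets bound for the same tunnel skip the
   * hw-interface lookup; see the sw_if_index checks in the loops below. */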

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  STATIC_ASSERT_SIZEOF(ip6_vxlan_header_t, 56);
  STATIC_ASSERT_SIZEOF(ip4_vxlan_header_t, 36);
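
  /* These asserts pin the on-wire size of the precomputed encap headers:
   * IPv4 (20) + UDP (8) + VXLAN (8) = 36 bytes and IPv6 (40) + UDP (8) +
   * VXLAN (8) = 56 bytes, matching the tunnel rewrite checked below. */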

  word const underlay_hdr_len = is_ip4 ?
    sizeof(ip4_vxlan_header_t) : sizeof(ip6_vxlan_header_t);
  u16 const l3_len = is_ip4 ? sizeof(ip4_header_t) : sizeof(ip6_header_t);
  u32 const csum_flags = is_ip4 ?
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM :
    VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
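
  /* With checksum offload, IPv4 hands both the header and UDP checksums
   * to hardware; IPv6 has no header checksum, so only the UDP checksum
   * flag is requested. */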

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
			   to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t * p2, * p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);

	    CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	  }
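
	  /* The prefetches above warm buffers 2 and 3 while packets 0 and 1
	   * are processed below, hiding buffer header and data fetch
	   * latency in the dual loop. */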

	  u32 bi0 = from[0];
	  u32 bi1 = from[1];

	  vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
	  vlib_buffer_t * b1 = vlib_get_buffer (vm, bi1);
	  u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
	  u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);

	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
	      vnet_hw_interface_t *hi0 =
		  vnet_get_sup_hw_interface (vnm, sw_if_index0);
	      t0 = &vxm->tunnels[hi0->dev_instance];
	      /* Note: change to always set next0 if it may be set to drop */
	      next0 = t0->next_dpo.dpoi_next_node;
	      dpoi_idx0 = t0->next_dpo.dpoi_index;
	    }

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index1 != vnet_buffer(b1)->sw_if_index[VLIB_TX])
	    {
	      if (sw_if_index0 == vnet_buffer(b1)->sw_if_index[VLIB_TX])
		{
		  sw_if_index1 = sw_if_index0;
		  t1 = t0;
		  next1 = next0;
		  dpoi_idx1 = dpoi_idx0;
		}
	      else
		{
		  sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
		  vnet_hw_interface_t *hi1 =
		      vnet_get_sup_hw_interface (vnm, sw_if_index1);
		  t1 = &vxm->tunnels[hi1->dev_instance];
		  /* Note: change to always set next1 if it may be set to drop */
		  next1 = t1->next_dpo.dpoi_next_node;
		  dpoi_idx1 = t1->next_dpo.dpoi_index;
		}
	    }

	  vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
	  vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;

	  ASSERT(vec_len(t0->rewrite) == underlay_hdr_len);
	  ASSERT(vec_len(t1->rewrite) == underlay_hdr_len);
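
	  /* Advancing by a negative length opens underlay_hdr_len bytes of
	   * headroom in front of the inner frame, into which the tunnel's
	   * precomputed rewrite (IP + UDP + VXLAN) is copied below. */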
	  vlib_buffer_advance (b0, -underlay_hdr_len);
	  vlib_buffer_advance (b1, -underlay_hdr_len);

	  u32 len0 = vlib_buffer_length_in_chain (vm, b0);
	  u32 len1 = vlib_buffer_length_in_chain (vm, b1);
	  u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
	  u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);
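
	  /* len - l3_len is the UDP header plus everything behind it, which
	   * is exactly both the UDP length field and, for IPv6, the payload
	   * length, so one value serves both uses below. */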

	  ip4_header_t * ip4_0, * ip4_1;
	  ip6_header_t * ip6_0, * ip6_1;
	  udp_header_t * udp0, * udp1;
	  u8 * l3_0, * l3_1;
	  if (is_ip4)
	    {
	      ip4_vxlan_header_t * hdr0 = vlib_buffer_get_current(b0);
	      ip4_vxlan_header_t * rewrite0 = (void *)t0->rewrite;
	      ip4_vxlan_header_t * hdr1 = vlib_buffer_get_current(b1);
	      ip4_vxlan_header_t * rewrite1 = (void *)t1->rewrite;
	      *hdr0 = *rewrite0;
	      *hdr1 = *rewrite1;

	      /* Fix the IP4 checksum and length */
	      ip4_0 = &hdr0->ip4;
	      ip4_1 = &hdr1->ip4;
	      ip4_0->length = clib_host_to_net_u16 (len0);
	      ip4_1->length = clib_host_to_net_u16 (len1);

	      l3_0 = (u8 *) ip4_0;
	      l3_1 = (u8 *) ip4_1;
	      udp0 = &hdr0->udp;
	      udp1 = &hdr1->udp;
	    }
	  else /* ipv6 underlay */
	    {
	      ip6_vxlan_header_t * hdr0 = vlib_buffer_get_current(b0);
	      ip6_vxlan_header_t * rewrite0 = (void *) t0->rewrite;
	      ip6_vxlan_header_t * hdr1 = vlib_buffer_get_current(b1);
	      ip6_vxlan_header_t * rewrite1 = (void *) t1->rewrite;
	      *hdr0 = *rewrite0;
	      *hdr1 = *rewrite1;

	      /* Fix IP6 payload length */
	      ip6_0 = &hdr0->ip6;
	      ip6_1 = &hdr1->ip6;
	      ip6_0->payload_length = payload_l0;
	      ip6_1->payload_length = payload_l1;

	      l3_0 = (u8 *) ip6_0;
	      l3_1 = (u8 *) ip6_1;
	      udp0 = &hdr0->udp;
	      udp1 = &hdr1->udp;
	    }

	  /* Fix UDP length and set source port */
	  udp0->length = payload_l0;
	  udp0->src_port = flow_hash0;
	  udp1->length = payload_l1;
	  udp1->src_port = flow_hash1;
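
	  /* Taking the low 16 bits of the inner flow hash as the source
	   * port spreads flows across underlay ECMP/LAG paths, per the
	   * RFC 7348 recommendation of hashing the inner frame. */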

	  if (csum_offload)
	    {
	      b0->flags |= csum_flags;
	      vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
	      vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
	      b1->flags |= csum_flags;
	      vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
	      vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
	    }
	  /* IPv4 UDP checksum only if checksum offload is used */
	  else if (is_ip4)
	    {
	      ip_csum_t sum0 = ip4_0->checksum;
	      sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
		  length /* changed member */);
	      ip4_0->checksum = ip_csum_fold (sum0);
	      ip_csum_t sum1 = ip4_1->checksum;
	      sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
		  length /* changed member */);
	      ip4_1->checksum = ip_csum_fold (sum1);
	    }
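
	  /* Note: the tunnel rewrite is precomputed with a length of 0, so
	   * the updates above fold in only the new length field (RFC 1624
	   * incremental update) instead of recomputing the whole IPv4
	   * header checksum. */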
	  /* IPv6 UDP checksum is mandatory */
	  else
	    {
	      int bogus = 0;

	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
		(vm, b0, ip6_0, &bogus);
	      ASSERT (bogus == 0);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	      udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
		(vm, b1, ip6_1, &bogus);
	      ASSERT (bogus == 0);
	      if (udp1->checksum == 0)
		udp1->checksum = 0xffff;
	    }
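
	  /* A computed checksum of zero is transmitted as 0xffff above:
	   * zero on the wire means "no checksum" in UDP, which RFC 2460
	   * forbids over IPv6. */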

	  /* Batch stats increment on the same vxlan tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnels where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (sw_if_index0 == sw_if_index1)
	    {
	      if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
		{
		  if (stats_n_packets)
		    vlib_increment_combined_counter (tx_counter, thread_index,
			stats_sw_if_index, stats_n_packets, stats_n_bytes);
		  stats_n_packets = stats_n_bytes = 0;
		  stats_sw_if_index = sw_if_index0;
		}
	      stats_n_packets += 2;
	      stats_n_bytes += len0 + len1;
	    }
	  else
	    {
	      vlib_increment_combined_counter (tx_counter, thread_index,
		  sw_if_index0, 1, len0);
	      vlib_increment_combined_counter (tx_counter, thread_index,
		  sw_if_index1, 1, len1);
	    }
	  pkts_encapsulated += 2;

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_encap_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->tunnel_index = t0 - vxm->tunnels;
	      tr->vni = t0->vni;
	    }

	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_encap_trace_t *tr =
		vlib_add_trace (vm, node, b1, sizeof (*tr));
	      tr->tunnel_index = t1 - vxm->tunnels;
	      tr->vni = t1->vni;
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0 = from[0];
	  vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
	  u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);

	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  /* Get next node index and adj index from tunnel next_dpo */
	  if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
	    {
	      sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
	      vnet_hw_interface_t *hi0 =
		  vnet_get_sup_hw_interface (vnm, sw_if_index0);
	      t0 = &vxm->tunnels[hi0->dev_instance];
	      /* Note: change to always set next0 if it may be set to drop */
	      next0 = t0->next_dpo.dpoi_next_node;
	      dpoi_idx0 = t0->next_dpo.dpoi_index;
	    }
	  vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;

	  ASSERT(vec_len(t0->rewrite) == underlay_hdr_len);
	  vlib_buffer_advance (b0, -underlay_hdr_len);

	  u32 len0 = vlib_buffer_length_in_chain (vm, b0);
	  u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

	  udp_header_t * udp0;
	  ip4_header_t * ip4_0;
	  ip6_header_t * ip6_0;
	  u8 * l3_0;
	  if (is_ip4)
	    {
	      ip4_vxlan_header_t * rewrite = (void *)t0->rewrite;
	      ip4_vxlan_header_t * hdr = vlib_buffer_get_current(b0);
	      *hdr = *rewrite;

	      /* Fix the IP4 checksum and length */
	      ip4_0 = &hdr->ip4;
	      ip4_0->length = clib_host_to_net_u16 (len0);

	      l3_0 = (u8 *) ip4_0;
	      udp0 = &hdr->udp;
	    }
	  else /* ipv6 underlay */
	    {
	      ip6_vxlan_header_t * hdr = vlib_buffer_get_current(b0);
	      ip6_vxlan_header_t * rewrite = (void *) t0->rewrite;
	      *hdr = *rewrite;

	      /* Fix IP6 payload length */
	      ip6_0 = &hdr->ip6;
	      ip6_0->payload_length = payload_l0;

	      l3_0 = (u8 *) ip6_0;
	      udp0 = &hdr->udp;
	    }

	  /* Fix UDP length and set source port */
	  udp0->length = payload_l0;
	  udp0->src_port = flow_hash0;

	  if (csum_offload)
	    {
	      b0->flags |= csum_flags;
	      vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
	      vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
	    }
	  /* IPv4 UDP checksum only if checksum offload is used */
	  else if (is_ip4)
	    {
	      ip_csum_t sum0 = ip4_0->checksum;
	      sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
		  length /* changed member */);
	      ip4_0->checksum = ip_csum_fold (sum0);
	    }
	  /* IPv6 UDP checksum is mandatory */
	  else
	    {
	      int bogus = 0;

	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
		(vm, b0, ip6_0, &bogus);
	      ASSERT (bogus == 0);
	      if (udp0->checksum == 0)
		udp0->checksum = 0xffff;
	    }

	  /* Batch stats increment on the same vxlan tunnel so counter is not
	     incremented per packet. Note stats are still incremented for deleted
	     and admin-down tunnels where packets are dropped. It is not worthwhile
	     to check for this rare case and affect normal path performance. */
	  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
	    {
	      if (stats_n_packets)
		vlib_increment_combined_counter (tx_counter, thread_index,
		    stats_sw_if_index, stats_n_packets, stats_n_bytes);
	      stats_n_bytes = stats_n_packets = 0;
	      stats_sw_if_index = sw_if_index0;
	    }
	  stats_n_packets += 1;
	  stats_n_bytes += len0;
	  pkts_encapsulated++;

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_encap_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->tunnel_index = t0 - vxm->tunnels;
	      tr->vni = t0->vni;
	    }
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
			       VXLAN_ENCAP_ERROR_ENCAPSULATED,
			       pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter (tx_counter, thread_index,
	  stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

static uword
vxlan4_encap (vlib_main_t * vm,
	      vlib_node_runtime_t * node,
	      vlib_frame_t * from_frame)
{
  /* Disable checksum offload as setup overhead in tx node is not worthwhile
     for ip4 header checksum only, unless udp checksum is also required */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
			     /* csum_offload */ 0);
}

static uword
vxlan6_encap (vlib_main_t * vm,
	      vlib_node_runtime_t * node,
	      vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6 as udp checksum is mandatory */
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
			     /* csum_offload */ 1);
}

VLIB_REGISTER_NODE (vxlan4_encap_node) = {
  .function = vxlan4_encap,
  .name = "vxlan4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_encap_node, vxlan4_encap)

VLIB_REGISTER_NODE (vxlan6_encap_node) = {
  .function = vxlan6_encap,
  .name = "vxlan6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_encap_node, vxlan6_encap)