/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan-gbp/vxlan_gbp.h>
#include <vnet/qos/qos_types.h>
#include <vnet/adj/rewrite.h>
/* Statistics (not all errors) */
#define foreach_vxlan_gbp_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char *vxlan_gbp_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gbp_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_GBP_ENCAP_ERROR_##sym,
  foreach_vxlan_gbp_encap_error
#undef _
    VXLAN_GBP_ENCAP_N_ERROR,
} vxlan_gbp_encap_error_t;
typedef enum
{
  VXLAN_GBP_ENCAP_NEXT_DROP,
  VXLAN_GBP_ENCAP_N_NEXT,
} vxlan_gbp_encap_next_t;

typedef struct
{
  u32 tunnel_index;
  u32 vni;
  u16 sclass;
  u8 flags;
} vxlan_gbp_encap_trace_t;
#ifndef CLIB_MARCH_VARIANT
u8 *
format_vxlan_gbp_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_gbp_encap_trace_t *t = va_arg (*args, vxlan_gbp_encap_trace_t *);

  s = format (s,
              "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d flags %U",
              t->tunnel_index, t->vni, t->sclass,
              format_vxlan_gbp_header_gpflags, t->flags);
  return s;
}
#endif /* CLIB_MARCH_VARIANT */
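/*
 * Example trace output (illustrative only; the actual tunnel index, vni,
 * sclass and flag letters depend on the configuration and on
 * format_vxlan_gbp_header_gpflags):
 *
 *   VXLAN_GBP encap to vxlan_gbp_tunnel0 vni 100 sclass 16 flags G|A
 */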
always_inline uword
vxlan_gbp_encap_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * from_frame, u8 is_ip4, u8 csum_offload)
{
  u32 n_left_from, next_index, *from, *to_next;
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *tx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
  u32 pkts_encapsulated = 0;
  u32 thread_index = vlib_get_thread_index ();
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vxlan_gbp_tunnel_t *t0 = NULL, *t1 = NULL;
  index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  next_index = node->cached_next_index;

  STATIC_ASSERT_SIZEOF (ip6_vxlan_gbp_header_t, 56);
  STATIC_ASSERT_SIZEOF (ip4_vxlan_gbp_header_t, 36);

  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
  u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
  u32 const csum_flags = is_ip4 ? VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
    VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM :
    VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
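  /*
   * For the IPv4 underlay both the IP header and UDP checksums are
   * requested from the offload engine; IPv6 has no header checksum, so
   * only the UDP checksum offload flag is set.
   */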
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }
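          /*
           * Buffers 2 and 3 are prefetched while buffers 0 and 1 are
           * processed, so their headers and the cache lines around the
           * rewrite area are warm by the next iteration.
           */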
          u32 bi0 = to_next[0] = from[0];
          u32 bi1 = to_next[1] = from[1];
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          u32 flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
          u32 flow_hash1 = vnet_l2_compute_flow_hash (b[1]);
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
            {
              if (sw_if_index0 == vnet_buffer (b[1])->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
                  next1 = next0;
                  dpoi_idx1 = dpoi_idx0;
                }
              else
                {
                  sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
                  /* Note: change to always set next1 if it may be set to drop */
                  next1 = t1->next_dpo.dpoi_next_node;
                  dpoi_idx1 = t1->next_dpo.dpoi_index;
                }
            }
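          /*
           * The tunnel lookup is cached across buffers: when consecutive
           * packets leave via the same sw_if_index, the previously resolved
           * tunnel, next node and DPO index are reused instead of repeating
           * the hw-interface lookup.
           */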
          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpoi_idx0;
          vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpoi_idx1;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b[0]),
                                    vlib_buffer_get_current (b[1]),
                                    underlay_hdr_len);

          vlib_buffer_advance (b[0], -underlay_hdr_len);
          vlib_buffer_advance (b[1], -underlay_hdr_len);
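          /*
           * The rewrite string (an IP + UDP + VXLAN-GBP header template
           * built at tunnel-create time) is copied in front of the current
           * data; advancing by -underlay_hdr_len then makes the new encap
           * header the start of the packet.
           */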
          u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
          u32 len1 = vlib_buffer_length_in_chain (vm, b[1]);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);

          void *underlay0 = vlib_buffer_get_current (b[0]);
          void *underlay1 = vlib_buffer_get_current (b[1]);

          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          vxlan_gbp_header_t *vxlan_gbp0, *vxlan_gbp1;
          u8 *l3_0, *l3_1;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr0 = underlay0;
              ip4_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr0->ip4;
              ip4_1 = &hdr1->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);

              if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b[0])->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
              if (PREDICT_FALSE (b[1]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_1_tos = vnet_buffer2 (b[1])->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }

              l3_0 = (u8 *) ip4_0;
              l3_1 = (u8 *) ip4_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }
          else                  /* ipv6 */
            {
              ip6_vxlan_gbp_header_t *hdr0 = underlay0;
              ip6_vxlan_gbp_header_t *hdr1 = underlay1;

              /* Fix IP6 payload length */
              ip6_0 = &hdr0->ip6;
              ip6_1 = &hdr1->ip6;
              ip6_0->payload_length = payload_l0;
              ip6_1->payload_length = payload_l1;

              l3_0 = (u8 *) ip6_0;
              l3_1 = (u8 *) ip6_1;
              udp0 = &hdr0->udp;
              udp1 = &hdr1->udp;
              vxlan_gbp0 = &hdr0->vxlan_gbp;
              vxlan_gbp1 = &hdr1->vxlan_gbp;
            }
          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;
          udp1->length = payload_l1;
          udp1->src_port = flow_hash1;

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b[0])->gbp.flags;
          vxlan_gbp1->gpflags = vnet_buffer2 (b[1])->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b[0])->gbp.sclass);
          vxlan_gbp1->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b[1])->gbp.sclass);
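          /*
           * Deriving the UDP source port from the inner flow hash spreads
           * the flows of one tunnel across ECMP paths in the underlay,
           * while sclass/gpflags carry the GBP source class so the
           * receiving VTEP can apply group-based policy.
           */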
          if (csum_offload)
            {
              b[0]->flags |= csum_flags;
              vnet_buffer (b[0])->l3_hdr_offset = l3_0 - b[0]->data;
              vnet_buffer (b[0])->l4_hdr_offset = (u8 *) udp0 - b[0]->data;
              b[1]->flags |= csum_flags;
              vnet_buffer (b[1])->l3_hdr_offset = l3_1 - b[1]->data;
              vnet_buffer (b[1])->l4_hdr_offset = (u8 *) udp1 - b[1]->data;
            }
          /* IPv4 UDP checksum only if checksum offload is used */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
              ip_csum_t sum1 = ip4_1->checksum;
              sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_1_tos))
                {
                  sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_1->checksum = ip_csum_fold (sum1);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b[0], ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b[1], ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }
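          /*
           * A computed checksum of zero is transmitted as all-ones
           * (0xffff): a zero UDP checksum on the wire means "no checksum",
           * which is not allowed for UDP over IPv6 (RFC 8200 s8.1).
           */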
          /* save inner packet flow_hash for load-balance node */
          vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
          vnet_buffer (b[1])->ip.flow_hash = flow_hash1;

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index1, 1, len1);
          pkts_encapsulated += 2;
          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b[0])->gbp.sclass;
              tr->flags = vnet_buffer2 (b[0])->gbp.flags;
            }

          if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[1], sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
              tr->sclass = vnet_buffer2 (b[1])->gbp.sclass;
              tr->flags = vnet_buffer2 (b[1])->gbp.flags;
            }
          b += 2;

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = to_next[0] = from[0];
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          u32 flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpoi_idx0;

          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b[0]),
                                   underlay_hdr_len);

          vlib_buffer_advance (b[0], -underlay_hdr_len);
          void *underlay0 = vlib_buffer_get_current (b[0]);

          u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);

          vxlan_gbp_header_t *vxlan_gbp0;
          udp_header_t *udp0;
          ip4_header_t *ip4_0;
          qos_bits_t ip4_0_tos = 0;
          ip6_header_t *ip6_0;
          u8 *l3_0;
          if (is_ip4)
            {
              ip4_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix the IP4 checksum and length */
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);

              if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
                  ip4_0_tos = vnet_buffer2 (b[0])->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }

              l3_0 = (u8 *) ip4_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }
          else                  /* ipv6 */
            {
              ip6_vxlan_gbp_header_t *hdr = underlay0;

              /* Fix IP6 payload length */
              ip6_0 = &hdr->ip6;
              ip6_0->payload_length = payload_l0;

              l3_0 = (u8 *) ip6_0;
              udp0 = &hdr->udp;
              vxlan_gbp0 = &hdr->vxlan_gbp;
            }
          /* Fix UDP length and set source port */
          udp0->length = payload_l0;
          udp0->src_port = flow_hash0;

          /* set source class and gpflags */
          vxlan_gbp0->gpflags = vnet_buffer2 (b[0])->gbp.flags;
          vxlan_gbp0->sclass =
            clib_host_to_net_u16 (vnet_buffer2 (b[0])->gbp.sclass);
          if (csum_offload)
            {
              b[0]->flags |= csum_flags;
              vnet_buffer (b[0])->l3_hdr_offset = l3_0 - b[0]->data;
              vnet_buffer (b[0])->l4_hdr_offset = (u8 *) udp0 - b[0]->data;
            }
          /* IPv4 UDP checksum only if checksum offload is used */
          else if (is_ip4)
            {
              ip_csum_t sum0 = ip4_0->checksum;
              sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
                                     length /* changed member */ );
              if (PREDICT_FALSE (ip4_0_tos))
                {
                  sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
                                         tos /* changed member */ );
                }
              ip4_0->checksum = ip_csum_fold (sum0);
            }
          /* IPv6 UDP checksum is mandatory */
          else
            {
              int bogus = 0;

              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
                (vm, b[0], ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }
          /* save inner packet flow_hash for load-balance node */
          vnet_buffer (b[0])->ip.flow_hash = flow_hash0;

          vlib_increment_combined_counter (tx_counter, thread_index,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;
          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
              tr->sclass = vnet_buffer2 (b[0])->gbp.sclass;
              tr->flags = vnet_buffer2 (b[0])->gbp.flags;
            }
          b += 1;

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  return from_frame->n_vectors;
}
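/*
 * VLIB_NODE_FN expands to per-CPU-architecture variants of each node
 * function, selected at runtime; code that must be compiled only once
 * (such as the trace formatter above) is guarded with
 * #ifndef CLIB_MARCH_VARIANT.
 */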
VLIB_NODE_FN (vxlan4_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Disable checksum offload as the setup overhead in the tx node is not
     worthwhile for the ip4 header checksum alone, unless the udp checksum
     is also required */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 1,
                                 /* csum_offload */ 0);
}
VLIB_NODE_FN (vxlan6_gbp_encap_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  /* Enable checksum offload for ip6 as the udp checksum is mandatory */
  return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
                                 /* csum_offload */ 1);
}
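/*
 * Buffers arrive at these nodes via the tunnel's interface output path:
 * sw_if_index[VLIB_TX] identifies a vxlan_gbp_tunnel interface, and the
 * IPv4 or IPv6 node is chosen by the tunnel's underlay address family.
 * For reference, a tunnel is typically created with a CLI along these
 * lines (exact syntax may vary between VPP releases):
 *
 *   create vxlan-gbp tunnel src 10.0.0.1 dst 10.0.0.2 vni 100
 */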
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
{
  .name = "vxlan4-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};
VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
{
  .name = "vxlan6-gbp-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_gbp_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
  .error_strings = vxlan_gbp_encap_error_strings,
  .n_next_nodes = VXLAN_GBP_ENCAP_N_NEXT,
  .next_nodes = {
    [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
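/*
 * Only the drop arc is declared statically; the forwarding arcs out of
 * these nodes are established when a tunnel is created and its next_dpo
 * is stacked on the underlay FIB entry, which is where next0/next1
 * (dpoi_next_node) in the inline function above come from.
 */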
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */