/*
 * Copyright (c) 2017 SUSE LLC.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/geneve/geneve.h>
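
/*
 * GENEVE encapsulation node: prepends each tunnel's precomputed outer
 * IP/UDP/GENEVE header (the "rewrite") to packets arriving on the tunnel
 * interface, then fixes up the per-packet fields: IP length and checksum,
 * UDP length and source port, and (for IPv6) the UDP checksum.
 */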

/* Statistics (not all errors) */
#define foreach_geneve_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char *geneve_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_geneve_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) GENEVE_ENCAP_ERROR_##sym,
  foreach_geneve_encap_error
#undef _
    GENEVE_ENCAP_N_ERROR,
} geneve_encap_error_t;

typedef enum
{
  GENEVE_ENCAP_NEXT_DROP,
  GENEVE_ENCAP_N_NEXT,
} geneve_encap_next_t;

#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)
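
/*
 * Size rationale: the fixed ip4 + udp + geneve header is 20 + 8 + 8 = 36
 * octets, copied as four u64 words plus a trailing u32; the ip6 variant is
 * 40 + 8 + 8 = 56 octets, exactly seven u64 words.  Expansion example:
 *
 *   #define _(offs) copy_dst0[offs] = copy_src0[offs];
 *   foreach_fixed_header4_offset;
 *
 * becomes copy_dst0[0] = copy_src0[0]; ... copy_dst0[3] = copy_src0[3];
 */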

always_inline uword
geneve_encap_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, u32 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  geneve_main_t *vxm = &geneve_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t *hi0, *hi1;
  geneve_tunnel_t *t0 = NULL, *t1 = NULL;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
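
  /* Dual-loop: process two packets per iteration while at least four
     buffers remain (two in flight, two being prefetched), then drain the
     remainder one packet at a time. */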
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          u32 flow_hash0, flow_hash1;
          u32 len0, len1;
          ip4_header_t *ip4_0, *ip4_1;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          u64 *copy_src0, *copy_dst0;
          u64 *copy_src1, *copy_dst1;
          u32 *copy_src_last0, *copy_dst_last0;
          u32 *copy_src_last1, *copy_dst_last1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            /* Prefetch one cache line before b->data as well, since the
               rewrite is written in front of the current payload. */
            CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;
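
          /* The L2 flow hash is reused below as the outer UDP source port,
             giving the underlay ECMP/RSS entropy per inner flow. */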
          flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
          flow_hash1 = vnet_l2_compute_flow_hash (b[1]);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
            {
              sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
              hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
              t1 = &vxm->tunnels[hi1->dev_instance];
              /* Note: change to always set next1 if it may be set to drop */
              next1 = t1->next_dpo.dpoi_next_node;
            }
          vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b[0], -(word) _vec_len (t0->rewrite));
          vlib_buffer_advance (b[1], -(word) _vec_len (t1->rewrite));
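
          /* The negative advance moves current_data back by the rewrite
             length, making room for the outer headers in front of the
             existing packet. */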

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
              u8 ip4_geneve_header_total_len1 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
              ip4_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip4_geneve_header_total_len1);

              ip4_0 = vlib_buffer_get_current (b[0]);
              ip4_1 = vlib_buffer_get_current (b[1]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *) (&copy_dst1[4]);
              copy_src_last1 = (u32 *) (&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];
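
              /* The rewrite is built with length fields of 0, so the header
                 checksum can be fixed incrementally (one's-complement update
                 of the single changed 16-bit member) rather than recomputed
                 from scratch. */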
              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;
              sum1 = ip4_1->checksum;
              new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1]));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                     length /* changed member */ );
              ip4_1->checksum = ip_csum_fold (sum1);
              ip4_1->length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip4_1 + 1);
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1]) -
                                      sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
            }
          else                  /* ipv6 */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
              u8 ip6_geneve_header_total_len1 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
              ip6_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip6_geneve_header_total_len1);

              ip6_0 = vlib_buffer_get_current (b[0]);
              ip6_1 = vlib_buffer_get_current (b[1]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0])
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1])
                                      - sizeof (*ip6_1));
              ip6_1->payload_length = new_l1;

              /* Fix UDP length and set source port; the UDP length equals
                 the IP6 payload length since there are no extension headers */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip6_1 + 1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
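
              /* A UDP checksum of zero means "no checksum", which IPv6
                 forbids, so compute it and transmit a result of 0 as
                 0xffff instead. */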
              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[0],
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[1],
                                                                  ip6_1,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          pkts_encapsulated += 2;
          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          len1 = vlib_buffer_length_in_chain (vm, b[1]);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same geneve tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters +
                       VNET_INTERFACE_COUNTER_TX, thread_index,
                       stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index1, 1, len1);
                }
            }

          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[1], sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }
          b += 2;
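
          /* Speculative enqueue: if next0/next1 differ from the cached
             next_index, the validate step moves the buffers to the correct
             next frame. */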
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t *ip4_0;
          ip6_header_t *ip6_0;
          udp_header_t *udp0;
          u64 *copy_src0, *copy_dst0;
          u32 *copy_src_last0, *copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          flow_hash0 = vnet_l2_compute_flow_hash (b[0]);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b[0], -(word) _vec_len (t0->rewrite));

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);

              ip4_0 = vlib_buffer_get_current (b[0]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
            }
          else                  /* ipv6 */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);

              ip6_0 = vlib_buffer_get_current (b[0]);
              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0])
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[0],
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          pkts_encapsulated++;
          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same geneve tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          b += 1;

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               GENEVE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
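
/*
 * Thin per-address-family wrappers: after inlining, is_ip4 is a
 * compile-time constant, so each node body keeps only its own
 * address-family path.
 */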
VLIB_NODE_FN (geneve4_encap_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (geneve6_encap_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (geneve4_encap_node) = {
  .name = "geneve4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (geneve6_encap_node) = {
  .name = "geneve6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */