/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <gtpu/gtpu.h>

/* Statistics (not all errors) */
#define foreach_gtpu_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")
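
/* The X-macro above is expanded twice: once into the counter strings,
   once into the gtpu_encap_error_t enum values. */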
static char * gtpu_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_gtpu_encap_error
#undef _
};

typedef enum {
#define _(sym,str) GTPU_ENCAP_ERROR_##sym,
    foreach_gtpu_encap_error
#undef _
    GTPU_ENCAP_N_ERROR,
} gtpu_encap_error_t;

typedef struct {
  u32 tunnel_index;
  u32 teid;
} gtpu_encap_trace_t;

u8 * format_gtpu_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gtpu_encap_trace_t * t
      = va_arg (*args, gtpu_encap_trace_t *);

  s = format (s, "GTPU encap to gtpu_tunnel%d teid %d",
              t->tunnel_index, t->teid);
  return s;
}

#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)
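
/* These size the word-copy of the precomputed encap header: the ip4 rewrite
   is 36 octets (20 ip4 + 8 udp + 8 gtpu), four u64 words plus a trailing u32;
   the ip6 rewrite is 56 octets (40 ip6 + 8 udp + 8 gtpu), seven u64 words. */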

always_inline uword
gtpu_encap_inline (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * from_frame,
                   u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t * hi0, * hi1;
  gtpu_tunnel_t * t0 = NULL, * t1 = NULL;
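
  /* sw_if_index0/1 and t0/t1 persist across loop iterations: back-to-back
     packets for the same tunnel interface skip the hw-interface lookup. */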

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 flow_hash0, flow_hash1;
          u32 len0, len1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          udp_header_t * udp0, * udp1;
          gtpu_header_t * gtpu0, * gtpu1;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);
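
          /* The inner-packet flow hash computed above is used below as the
             outer UDP source port, giving ECMP load-balancing entropy per
             inner flow within a tunnel. */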

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &gtm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer(b1)->sw_if_index[VLIB_TX])
            {
              sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
              hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
              t1 = &gtm->tunnels[hi1->dev_instance];
              /* Note: change to always set next1 if it may be set to drop */
              next1 = t1->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          /* (Advancing by a negative amount exposes rewrite-length bytes of
             headroom in front of the payload, where the encap header goes.) */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));

          if (is_ip4)
            {
              ip4_0 = vlib_buffer_get_current(b0);
              ip4_1 = vlib_buffer_get_current(b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;

              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *)(&copy_dst1[4]);
              copy_src_last1 = (u32 *)(&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];
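
              /* The rewrite was generated with an IP length of 0 (see
                 old_l0/old_l1 above), so the header checksum can be updated
                 incrementally (RFC 1624) instead of being recomputed. */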
              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */);
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;
              sum1 = ip4_1->checksum;
              new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                     length /* changed member */);
              ip4_1->checksum = ip_csum_fold (sum1);
              ip4_1->length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip4_0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *)(ip4_1+1);
              new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
                                             - sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;

              /* Fix GTPU length. Per 3GPP TS 29.281 the length field counts
                 the octets following the 8-octet mandatory header, so that
                 header is subtracted along with the outer ip4/udp headers. */
              gtpu0 = (gtpu_header_t *)(udp0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0) - sizeof(*udp0)
                                             - sizeof(*gtpu0));
              gtpu0->length = new_l0;
              gtpu1 = (gtpu_header_t *)(udp1+1);
              new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
                                             - sizeof (*ip4_1) - sizeof(*udp1)
                                             - sizeof(*gtpu1));
              gtpu1->length = new_l1;
            }
          else /* ipv6 */
            {
              int bogus = 0;

              ip6_0 = vlib_buffer_get_current(b0);
              ip6_1 = vlib_buffer_get_current(b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;

              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _

              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof(*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                      - sizeof(*ip6_1));
              ip6_1->payload_length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip6_0+1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *)(ip6_1+1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
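
              /* Zero means "no checksum" for UDP, so RFC 2460 requires an
                 all-zeros result to be transmitted as all-ones; hence the
                 fixups below. */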
              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
                                                                 ip6_0, &bogus);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b1,
                                                                 ip6_1, &bogus);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;

              /* Fix GTPU length (note: the ip6, not ip4, header size here) */
              gtpu0 = (gtpu_header_t *)(udp0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip6_0) - sizeof(*udp0)
                                             - sizeof(*gtpu0));
              gtpu0->length = new_l0;
              gtpu1 = (gtpu_header_t *)(udp1+1);
              new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
                                             - sizeof (*ip6_1) - sizeof(*udp1)
                                             - sizeof(*gtpu1));
              gtpu1->length = new_l1;
            }

          pkts_encapsulated += 2;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same gtpu tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                       thread_index, stats_sw_if_index,
                       stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index1, 1, len1);
                }
            }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - gtm->tunnels;
              tr->teid = t0->teid;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - gtm->tunnels;
              tr->teid = t1->teid;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }
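
      /* Same encap path, one packet at a time, for what is left of the frame. */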
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          udp_header_t * udp0;
          gtpu_header_t * gtpu0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash(b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &gtm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));

          if (is_ip4)
            {
              ip4_0 = vlib_buffer_get_current(b0);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;

              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */);
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip4_0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* Fix GTPU length (excludes the 8-octet mandatory GTP-U header) */
              gtpu0 = (gtpu_header_t *)(udp0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0) - sizeof(*udp0)
                                             - sizeof(*gtpu0));
              gtpu0->length = new_l0;
            }
          else /* ipv6 */
            {
              int bogus = 0;

              ip6_0 = vlib_buffer_get_current(b0);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;

              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _

              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof(*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip6_0+1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
                                                                 ip6_0, &bogus);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;

              /* Fix GTPU length (ip6 header size; length excludes the
                 8-octet mandatory GTP-U header) */
              gtpu0 = (gtpu_header_t *)(udp0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip6_0) - sizeof(*udp0)
                                             - sizeof(*gtpu0));
              gtpu0->length = new_l0;
            }

          pkts_encapsulated ++;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - gtm->tunnels;
              tr->teid = t0->teid;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               GTPU_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

static uword
gtpu4_encap (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return gtpu_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
gtpu6_encap (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return gtpu_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (gtpu4_encap_node) = {
  .function = gtpu4_encap,
  .name = "gtpu4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_gtpu_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(gtpu_encap_error_strings),
  .error_strings = gtpu_encap_error_strings,
  .n_next_nodes = GTPU_ENCAP_N_NEXT,
  .next_nodes = {
        [GTPU_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (gtpu4_encap_node, gtpu4_encap)

VLIB_REGISTER_NODE (gtpu6_encap_node) = {
  .function = gtpu6_encap,
  .name = "gtpu6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_gtpu_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(gtpu_encap_error_strings),
  .error_strings = gtpu_encap_error_strings,
  .n_next_nodes = GTPU_ENCAP_N_NEXT,
  .next_nodes = {
        [GTPU_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (gtpu6_encap_node, gtpu6_encap)