gtpu: migrate old MULTIARCH macros to VLIB_NODE_FN
src/plugins/gtpu/gtpu_encap.c
/*
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <gtpu/gtpu.h>

/* Statistics (not all errors) */
#define foreach_gtpu_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char * gtpu_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_gtpu_encap_error
#undef _
};

typedef enum {
#define _(sym,str) GTPU_ENCAP_ERROR_##sym,
    foreach_gtpu_encap_error
#undef _
    GTPU_ENCAP_N_ERROR,
} gtpu_encap_error_t;

#define foreach_gtpu_encap_next        \
_(DROP, "error-drop")                  \
_(IP4_LOOKUP, "ip4-lookup")            \
_(IP6_LOOKUP, "ip6-lookup")

typedef enum {
    GTPU_ENCAP_NEXT_DROP,
    GTPU_ENCAP_NEXT_IP4_LOOKUP,
    GTPU_ENCAP_NEXT_IP6_LOOKUP,
    GTPU_ENCAP_N_NEXT,
} gtpu_encap_next_t;


#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)

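/* The offset loops above walk the precomputed tunnel rewrite, i.e. the
   fixed encap header: ip4(20) + udp(8) + gtpu-v1(8) = 36 octets, copied
   as four u64s plus one u32 tail, or ip6(40) + udp(8) + gtpu-v1(8) = 56
   octets, copied as seven u64s. */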
always_inline uword
gtpu_encap_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
                    vlib_frame_t * from_frame,
                    u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  gtpu_main_t * gtm = &gtpu_main;
  vnet_main_t * vnm = gtm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0, old_l2 = 0, old_l3 = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = 0, sw_if_index1 = 0, sw_if_index2 = 0, sw_if_index3 = 0;
  u32 next0 = 0, next1 = 0, next2 = 0, next3 = 0;
  vnet_hw_interface_t * hi0, * hi1, * hi2, * hi3;
  gtpu_tunnel_t * t0 = NULL, * t1 = NULL, * t2 = NULL, * t3 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

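      /* Quad loop: handle four packets per iteration, entered only while
         at least eight remain so the next four buffer headers and the
         first two cache lines of their data can be prefetched. */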
      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          u32 bi0, bi1, bi2, bi3;
          vlib_buffer_t * b0, * b1, * b2, * b3;
          u32 flow_hash0, flow_hash1, flow_hash2, flow_hash3;
          u32 len0, len1, len2, len3;
          ip4_header_t * ip4_0, * ip4_1, * ip4_2, * ip4_3;
          ip6_header_t * ip6_0, * ip6_1, * ip6_2, * ip6_3;
          udp_header_t * udp0, * udp1, * udp2, * udp3;
          gtpu_header_t * gtpu0, * gtpu1, * gtpu2, * gtpu3;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u64 * copy_src2, * copy_dst2;
          u64 * copy_src3, * copy_dst3;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          u32 * copy_src_last2, * copy_dst_last2;
          u32 * copy_src_last3, * copy_dst_last3;
          u16 new_l0, new_l1, new_l2, new_l3;
          ip_csum_t sum0, sum1, sum2, sum3;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p4, * p5, * p6, * p7;

            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);
            p6 = vlib_get_buffer (vm, from[6]);
            p7 = vlib_get_buffer (vm, from[7]);

            vlib_prefetch_buffer_header (p4, LOAD);
            vlib_prefetch_buffer_header (p5, LOAD);
            vlib_prefetch_buffer_header (p6, LOAD);
            vlib_prefetch_buffer_header (p7, LOAD);

            CLIB_PREFETCH (p4->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p5->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p6->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p7->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          bi2 = from[2];
          bi3 = from[3];
          to_next[0] = bi0;
          to_next[1] = bi1;
          to_next[2] = bi2;
          to_next[3] = bi3;
          from += 4;
          to_next += 4;
          n_left_to_next -= 4;
          n_left_from -= 4;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);
          flow_hash2 = vnet_l2_compute_flow_hash (b2);
          flow_hash3 = vnet_l2_compute_flow_hash (b3);
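          /* The inner L2 flow hash later becomes the outer UDP source
             port (truncated to 16 bits), giving per-flow entropy for
             ECMP/LAG hashing in the underlay. */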

          /* Get next node index and adj index from tunnel next_dpo */
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
          sw_if_index2 = vnet_buffer(b2)->sw_if_index[VLIB_TX];
          sw_if_index3 = vnet_buffer(b3)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
          hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
          hi2 = vnet_get_sup_hw_interface (vnm, sw_if_index2);
          hi3 = vnet_get_sup_hw_interface (vnm, sw_if_index3);
          t0 = &gtm->tunnels[hi0->dev_instance];
          t1 = &gtm->tunnels[hi1->dev_instance];
          t2 = &gtm->tunnels[hi2->dev_instance];
          t3 = &gtm->tunnels[hi3->dev_instance];

          /* Note: change to always set next0 if it may be set to drop */
          next0 = t0->next_dpo.dpoi_next_node;
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;
          next1 = t1->next_dpo.dpoi_next_node;
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;
          next2 = t2->next_dpo.dpoi_next_node;
          vnet_buffer(b2)->ip.adj_index[VLIB_TX] = t2->next_dpo.dpoi_index;
          next3 = t3->next_dpo.dpoi_next_node;
          vnet_buffer(b3)->ip.adj_index[VLIB_TX] = t3->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));
          vlib_buffer_advance (b2, -(word)_vec_len(t2->rewrite));
          vlib_buffer_advance (b3, -(word)_vec_len(t3->rewrite));
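          /* The negative advance above moves current_data back into the
             buffer headroom, exposing exactly enough space for the
             precomputed encap header about to be copied in. */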

          if (is_ip4)
            {
              ip4_0 = vlib_buffer_get_current(b0);
              ip4_1 = vlib_buffer_get_current(b1);
              ip4_2 = vlib_buffer_get_current(b2);
              ip4_3 = vlib_buffer_get_current(b3);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;
              copy_dst2 = (u64 *) ip4_2;
              copy_src2 = (u64 *) t2->rewrite;
              copy_dst3 = (u64 *) ip4_3;
              copy_src3 = (u64 *) t3->rewrite;

              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst2[offs] = copy_src2[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst3[offs] = copy_src3[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *)(&copy_dst1[4]);
              copy_src_last1 = (u32 *)(&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];
              copy_dst_last2 = (u32 *)(&copy_dst2[4]);
              copy_src_last2 = (u32 *)(&copy_src2[4]);
              copy_dst_last2[0] = copy_src_last2[0];
              copy_dst_last3 = (u32 *)(&copy_dst3[4]);
              copy_src_last3 = (u32 *)(&copy_src3[4]);
              copy_dst_last3[0] = copy_src_last3[0];

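              /* The rewrite was built with ip->length = 0 and a checksum
                 to match, so folding in the delta from 0 to the real
                 length via ip_csum_update() is all that is needed. */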
              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */);
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;
              sum1 = ip4_1->checksum;
              new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                     length /* changed member */);
              ip4_1->checksum = ip_csum_fold (sum1);
              ip4_1->length = new_l1;
              sum2 = ip4_2->checksum;
              new_l2 = /* old_l2 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2));
              sum2 = ip_csum_update (sum2, old_l2, new_l2, ip4_header_t,
                                     length /* changed member */);
              ip4_2->checksum = ip_csum_fold (sum2);
              ip4_2->length = new_l2;
              sum3 = ip4_3->checksum;
              new_l3 = /* old_l3 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3));
              sum3 = ip_csum_update (sum3, old_l3, new_l3, ip4_header_t,
                                     length /* changed member */);
              ip4_3->checksum = ip_csum_fold (sum3);
              ip4_3->length = new_l3;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip4_0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *)(ip4_1+1);
              new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
                                             - sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
              udp2 = (udp_header_t *)(ip4_2+1);
              new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
                                             - sizeof (*ip4_2));
              udp2->length = new_l2;
              udp2->src_port = flow_hash2;
              udp3 = (udp_header_t *)(ip4_3+1);
              new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
                                             - sizeof (*ip4_3));
              udp3->length = new_l3;
              udp3->src_port = flow_hash3;

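              /* The GTPv1-U length field counts only what follows the
                 mandatory 8-octet header, hence the extra GTPU_V1_HDR_LEN
                 subtracted below. */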
              /* Fix GTPU length */
              gtpu0 = (gtpu_header_t *)(udp0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0) - sizeof(*udp0)
                                             - GTPU_V1_HDR_LEN);
              gtpu0->length = new_l0;
              gtpu1 = (gtpu_header_t *)(udp1+1);
              new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
                                             - sizeof (*ip4_1) - sizeof(*udp1)
                                             - GTPU_V1_HDR_LEN);
              gtpu1->length = new_l1;
              gtpu2 = (gtpu_header_t *)(udp2+1);
              new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
                                             - sizeof (*ip4_2) - sizeof(*udp2)
                                             - GTPU_V1_HDR_LEN);
              gtpu2->length = new_l2;
              gtpu3 = (gtpu_header_t *)(udp3+1);
              new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
                                             - sizeof (*ip4_3) - sizeof(*udp3)
                                             - GTPU_V1_HDR_LEN);
              gtpu3->length = new_l3;
            }
          else /* ipv6 */
            {
              int bogus = 0;

              ip6_0 = vlib_buffer_get_current(b0);
              ip6_1 = vlib_buffer_get_current(b1);
              ip6_2 = vlib_buffer_get_current(b2);
              ip6_3 = vlib_buffer_get_current(b3);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;
              copy_dst2 = (u64 *) ip6_2;
              copy_src2 = (u64 *) t2->rewrite;
              copy_dst3 = (u64 *) ip6_3;
              copy_src3 = (u64 *) t3->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst2[offs] = copy_src2[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst3[offs] = copy_src3[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof(*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                      - sizeof(*ip6_1));
              ip6_1->payload_length = new_l1;
              new_l2 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b2)
                                      - sizeof(*ip6_2));
              ip6_2->payload_length = new_l2;
              new_l3 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b3)
                                      - sizeof(*ip6_3));
              ip6_3->payload_length = new_l3;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip6_0+1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *)(ip6_1+1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
              udp2 = (udp_header_t *)(ip6_2+1);
              udp2->length = new_l2;
              udp2->src_port = flow_hash2;
              udp3 = (udp_header_t *)(ip6_3+1);
              udp3->length = new_l3;
              udp3->src_port = flow_hash3;

              /* Fix GTPU length */
              gtpu0 = (gtpu_header_t *)(udp0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip6_0) - sizeof(*udp0)
                                             - GTPU_V1_HDR_LEN);
              gtpu0->length = new_l0;
              gtpu1 = (gtpu_header_t *)(udp1+1);
              new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
                                             - sizeof (*ip6_1) - sizeof(*udp1)
                                             - GTPU_V1_HDR_LEN);
              gtpu1->length = new_l1;
              gtpu2 = (gtpu_header_t *)(udp2+1);
              new_l2 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b2)
                                             - sizeof (*ip6_2) - sizeof(*udp2)
                                             - GTPU_V1_HDR_LEN);
              gtpu2->length = new_l2;
              gtpu3 = (gtpu_header_t *)(udp3+1);
              new_l3 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b3)
                                             - sizeof (*ip6_3) - sizeof(*udp3)
                                             - GTPU_V1_HDR_LEN);
              gtpu3->length = new_l3;

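              /* Unlike the incremental IPv4 fix-up, this checksum walks
                 the whole payload. A computed sum of 0 is transmitted as
                 0xffff, since a zero UDP checksum is forbidden over IPv6
                 (RFC 8200). */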
              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
                                                                 ip6_0, &bogus);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b1,
                                                                 ip6_1, &bogus);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
              udp2->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b2,
                                                                 ip6_2, &bogus);
              if (udp2->checksum == 0)
                udp2->checksum = 0xffff;
              udp3->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b3,
                                                                 ip6_3, &bogus);
              if (udp3->checksum == 0)
                udp3->checksum = 0xffff;
            }

          pkts_encapsulated += 4;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          len2 = vlib_buffer_length_in_chain (vm, b2);
          len3 = vlib_buffer_length_in_chain (vm, b3);
          stats_n_packets += 4;
          stats_n_bytes += len0 + len1 + len2 + len3;

          /* Batch stats increment on the same gtpu tunnel so the counter is
             not incremented per packet. Note stats are still incremented for
             deleted and admin-down tunnels where packets are dropped. It is
             not worthwhile to check for this rare case and affect normal path
             performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index) ||
                             (sw_if_index2 != stats_sw_if_index) ||
                             (sw_if_index3 != stats_sw_if_index) ))
            {
              stats_n_packets -= 4;
              stats_n_bytes -= len0 + len1 + len2 + len3;
              if ( (sw_if_index0 == sw_if_index1 ) &&
                   (sw_if_index1 == sw_if_index2 ) &&
                   (sw_if_index2 == sw_if_index3 ) )
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                       thread_index, stats_sw_if_index,
                       stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 4;
                  stats_n_bytes = len0 + len1 + len2 + len3;
                }
              else
                {
                  vlib_increment_combined_counter
                      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                       thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                       thread_index, sw_if_index1, 1, len1);
                  vlib_increment_combined_counter
                      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                       thread_index, sw_if_index2, 1, len2);
                  vlib_increment_combined_counter
                      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                       thread_index, sw_if_index3, 1, len3);
                }
            }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - gtm->tunnels;
              tr->teid = t0->teid;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - gtm->tunnels;
              tr->teid = t1->teid;
            }

          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b2, sizeof (*tr));
              tr->tunnel_index = t2 - gtm->tunnels;
              tr->teid = t2->teid;
            }

          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b3, sizeof (*tr));
              tr->tunnel_index = t3 - gtm->tunnels;
              tr->teid = t3->teid;
            }

          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          udp_header_t * udp0;
          gtpu_header_t * gtpu0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash(b0);

          /* Get next node index and adj index from tunnel next_dpo */
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
          t0 = &gtm->tunnels[hi0->dev_instance];
          /* Note: change to always set next0 if it may be set to drop */
          next0 = t0->next_dpo.dpoi_next_node;
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));

          if (is_ip4)
            {
              ip4_0 = vlib_buffer_get_current(b0);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */);
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip4_0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* Fix GTPU length */
              gtpu0 = (gtpu_header_t *)(udp0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0) - sizeof(*udp0)
                                             - GTPU_V1_HDR_LEN);
              gtpu0->length = new_l0;
            }

          else /* ip6 path */
            {
              int bogus = 0;

              ip6_0 = vlib_buffer_get_current(b0);
              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof(*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip6_0+1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* Fix GTPU length */
              gtpu0 = (gtpu_header_t *)(udp0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip6_0) - sizeof(*udp0)
                                             - GTPU_V1_HDR_LEN);
              gtpu0->length = new_l0;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
                                                                 ip6_0, &bogus);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          pkts_encapsulated++;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same gtpu tunnel so the counter is
             not incremented per packet. Note stats are still incremented for
             deleted and admin-down tunnels where packets are dropped. It is
             not worthwhile to check for this rare case and affect normal path
             performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              gtpu_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - gtm->tunnels;
              tr->teid = t0->teid;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats are kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               GTPU_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

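/* VLIB_NODE_FN replaces the old MULTIARCH macros: it emits one variant of
   the node function per enabled CPU march and selects the best one at
   runtime, so no explicit per-arch clone/dispatch boilerplate is needed. */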
VLIB_NODE_FN (gtpu4_encap_node) (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return gtpu_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (gtpu6_encap_node) (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return gtpu_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (gtpu4_encap_node) = {
  .name = "gtpu4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_gtpu_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(gtpu_encap_error_strings),
  .error_strings = gtpu_encap_error_strings,
  .n_next_nodes = GTPU_ENCAP_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_ENCAP_NEXT_##s] = n,
    foreach_gtpu_encap_next
#undef _
  },
};

VLIB_REGISTER_NODE (gtpu6_encap_node) = {
  .name = "gtpu6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_gtpu_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(gtpu_encap_error_strings),
  .error_strings = gtpu_encap_error_strings,
  .n_next_nodes = GTPU_ENCAP_N_NEXT,
  .next_nodes = {
#define _(s,n) [GTPU_ENCAP_NEXT_##s] = n,
    foreach_gtpu_encap_next
#undef _
  },
};