Revert "Add support for MAP-T CE (VPP-1058)"
[vpp.git] / src / plugins / map / ip6_map_t.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip6_to_ip4.h>
19 #include <vnet/ip/ip4_to_ip6.h>
20
21 #define IP6_MAP_T_DUAL_LOOP
22
/* Next-node indices for the ip6-map-t classifier node. */
typedef enum
{
  IP6_MAPT_NEXT_MAPT_TCP_UDP,	/* TCP/UDP payload: translate header + L4 checksum */
  IP6_MAPT_NEXT_MAPT_ICMP,	/* ICMPv6 payload: translate to ICMPv4 */
  IP6_MAPT_NEXT_MAPT_FRAGMENTED,	/* non-first fragment: port comes from reass cache */
  IP6_MAPT_NEXT_DROP,
  IP6_MAPT_N_NEXT
} ip6_mapt_next_t;
31
/* Next-node indices for the ip6-map-t ICMP translation node. */
typedef enum
{
  IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,	/* translated packet fits the MTU */
  IP6_MAPT_ICMP_NEXT_IP4_FRAG,	/* translated packet needs IPv4 fragmentation */
  IP6_MAPT_ICMP_NEXT_DROP,
  IP6_MAPT_ICMP_N_NEXT
} ip6_mapt_icmp_next_t;
39
/* Next-node indices for the ip6-map-t TCP/UDP translation node. */
typedef enum
{
  IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,	/* translated packet fits the MTU */
  IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,	/* translated packet needs IPv4 fragmentation */
  IP6_MAPT_TCP_UDP_NEXT_DROP,
  IP6_MAPT_TCP_UDP_N_NEXT
} ip6_mapt_tcp_udp_next_t;
47
/* Next-node indices for the ip6-map-t fragment translation node. */
typedef enum
{
  IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,	/* translated fragment fits the MTU */
  IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,	/* translated fragment needs IPv4 fragmentation */
  IP6_MAPT_FRAGMENTED_NEXT_DROP,
  IP6_MAPT_FRAGMENTED_N_NEXT
} ip6_mapt_fragmented_next_t;
55
56 static_always_inline int
57 ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
58                         map_domain_t * d, u16 port)
59 {
60   u32 *ignore = NULL;
61   map_ip4_reass_lock ();
62   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
63                                                        d->flags),
64                                           ip6_map_t_embedded_address (d,
65                                                                       &ip6->
66                                                                       dst_address),
67                                           frag_id_6to4 (frag->identification),
68                                           (ip6->protocol ==
69                                            IP_PROTOCOL_ICMP6) ?
70                                           IP_PROTOCOL_ICMP : ip6->protocol,
71                                           &ignore);
72   if (r)
73     r->port = port;
74
75   map_ip4_reass_unlock ();
76   return !r;
77 }
78
79 /* Returns the associated port or -1 */
80 static_always_inline i32
81 ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
82                       map_domain_t * d)
83 {
84   u32 *ignore = NULL;
85   map_ip4_reass_lock ();
86   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
87                                                        d->flags),
88                                           ip6_map_t_embedded_address (d,
89                                                                       &ip6->
90                                                                       dst_address),
91                                           frag_id_6to4 (frag->identification),
92                                           (ip6->protocol ==
93                                            IP_PROTOCOL_ICMP6) ?
94                                           IP_PROTOCOL_ICMP : ip6->protocol,
95                                           &ignore);
96   i32 ret = r ? r->port : -1;
97   map_ip4_reass_unlock ();
98   return ret;
99 }
100
/* Context handed to the icmp6_to_icmp() translation callbacks below. */
typedef struct
{
  map_domain_t *d;		/* MAP domain the packet was matched against */
  u16 sender_port;		/* L4 port (or ICMP id) used for the security check */
} icmp6_to_icmp_ctx_t;
106
107 static int
108 ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
109 {
110   icmp6_to_icmp_ctx_t *ctx = arg;
111   u32 ip4_sadr;
112
113   //Security check
114   //Note that this prevents an intermediate IPv6 router from answering the request
115   ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
116   if (ip6->src_address.as_u64[0] !=
117       map_get_pfx_net (ctx->d, ip4_sadr, ctx->sender_port)
118       || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
119                                                         ctx->sender_port))
120     return -1;
121
122   ip4->dst_address.as_u32 =
123     ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
124   ip4->src_address.as_u32 = ip4_sadr;
125
126   return 0;
127 }
128
129 static int
130 ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
131                               void *arg)
132 {
133   icmp6_to_icmp_ctx_t *ctx = arg;
134   u32 inner_ip4_dadr;
135
136   //Security check of inner packet
137   inner_ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
138   if (ip6->dst_address.as_u64[0] !=
139       map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->sender_port)
140       || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
141                                                         inner_ip4_dadr,
142                                                         ctx->sender_port))
143     return -1;
144
145   ip4->dst_address.as_u32 = inner_ip4_dadr;
146   ip4->src_address.as_u32 =
147     ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
148
149   return 0;
150 }
151
/*
 * ip6-map-t ICMP node: translates ICMPv6 packets (including any
 * embedded IPv6 packet in error messages) to ICMPv4 via icmp6_to_icmp(),
 * applying the MAP security check in the callbacks above, then forwards
 * to ip4-lookup — or ip4-frag when the result exceeds the domain MTU.
 */
static uword
ip6_map_t_icmp (vlib_main_t * vm,
		vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  u8 error0;
	  ip6_mapt_icmp_next_t next0;
	  map_domain_t *d0;
	  u16 len0;
	  icmp6_to_icmp_ctx_t ctx0;
	  ip6_header_t *ip60;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  error0 = MAP_ERROR_NONE;
	  next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip60 = vlib_buffer_get_current (p0);
	  /* Byte count for the per-domain RX counter (IPv6 payload length). */
	  len0 = clib_net_to_host_u16 (ip60->payload_length);
	  d0 =
	    pool_elt_at_index (map_main.domains,
			       vnet_buffer (p0)->map_t.map_domain_index);
	  /* ip6_get_port returns 0 when no port/id could be extracted. */
	  ctx0.sender_port = ip6_get_port (ip60, 0, p0->current_length);
	  ctx0.d = d0;
	  if (ctx0.sender_port == 0)
	    {
	      // In case of 1:1 mapping, we don't care about the port
	      if (!(d0->ea_bits_len == 0 && d0->rules))
		{
		  error0 = MAP_ERROR_ICMP;
		  goto err0;
		}
	    }

	  if (icmp6_to_icmp
	      (p0, ip6_to_ip4_set_icmp_cb, &ctx0,
	       ip6_to_ip4_set_inner_icmp_cb, &ctx0))
	    {
	      error0 = MAP_ERROR_ICMP;
	      goto err0;
	    }

	  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
	    {
	      //Send to fragmentation node if necessary
	      vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
	      vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
	      next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
	    }
	err0:
	  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
	    {
	      /* RX counters for ICMP are accounted here rather than in
	         the classifier node (see ip6_map_t). */
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
					       thread_index,
					       vnet_buffer (p0)->
					       map_t.map_domain_index, 1,
					       len0);
	    }
	  else
	    {
	      next0 = IP6_MAPT_ICMP_NEXT_DROP;
	    }

	  p0->error = error_node->errors[error0];
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
244
245 static int
246 ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx)
247 {
248   vlib_buffer_t *p = ctx;
249
250   ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
251   ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;
252
253   return 0;
254 }
255
256 static uword
257 ip6_map_t_fragmented (vlib_main_t * vm,
258                       vlib_node_runtime_t * node, vlib_frame_t * frame)
259 {
260   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
261   from = vlib_frame_vector_args (frame);
262   n_left_from = frame->n_vectors;
263   next_index = node->cached_next_index;
264   vlib_node_runtime_t *error_node =
265     vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);
266
267   while (n_left_from > 0)
268     {
269       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
270
271 #ifdef IP6_MAP_T_DUAL_LOOP
272       while (n_left_from >= 4 && n_left_to_next >= 2)
273         {
274           u32 pi0, pi1;
275           vlib_buffer_t *p0, *p1;
276           u32 next0, next1;
277
278           pi0 = to_next[0] = from[0];
279           pi1 = to_next[1] = from[1];
280           from += 2;
281           n_left_from -= 2;
282           to_next += 2;
283           n_left_to_next -= 2;
284
285           next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
286           next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
287           p0 = vlib_get_buffer (vm, pi0);
288           p1 = vlib_get_buffer (vm, pi1);
289
290           if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
291             {
292               p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
293               next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
294             }
295           else
296             {
297               if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
298                 {
299                   //Send to fragmentation node if necessary
300                   vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
301                   vnet_buffer (p0)->ip_frag.next_index =
302                     IP4_FRAG_NEXT_IP4_LOOKUP;
303                   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
304                 }
305             }
306
307           if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1))
308             {
309               p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
310               next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
311             }
312           else
313             {
314               if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
315                 {
316                   //Send to fragmentation node if necessary
317                   vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
318                   vnet_buffer (p1)->ip_frag.next_index =
319                     IP4_FRAG_NEXT_IP4_LOOKUP;
320                   next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
321                 }
322             }
323
324           vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
325                                            to_next, n_left_to_next, pi0, pi1,
326                                            next0, next1);
327         }
328 #endif
329
330       while (n_left_from > 0 && n_left_to_next > 0)
331         {
332           u32 pi0;
333           vlib_buffer_t *p0;
334           u32 next0;
335
336           pi0 = to_next[0] = from[0];
337           from += 1;
338           n_left_from -= 1;
339           to_next += 1;
340           n_left_to_next -= 1;
341
342           next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
343           p0 = vlib_get_buffer (vm, pi0);
344
345           if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
346             {
347               p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
348               next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
349             }
350           else
351             {
352               if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
353                 {
354                   //Send to fragmentation node if necessary
355                   vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
356                   vnet_buffer (p0)->ip_frag.next_index =
357                     IP4_FRAG_NEXT_IP4_LOOKUP;
358                   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
359                 }
360             }
361
362           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
363                                            to_next, n_left_to_next, pi0,
364                                            next0);
365         }
366       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
367     }
368   return frame->n_vectors;
369 }
370
/*
 * ip6-map-t TCP/UDP node: rewrites the IPv6 header to IPv4 using the
 * addresses pre-computed by the classifier (via ip6_to_ip4_set_cb) and
 * adjusts the L4 checksum, then forwards to ip4-lookup — or ip4-frag
 * when the result exceeds the domain MTU.
 */
static uword
ip6_map_t_tcp_udp (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP6_MAP_T_DUAL_LOOP
      /* Dual loop: two packets per iteration. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 pi0, pi1;
	  vlib_buffer_t *p0, *p1;
	  ip6_mapt_tcp_udp_next_t next0, next1;

	  pi0 = to_next[0] = from[0];
	  pi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
	  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);
	  p1 = vlib_get_buffer (vm, pi1);

	  /* Final argument 1: also fix up UDP checksums
	     (presumably; confirm against ip6_to_ip4.h). */
	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1))
	    {
	      p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next1 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
		  vnet_buffer (p1)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
					   n_left_to_next, pi0, pi1, next0,
					   next1);
	}
#endif

      /* Single loop: remaining packets. */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  ip6_mapt_tcp_udp_next_t next0;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);

	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
485
486 static_always_inline void
487 ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
488                     map_domain_t * d0, i32 * src_port0,
489                     u8 * error0, ip6_mapt_next_t * next0,
490                     u32 l4_len0, ip6_frag_hdr_t * frag0)
491 {
492   if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
493                      ip6_frag_hdr_offset (frag0)))
494     {
495       *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
496       if (d0->ea_bits_len == 0 && d0->rules)
497         {
498           *src_port0 = 0;
499         }
500       else
501         {
502           *src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
503           *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
504         }
505     }
506   else
507     if (PREDICT_TRUE
508         (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
509     {
510       *error0 =
511         l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
512       vnet_buffer (p0)->map_t.checksum_offset =
513         vnet_buffer (p0)->map_t.v6.l4_offset + 16;
514       *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
515       *src_port0 =
516         (i32) *
517         ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
518     }
519   else
520     if (PREDICT_TRUE
521         (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
522     {
523       *error0 =
524         l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
525       vnet_buffer (p0)->map_t.checksum_offset =
526         vnet_buffer (p0)->map_t.v6.l4_offset + 6;
527       *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
528       *src_port0 =
529         (i32) *
530         ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
531     }
532   else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
533     {
534       *error0 =
535         l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
536       *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
537       if (d0->ea_bits_len == 0 && d0->rules)
538         {
539           *src_port0 = 0;
540         }
541       else
542         if (((icmp46_header_t *)
543              u8_ptr_add (ip60,
544                          vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
545             ICMP6_echo_reply
546             || ((icmp46_header_t *)
547                 u8_ptr_add (ip60,
548                             vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
549             ICMP6_echo_request)
550         {
551           *src_port0 =
552             (i32) *
553             ((u16 *)
554              u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
555         }
556     }
557   else
558     {
559       //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
560       *error0 = MAP_ERROR_BAD_PROTOCOL;
561     }
562 }
563
564 static uword
565 ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
566 {
567   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
568   vlib_node_runtime_t *error_node =
569     vlib_node_get_runtime (vm, ip6_map_t_node.index);
570   vlib_combined_counter_main_t *cm = map_main.domain_counters;
571   u32 thread_index = vm->thread_index;
572
573   from = vlib_frame_vector_args (frame);
574   n_left_from = frame->n_vectors;
575   next_index = node->cached_next_index;
576   while (n_left_from > 0)
577     {
578       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
579
580 #ifdef IP6_MAP_T_DUAL_LOOP
581       while (n_left_from >= 4 && n_left_to_next >= 2)
582         {
583           u32 pi0, pi1;
584           vlib_buffer_t *p0, *p1;
585           ip6_header_t *ip60, *ip61;
586           u8 error0, error1;
587           ip6_mapt_next_t next0, next1;
588           u32 l4_len0, l4_len1;
589           i32 src_port0, src_port1;
590           map_domain_t *d0, *d1;
591           ip6_frag_hdr_t *frag0, *frag1;
592           u32 saddr0, saddr1;
593           next0 = next1 = 0;    //Because compiler whines
594
595           pi0 = to_next[0] = from[0];
596           pi1 = to_next[1] = from[1];
597           from += 2;
598           n_left_from -= 2;
599           to_next += 2;
600           n_left_to_next -= 2;
601
602           error0 = MAP_ERROR_NONE;
603           error1 = MAP_ERROR_NONE;
604
605           p0 = vlib_get_buffer (vm, pi0);
606           p1 = vlib_get_buffer (vm, pi1);
607           ip60 = vlib_buffer_get_current (p0);
608           ip61 = vlib_buffer_get_current (p1);
609
610           saddr0 = 0;           /* TODO */
611           saddr1 = 0;           /* TODO */
612           /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
613
614           d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
615                                    (ip4_address_t *) & saddr0,
616                                    &vnet_buffer (p0)->map_t.map_domain_index,
617                                    &error0);
618           d1 =
619             ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
620                                 (ip4_address_t *) & saddr1,
621                                 &vnet_buffer (p1)->map_t.map_domain_index,
622                                 &error1);
623
624           saddr0 = map_get_ip4 (&ip60->src_address, d0->flags);
625           saddr1 = map_get_ip4 (&ip61->src_address, d1->flags);
626
627           vnet_buffer (p0)->map_t.v6.saddr = saddr0;
628           vnet_buffer (p1)->map_t.v6.saddr = saddr1;
629           vnet_buffer (p0)->map_t.v6.daddr =
630             ip6_map_t_embedded_address (d0, &ip60->dst_address);
631           vnet_buffer (p1)->map_t.v6.daddr =
632             ip6_map_t_embedded_address (d1, &ip61->dst_address);
633           vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
634           vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
635
636           if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
637                                         &(vnet_buffer (p0)->map_t.
638                                           v6.l4_protocol),
639                                         &(vnet_buffer (p0)->map_t.
640                                           v6.l4_offset),
641                                         &(vnet_buffer (p0)->map_t.
642                                           v6.frag_offset))))
643             {
644               error0 = MAP_ERROR_MALFORMED;
645               next0 = IP6_MAPT_NEXT_DROP;
646             }
647
648           if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
649                                         &(vnet_buffer (p1)->map_t.
650                                           v6.l4_protocol),
651                                         &(vnet_buffer (p1)->map_t.
652                                           v6.l4_offset),
653                                         &(vnet_buffer (p1)->map_t.
654                                           v6.frag_offset))))
655             {
656               error1 = MAP_ERROR_MALFORMED;
657               next1 = IP6_MAPT_NEXT_DROP;
658             }
659
660           src_port0 = src_port1 = -1;
661           l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
662             sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
663           l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
664             sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
665           frag0 =
666             (ip6_frag_hdr_t *) u8_ptr_add (ip60,
667                                            vnet_buffer (p0)->map_t.
668                                            v6.frag_offset);
669           frag1 =
670             (ip6_frag_hdr_t *) u8_ptr_add (ip61,
671                                            vnet_buffer (p1)->map_t.
672                                            v6.frag_offset);
673
674           ip6_map_t_classify (p0, ip60, d0, &src_port0, &error0, &next0,
675                               l4_len0, frag0);
676           ip6_map_t_classify (p1, ip61, d1, &src_port1, &error1, &next1,
677                               l4_len1, frag1);
678
679           if (PREDICT_FALSE
680               ((src_port0 != -1)
681                && (ip60->src_address.as_u64[0] !=
682                    map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
683                                     src_port0)
684                    || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
685                                                                       vnet_buffer
686                                                                       (p0)->map_t.v6.saddr,
687                                                                       src_port0))))
688             {
689               error0 = MAP_ERROR_SEC_CHECK;
690             }
691
692           if (PREDICT_FALSE
693               ((src_port1 != -1)
694                && (ip61->src_address.as_u64[0] !=
695                    map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
696                                     src_port1)
697                    || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
698                                                                       vnet_buffer
699                                                                       (p1)->map_t.v6.saddr,
700                                                                       src_port1))))
701             {
702               error1 = MAP_ERROR_SEC_CHECK;
703             }
704
705           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
706                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
707                                                    u8_ptr_add (ip60,
708                                                                vnet_buffer
709                                                                (p0)->map_t.
710                                                                v6.frag_offset)))
711               && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
712               && (error0 == MAP_ERROR_NONE))
713             {
714               ip6_map_fragment_cache (ip60,
715                                       (ip6_frag_hdr_t *) u8_ptr_add (ip60,
716                                                                      vnet_buffer
717                                                                      (p0)->map_t.
718                                                                      v6.frag_offset),
719                                       d0, src_port0);
720             }
721
722           if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
723                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
724                                                    u8_ptr_add (ip61,
725                                                                vnet_buffer
726                                                                (p1)->map_t.
727                                                                v6.frag_offset)))
728               && (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
729               && (error1 == MAP_ERROR_NONE))
730             {
731               ip6_map_fragment_cache (ip61,
732                                       (ip6_frag_hdr_t *) u8_ptr_add (ip61,
733                                                                      vnet_buffer
734                                                                      (p1)->map_t.
735                                                                      v6.frag_offset),
736                                       d1, src_port1);
737             }
738
739           if (PREDICT_TRUE
740               (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
741             {
742               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
743                                                thread_index,
744                                                vnet_buffer (p0)->
745                                                map_t.map_domain_index, 1,
746                                                clib_net_to_host_u16
747                                                (ip60->payload_length));
748             }
749
750           if (PREDICT_TRUE
751               (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
752             {
753               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
754                                                thread_index,
755                                                vnet_buffer (p1)->
756                                                map_t.map_domain_index, 1,
757                                                clib_net_to_host_u16
758                                                (ip61->payload_length));
759             }
760
761           next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
762           next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
763           p0->error = error_node->errors[error0];
764           p1->error = error_node->errors[error1];
765           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
766                                            n_left_to_next, pi0, pi1, next0,
767                                            next1);
768         }
769 #endif
770
771       while (n_left_from > 0 && n_left_to_next > 0)
772         {
773           u32 pi0;
774           vlib_buffer_t *p0;
775           ip6_header_t *ip60;
776           u8 error0;
777           u32 l4_len0;
778           i32 src_port0;
779           map_domain_t *d0;
780           ip6_frag_hdr_t *frag0;
781           ip6_mapt_next_t next0 = 0;
782           u32 saddr;
783
784           pi0 = to_next[0] = from[0];
785           from += 1;
786           n_left_from -= 1;
787           to_next += 1;
788           n_left_to_next -= 1;
789           error0 = MAP_ERROR_NONE;
790
791           p0 = vlib_get_buffer (vm, pi0);
792           ip60 = vlib_buffer_get_current (p0);
793
794           //Save saddr in a different variable to not overwrite ip.adj_index
795           saddr = 0;            /* TODO */
796           /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
797
798           d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
799                                    (ip4_address_t *) & saddr,
800                                    &vnet_buffer (p0)->map_t.map_domain_index,
801                                    &error0);
802
803           saddr = map_get_ip4 (&ip60->src_address, d0->flags);
804
805           //FIXME: What if d0 is null
806           vnet_buffer (p0)->map_t.v6.saddr = saddr;
807           vnet_buffer (p0)->map_t.v6.daddr =
808             ip6_map_t_embedded_address (d0, &ip60->dst_address);
809           vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
810
811           if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
812                                         &(vnet_buffer (p0)->map_t.
813                                           v6.l4_protocol),
814                                         &(vnet_buffer (p0)->map_t.
815                                           v6.l4_offset),
816                                         &(vnet_buffer (p0)->map_t.
817                                           v6.frag_offset))))
818             {
819               error0 = MAP_ERROR_MALFORMED;
820               next0 = IP6_MAPT_NEXT_DROP;
821             }
822
823           src_port0 = -1;
824           l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
825             sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
826           frag0 =
827             (ip6_frag_hdr_t *) u8_ptr_add (ip60,
828                                            vnet_buffer (p0)->map_t.
829                                            v6.frag_offset);
830
831
832           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
833                              ip6_frag_hdr_offset (frag0)))
834             {
835               src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
836               error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
837               next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
838             }
839           else
840             if (PREDICT_TRUE
841                 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
842             {
843               error0 =
844                 l4_len0 <
845                 sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
846               vnet_buffer (p0)->map_t.checksum_offset =
847                 vnet_buffer (p0)->map_t.v6.l4_offset + 16;
848               next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
849               src_port0 =
850                 (i32) *
851                 ((u16 *)
852                  u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
853             }
854           else
855             if (PREDICT_TRUE
856                 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
857             {
858               error0 =
859                 l4_len0 <
860                 sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
861               vnet_buffer (p0)->map_t.checksum_offset =
862                 vnet_buffer (p0)->map_t.v6.l4_offset + 6;
863               next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
864               src_port0 =
865                 (i32) *
866                 ((u16 *)
867                  u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
868             }
869           else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
870                    IP_PROTOCOL_ICMP6)
871             {
872               error0 =
873                 l4_len0 <
874                 sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
875               next0 = IP6_MAPT_NEXT_MAPT_ICMP;
876               if (((icmp46_header_t *)
877                    u8_ptr_add (ip60,
878                                vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
879                   ICMP6_echo_reply
880                   || ((icmp46_header_t *)
881                       u8_ptr_add (ip60,
882                                   vnet_buffer (p0)->map_t.v6.
883                                   l4_offset))->code == ICMP6_echo_request)
884                 src_port0 =
885                   (i32) *
886                   ((u16 *)
887                    u8_ptr_add (ip60,
888                                vnet_buffer (p0)->map_t.v6.l4_offset + 6));
889             }
890           else
891             {
892               //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
893               error0 = MAP_ERROR_BAD_PROTOCOL;
894             }
895
896           //Security check
897           if (PREDICT_FALSE
898               ((src_port0 != -1)
899                && (ip60->src_address.as_u64[0] !=
900                    map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
901                                     src_port0)
902                    || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
903                                                                       vnet_buffer
904                                                                       (p0)->map_t.v6.saddr,
905                                                                       src_port0))))
906             {
907               //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
908               error0 = MAP_ERROR_SEC_CHECK;
909             }
910
911           //Fragmented first packet needs to be cached for following packets
912           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
913                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
914                                                    u8_ptr_add (ip60,
915                                                                vnet_buffer
916                                                                (p0)->map_t.
917                                                                v6.frag_offset)))
918               && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
919               && (error0 == MAP_ERROR_NONE))
920             {
921               ip6_map_fragment_cache (ip60,
922                                       (ip6_frag_hdr_t *) u8_ptr_add (ip60,
923                                                                      vnet_buffer
924                                                                      (p0)->map_t.
925                                                                      v6.frag_offset),
926                                       d0, src_port0);
927             }
928
929           if (PREDICT_TRUE
930               (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
931             {
932               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
933                                                thread_index,
934                                                vnet_buffer (p0)->
935                                                map_t.map_domain_index, 1,
936                                                clib_net_to_host_u16
937                                                (ip60->payload_length));
938             }
939
940           next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
941           p0->error = error_node->errors[error0];
942           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
943                                            to_next, n_left_to_next, pi0,
944                                            next0);
945         }
946       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
947     }
948   return frame->n_vectors;
949 }
950
/*
 * Human-readable counter strings for the MAP-T nodes, expanded from the
 * foreach_map_error X-macro (presumably defined in map.h — the only
 * project header included here).  Array order must stay in sync with the
 * corresponding MAP error enum, since nodes index it by error code (see
 * the .error_strings fields in the node registrations below).
 */
static char *map_t_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};
956
/* *INDENT-OFF* */
/*
 * "ip6-map-t-fragmented": translates IPv6 fragments that "ip6-map-t"
 * dispatched via IP6_MAPT_NEXT_MAPT_FRAGMENTED (non-first fragments, or
 * first fragments resolved through the reassembly cache).  Output goes to
 * ip4-lookup, the IPv4 fragmentation node, or error-drop.
 */
VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
  .function = ip6_map_t_fragmented,
  .name = "ip6-map-t-fragmented",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
976
/* *INDENT-OFF* */
/*
 * "ip6-map-t-icmp": translates ICMPv6 packets classified by "ip6-map-t"
 * (IP6_MAPT_NEXT_MAPT_ICMP) into ICMPv4.  Output goes to ip4-lookup, the
 * IPv4 fragmentation node, or error-drop.
 */
VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
  .function = ip6_map_t_icmp,
  .name = "ip6-map-t-icmp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
996
/* *INDENT-OFF* */
/*
 * "ip6-map-t-tcp-udp": translates TCP and UDP packets classified by
 * "ip6-map-t" (IP6_MAPT_NEXT_MAPT_TCP_UDP); the classifier has already
 * stored the L4 checksum offset in the buffer metadata.  Output goes to
 * ip4-lookup, the IPv4 fragmentation node, or error-drop.
 */
VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
  .function = ip6_map_t_tcp_udp,
  .name = "ip6-map-t-tcp-udp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
1016
/* *INDENT-OFF* */
/*
 * "ip6-map-t": entry node for IPv6-to-IPv4 MAP-T translation.  It parses
 * the IPv6 header chain, resolves the MAP domain, runs the source-address
 * security check, and dispatches each packet by L4 protocol / fragment
 * status to the per-protocol translation nodes registered above (TCP/UDP,
 * ICMP, fragmented), or to error-drop on failure.
 */
VLIB_REGISTER_NODE(ip6_map_t_node) = {
  .function = ip6_map_t,
  .name = "ip6-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
      [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
      [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
      [IP6_MAPT_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
1037
1038 /*
1039  * fd.io coding-style-patch-verification: ON
1040  *
1041  * Local Variables:
1042  * eval: (c-set-style "gnu")
1043  * End:
1044  */