MAP: Add RFC6052 mapping to MAP-T
[vpp.git] / src / vnet / map / ip6_map_t.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16
17 #include "../ip/ip_frag.h"
18 #include <vnet/ip/ip6_to_ip4.h>
19 #include <vnet/ip/ip4_to_ip6.h>
20
21 #define IP6_MAP_T_DUAL_LOOP
22
/* Next-node dispositions for the ip6-map-t classifier node */
typedef enum
{
  IP6_MAPT_NEXT_MAPT_TCP_UDP,	/* translate a TCP/UDP packet */
  IP6_MAPT_NEXT_MAPT_ICMP,	/* translate an ICMPv6 packet */
  IP6_MAPT_NEXT_MAPT_FRAGMENTED,	/* non-first fragment, port comes from reass cache */
  IP6_MAPT_NEXT_DROP,		/* classification error */
  IP6_MAPT_N_NEXT
} ip6_mapt_next_t;
31
/* Next-node dispositions after ICMPv6 -> ICMPv4 translation */
typedef enum
{
  IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,	/* forward translated packet */
  IP6_MAPT_ICMP_NEXT_IP4_FRAG,	/* translated packet exceeds domain MTU */
  IP6_MAPT_ICMP_NEXT_DROP,	/* translation or security-check failure */
  IP6_MAPT_ICMP_N_NEXT
} ip6_mapt_icmp_next_t;
39
/* Next-node dispositions after TCP/UDP translation */
typedef enum
{
  IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,	/* forward translated packet */
  IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,	/* translated packet exceeds domain MTU */
  IP6_MAPT_TCP_UDP_NEXT_DROP,	/* translation failure */
  IP6_MAPT_TCP_UDP_N_NEXT
} ip6_mapt_tcp_udp_next_t;
47
/* Next-node dispositions after translating a non-first fragment */
typedef enum
{
  IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,	/* forward translated fragment */
  IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,	/* translated fragment exceeds domain MTU */
  IP6_MAPT_FRAGMENTED_NEXT_DROP,	/* translation failure */
  IP6_MAPT_FRAGMENTED_N_NEXT
} ip6_mapt_fragmented_next_t;
55
56 static_always_inline int
57 ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
58                         map_domain_t * d, u16 port)
59 {
60   u32 *ignore = NULL;
61   map_ip4_reass_lock ();
62   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
63                                                        d->flags),
64                                           ip6_map_t_embedded_address (d,
65                                                                       &ip6->
66                                                                       dst_address),
67                                           frag_id_6to4 (frag->identification),
68                                           (ip6->protocol ==
69                                            IP_PROTOCOL_ICMP6) ?
70                                           IP_PROTOCOL_ICMP : ip6->protocol,
71                                           &ignore);
72   if (r)
73     r->port = port;
74
75   map_ip4_reass_unlock ();
76   return !r;
77 }
78
79 /* Returns the associated port or -1 */
80 static_always_inline i32
81 ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
82                       map_domain_t * d)
83 {
84   u32 *ignore = NULL;
85   map_ip4_reass_lock ();
86   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
87                                                        d->flags),
88                                           ip6_map_t_embedded_address (d,
89                                                                       &ip6->
90                                                                       dst_address),
91                                           frag_id_6to4 (frag->identification),
92                                           (ip6->protocol ==
93                                            IP_PROTOCOL_ICMP6) ?
94                                           IP_PROTOCOL_ICMP : ip6->protocol,
95                                           &ignore);
96   i32 ret = r ? r->port : -1;
97   map_ip4_reass_unlock ();
98   return ret;
99 }
100
/* Context passed to the icmp6_to_icmp() translation callbacks */
typedef struct
{
  map_domain_t *d;		/* MAP domain the packet belongs to */
  u16 sender_port;		/* sender's L4 port, used for the MAP security check */
} icmp6_to_icmp_ctx_t;
106
107 static int
108 ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
109 {
110   icmp6_to_icmp_ctx_t *ctx = arg;
111   u32 ip4_sadr;
112
113   //Security check
114   //Note that this prevents an intermediate IPv6 router from answering the request
115   ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
116   if (ip6->src_address.as_u64[0] !=
117       map_get_pfx_net (ctx->d, ip4_sadr, ctx->sender_port)
118       || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
119                                                         ctx->sender_port))
120     return -1;
121
122   ip4->dst_address.as_u32 =
123     ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
124   ip4->src_address.as_u32 = ip4_sadr;
125
126   return 0;
127 }
128
129 static int
130 ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
131                               void *arg)
132 {
133   icmp6_to_icmp_ctx_t *ctx = arg;
134   u32 inner_ip4_dadr;
135
136   //Security check of inner packet
137   inner_ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
138   if (ip6->dst_address.as_u64[0] !=
139       map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->sender_port)
140       || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
141                                                         inner_ip4_dadr,
142                                                         ctx->sender_port))
143     return -1;
144
145   ip4->dst_address.as_u32 = inner_ip4_dadr;
146   ip4->src_address.as_u32 =
147     ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
148
149   return 0;
150 }
151
/*
 * ip6-map-t-icmp node: translate ICMPv6 packets to ICMPv4 per the MAP
 * domain, doing the MAP security check via the two callbacks above.
 * Increments the per-domain RX counter on success; oversized results
 * are diverted to the ip4-frag node.
 */
static uword
ip6_map_t_icmp (vlib_main_t * vm,
		vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vlib_get_thread_index ();

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  u8 error0;
	  ip6_mapt_icmp_next_t next0;
	  map_domain_t *d0;
	  u16 len0;
	  icmp6_to_icmp_ctx_t ctx0;
	  ip6_header_t *ip60;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  error0 = MAP_ERROR_NONE;
	  next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip60 = vlib_buffer_get_current (p0);
	  /* Byte count for the RX counter, taken before translation */
	  len0 = clib_net_to_host_u16 (ip60->payload_length);
	  d0 =
	    pool_elt_at_index (map_main.domains,
			       vnet_buffer (p0)->map_t.map_domain_index);
	  /* ip6_get_port returns 0 when no port could be extracted */
	  ctx0.sender_port = ip6_get_port (ip60, 0, p0->current_length);
	  ctx0.d = d0;
	  if (ctx0.sender_port == 0)
	    {
	      // In case of 1:1 mapping, we don't care about the port
	      if (!(d0->ea_bits_len == 0 && d0->rules))
		{
		  error0 = MAP_ERROR_ICMP;
		  goto err0;
		}
	    }

	  /* Translate outer and inner (quoted) headers; non-zero means
	   * a malformed packet or a failed security check */
	  if (icmp6_to_icmp
	      (p0, ip6_to_ip4_set_icmp_cb, &ctx0,
	       ip6_to_ip4_set_inner_icmp_cb, &ctx0))
	    {
	      error0 = MAP_ERROR_ICMP;
	      goto err0;
	    }

	  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
	    {
	      //Send to fragmentation node if necessary
	      vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
	      vnet_buffer (p0)->ip_frag.header_offset = 0;
	      vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
	      next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
	    }
	err0:
	  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
	    {
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
					       thread_index,
					       vnet_buffer (p0)->
					       map_t.map_domain_index, 1,
					       len0);
	    }
	  else
	    {
	      next0 = IP6_MAPT_ICMP_NEXT_DROP;
	    }

	  p0->error = error_node->errors[error0];
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
245
246 static int
247 ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx)
248 {
249   vlib_buffer_t *p = ctx;
250
251   ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
252   ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;
253
254   return 0;
255 }
256
257 static uword
258 ip6_map_t_fragmented (vlib_main_t * vm,
259                       vlib_node_runtime_t * node, vlib_frame_t * frame)
260 {
261   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
262   from = vlib_frame_vector_args (frame);
263   n_left_from = frame->n_vectors;
264   next_index = node->cached_next_index;
265   vlib_node_runtime_t *error_node =
266     vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);
267
268   while (n_left_from > 0)
269     {
270       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
271
272 #ifdef IP6_MAP_T_DUAL_LOOP
273       while (n_left_from >= 4 && n_left_to_next >= 2)
274         {
275           u32 pi0, pi1;
276           vlib_buffer_t *p0, *p1;
277           u32 next0, next1;
278
279           pi0 = to_next[0] = from[0];
280           pi1 = to_next[1] = from[1];
281           from += 2;
282           n_left_from -= 2;
283           to_next += 2;
284           n_left_to_next -= 2;
285
286           next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
287           next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
288           p0 = vlib_get_buffer (vm, pi0);
289           p1 = vlib_get_buffer (vm, pi1);
290
291           if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
292             {
293               p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
294               next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
295             }
296           else
297             {
298               if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
299                 {
300                   //Send to fragmentation node if necessary
301                   vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
302                   vnet_buffer (p0)->ip_frag.header_offset = 0;
303                   vnet_buffer (p0)->ip_frag.next_index =
304                     IP4_FRAG_NEXT_IP4_LOOKUP;
305                   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
306                 }
307             }
308
309           if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1))
310             {
311               p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
312               next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
313             }
314           else
315             {
316               if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
317                 {
318                   //Send to fragmentation node if necessary
319                   vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
320                   vnet_buffer (p1)->ip_frag.header_offset = 0;
321                   vnet_buffer (p1)->ip_frag.next_index =
322                     IP4_FRAG_NEXT_IP4_LOOKUP;
323                   next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
324                 }
325             }
326
327           vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
328                                            to_next, n_left_to_next, pi0, pi1,
329                                            next0, next1);
330         }
331 #endif
332
333       while (n_left_from > 0 && n_left_to_next > 0)
334         {
335           u32 pi0;
336           vlib_buffer_t *p0;
337           u32 next0;
338
339           pi0 = to_next[0] = from[0];
340           from += 1;
341           n_left_from -= 1;
342           to_next += 1;
343           n_left_to_next -= 1;
344
345           next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
346           p0 = vlib_get_buffer (vm, pi0);
347
348           if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
349             {
350               p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
351               next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
352             }
353           else
354             {
355               if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
356                 {
357                   //Send to fragmentation node if necessary
358                   vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
359                   vnet_buffer (p0)->ip_frag.header_offset = 0;
360                   vnet_buffer (p0)->ip_frag.next_index =
361                     IP4_FRAG_NEXT_IP4_LOOKUP;
362                   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
363                 }
364             }
365
366           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
367                                            to_next, n_left_to_next, pi0,
368                                            next0);
369         }
370       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
371     }
372   return frame->n_vectors;
373 }
374
/*
 * ip6-map-t-tcp-udp node: translate TCP/UDP-carrying IPv6 packets to
 * IPv4.  The IPv4 addresses were pre-computed by the classifier and
 * are applied through ip6_to_ip4_set_cb(); oversized results are
 * diverted to the ip4-frag node.
 */
static uword
ip6_map_t_tcp_udp (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP6_MAP_T_DUAL_LOOP
      /* Dual-loop: process two buffers per iteration */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 pi0, pi1;
	  vlib_buffer_t *p0, *p1;
	  ip6_mapt_tcp_udp_next_t next0, next1;

	  pi0 = to_next[0] = from[0];
	  pi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
	  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);
	  p1 = vlib_get_buffer (vm, pi1);

	  /* Last argument non-zero: also update the L4 checksum */
	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.header_offset = 0;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1))
	    {
	      p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next1 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
		  vnet_buffer (p1)->ip_frag.header_offset = 0;
		  vnet_buffer (p1)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
					   n_left_to_next, pi0, pi1, next0,
					   next1);
	}
#endif

      /* Single-buffer loop handles the remainder */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  ip6_mapt_tcp_udp_next_t next0;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);

	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.header_offset = 0;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
492
493 static_always_inline void
494 ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
495                     map_domain_t * d0, i32 * src_port0,
496                     u8 * error0, ip6_mapt_next_t * next0,
497                     u32 l4_len0, ip6_frag_hdr_t * frag0)
498 {
499   if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
500                      ip6_frag_hdr_offset (frag0)))
501     {
502       *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
503       if (d0->ea_bits_len == 0 && d0->rules)
504         {
505           *src_port0 = 0;
506         }
507       else
508         {
509           *src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
510           *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
511         }
512     }
513   else
514     if (PREDICT_TRUE
515         (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
516     {
517       *error0 =
518         l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
519       vnet_buffer (p0)->map_t.checksum_offset =
520         vnet_buffer (p0)->map_t.v6.l4_offset + 16;
521       *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
522       *src_port0 =
523         (i32) *
524         ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
525     }
526   else
527     if (PREDICT_TRUE
528         (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
529     {
530       *error0 =
531         l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
532       vnet_buffer (p0)->map_t.checksum_offset =
533         vnet_buffer (p0)->map_t.v6.l4_offset + 6;
534       *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
535       *src_port0 =
536         (i32) *
537         ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
538     }
539   else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
540     {
541       *error0 =
542         l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
543       *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
544       if (d0->ea_bits_len == 0 && d0->rules)
545         {
546           *src_port0 = 0;
547         }
548       else
549         if (((icmp46_header_t *)
550              u8_ptr_add (ip60,
551                          vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
552             ICMP6_echo_reply
553             || ((icmp46_header_t *)
554                 u8_ptr_add (ip60,
555                             vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
556             ICMP6_echo_request)
557         {
558           *src_port0 =
559             (i32) *
560             ((u16 *)
561              u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
562         }
563     }
564   else
565     {
566       //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
567       *error0 = MAP_ERROR_BAD_PROTOCOL;
568     }
569 }
570
571 static uword
572 ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
573 {
574   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
575   vlib_node_runtime_t *error_node =
576     vlib_node_get_runtime (vm, ip6_map_t_node.index);
577   vlib_combined_counter_main_t *cm = map_main.domain_counters;
578   u32 thread_index = vlib_get_thread_index ();
579
580   from = vlib_frame_vector_args (frame);
581   n_left_from = frame->n_vectors;
582   next_index = node->cached_next_index;
583   while (n_left_from > 0)
584     {
585       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
586
587 #ifdef IP6_MAP_T_DUAL_LOOP
588       while (n_left_from >= 4 && n_left_to_next >= 2)
589         {
590           u32 pi0, pi1;
591           vlib_buffer_t *p0, *p1;
592           ip6_header_t *ip60, *ip61;
593           u8 error0, error1;
594           ip6_mapt_next_t next0, next1;
595           u32 l4_len0, l4_len1;
596           i32 src_port0, src_port1;
597           map_domain_t *d0, *d1;
598           ip6_frag_hdr_t *frag0, *frag1;
599           u32 saddr0, saddr1;
600           next0 = next1 = 0;    //Because compiler whines
601
602           pi0 = to_next[0] = from[0];
603           pi1 = to_next[1] = from[1];
604           from += 2;
605           n_left_from -= 2;
606           to_next += 2;
607           n_left_to_next -= 2;
608
609           error0 = MAP_ERROR_NONE;
610           error1 = MAP_ERROR_NONE;
611
612           p0 = vlib_get_buffer (vm, pi0);
613           p1 = vlib_get_buffer (vm, pi1);
614           ip60 = vlib_buffer_get_current (p0);
615           ip61 = vlib_buffer_get_current (p1);
616
617           saddr0 = 0;           /* TODO */
618           saddr1 = 0;           /* TODO */
619           /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
620
621           d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
622                                    (ip4_address_t *) & saddr0,
623                                    &vnet_buffer (p0)->map_t.map_domain_index,
624                                    &error0);
625           d1 =
626             ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
627                                 (ip4_address_t *) & saddr1,
628                                 &vnet_buffer (p1)->map_t.map_domain_index,
629                                 &error1);
630
631           saddr0 = map_get_ip4 (&ip60->src_address, d0->flags);
632           saddr1 = map_get_ip4 (&ip61->src_address, d1->flags);
633
634           vnet_buffer (p0)->map_t.v6.saddr = saddr0;
635           vnet_buffer (p1)->map_t.v6.saddr = saddr1;
636           vnet_buffer (p0)->map_t.v6.daddr =
637             ip6_map_t_embedded_address (d0, &ip60->dst_address);
638           vnet_buffer (p1)->map_t.v6.daddr =
639             ip6_map_t_embedded_address (d1, &ip61->dst_address);
640           vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
641           vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
642
643           if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
644                                         &(vnet_buffer (p0)->map_t.
645                                           v6.l4_protocol),
646                                         &(vnet_buffer (p0)->map_t.
647                                           v6.l4_offset),
648                                         &(vnet_buffer (p0)->map_t.
649                                           v6.frag_offset))))
650             {
651               error0 = MAP_ERROR_MALFORMED;
652               next0 = IP6_MAPT_NEXT_DROP;
653             }
654
655           if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
656                                         &(vnet_buffer (p1)->map_t.
657                                           v6.l4_protocol),
658                                         &(vnet_buffer (p1)->map_t.
659                                           v6.l4_offset),
660                                         &(vnet_buffer (p1)->map_t.
661                                           v6.frag_offset))))
662             {
663               error1 = MAP_ERROR_MALFORMED;
664               next1 = IP6_MAPT_NEXT_DROP;
665             }
666
667           src_port0 = src_port1 = -1;
668           l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
669             sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
670           l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
671             sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
672           frag0 =
673             (ip6_frag_hdr_t *) u8_ptr_add (ip60,
674                                            vnet_buffer (p0)->map_t.
675                                            v6.frag_offset);
676           frag1 =
677             (ip6_frag_hdr_t *) u8_ptr_add (ip61,
678                                            vnet_buffer (p1)->map_t.
679                                            v6.frag_offset);
680
681           ip6_map_t_classify (p0, ip60, d0, &src_port0, &error0, &next0,
682                               l4_len0, frag0);
683           ip6_map_t_classify (p1, ip61, d1, &src_port1, &error1, &next1,
684                               l4_len1, frag1);
685
686           if (PREDICT_FALSE
687               ((src_port0 != -1)
688                && (ip60->src_address.as_u64[0] !=
689                    map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
690                                     src_port0)
691                    || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
692                                                                       vnet_buffer
693                                                                       (p0)->map_t.v6.saddr,
694                                                                       src_port0))))
695             {
696               error0 = MAP_ERROR_SEC_CHECK;
697             }
698
699           if (PREDICT_FALSE
700               ((src_port1 != -1)
701                && (ip61->src_address.as_u64[0] !=
702                    map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
703                                     src_port1)
704                    || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
705                                                                       vnet_buffer
706                                                                       (p1)->map_t.v6.saddr,
707                                                                       src_port1))))
708             {
709               error1 = MAP_ERROR_SEC_CHECK;
710             }
711
712           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
713                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
714                                                    u8_ptr_add (ip60,
715                                                                vnet_buffer
716                                                                (p0)->map_t.
717                                                                v6.frag_offset)))
718               && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
719               && (error0 == MAP_ERROR_NONE))
720             {
721               ip6_map_fragment_cache (ip60,
722                                       (ip6_frag_hdr_t *) u8_ptr_add (ip60,
723                                                                      vnet_buffer
724                                                                      (p0)->map_t.
725                                                                      v6.frag_offset),
726                                       d0, src_port0);
727             }
728
729           if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
730                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
731                                                    u8_ptr_add (ip61,
732                                                                vnet_buffer
733                                                                (p1)->map_t.
734                                                                v6.frag_offset)))
735               && (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
736               && (error1 == MAP_ERROR_NONE))
737             {
738               ip6_map_fragment_cache (ip61,
739                                       (ip6_frag_hdr_t *) u8_ptr_add (ip61,
740                                                                      vnet_buffer
741                                                                      (p1)->map_t.
742                                                                      v6.frag_offset),
743                                       d1, src_port1);
744             }
745
746           if (PREDICT_TRUE
747               (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
748             {
749               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
750                                                thread_index,
751                                                vnet_buffer (p0)->
752                                                map_t.map_domain_index, 1,
753                                                clib_net_to_host_u16
754                                                (ip60->payload_length));
755             }
756
757           if (PREDICT_TRUE
758               (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
759             {
760               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
761                                                thread_index,
762                                                vnet_buffer (p1)->
763                                                map_t.map_domain_index, 1,
764                                                clib_net_to_host_u16
765                                                (ip61->payload_length));
766             }
767
768           next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
769           next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
770           p0->error = error_node->errors[error0];
771           p1->error = error_node->errors[error1];
772           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
773                                            n_left_to_next, pi0, pi1, next0,
774                                            next1);
775         }
776 #endif
777
778       while (n_left_from > 0 && n_left_to_next > 0)
779         {
780           u32 pi0;
781           vlib_buffer_t *p0;
782           ip6_header_t *ip60;
783           u8 error0;
784           u32 l4_len0;
785           i32 src_port0;
786           map_domain_t *d0;
787           ip6_frag_hdr_t *frag0;
788           ip6_mapt_next_t next0 = 0;
789           u32 saddr;
790
791           pi0 = to_next[0] = from[0];
792           from += 1;
793           n_left_from -= 1;
794           to_next += 1;
795           n_left_to_next -= 1;
796           error0 = MAP_ERROR_NONE;
797
798           p0 = vlib_get_buffer (vm, pi0);
799           ip60 = vlib_buffer_get_current (p0);
800
801           //Save saddr in a different variable to not overwrite ip.adj_index
802           saddr = 0;            /* TODO */
803           /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
804
805           d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
806                                    (ip4_address_t *) & saddr,
807                                    &vnet_buffer (p0)->map_t.map_domain_index,
808                                    &error0);
809
810           saddr = map_get_ip4 (&ip60->src_address, d0->flags);
811
812           //FIXME: What if d0 is null
813           vnet_buffer (p0)->map_t.v6.saddr = saddr;
814           vnet_buffer (p0)->map_t.v6.daddr =
815             ip6_map_t_embedded_address (d0, &ip60->dst_address);
816           vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
817
818           if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
819                                         &(vnet_buffer (p0)->map_t.
820                                           v6.l4_protocol),
821                                         &(vnet_buffer (p0)->map_t.
822                                           v6.l4_offset),
823                                         &(vnet_buffer (p0)->map_t.
824                                           v6.frag_offset))))
825             {
826               error0 = MAP_ERROR_MALFORMED;
827               next0 = IP6_MAPT_NEXT_DROP;
828             }
829
830           src_port0 = -1;
831           l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
832             sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
833           frag0 =
834             (ip6_frag_hdr_t *) u8_ptr_add (ip60,
835                                            vnet_buffer (p0)->map_t.
836                                            v6.frag_offset);
837
838
839           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
840                              ip6_frag_hdr_offset (frag0)))
841             {
842               src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
843               error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
844               next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
845             }
846           else
847             if (PREDICT_TRUE
848                 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
849             {
850               error0 =
851                 l4_len0 <
852                 sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
853               vnet_buffer (p0)->map_t.checksum_offset =
854                 vnet_buffer (p0)->map_t.v6.l4_offset + 16;
855               next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
856               src_port0 =
857                 (i32) *
858                 ((u16 *)
859                  u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
860             }
861           else
862             if (PREDICT_TRUE
863                 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
864             {
865               error0 =
866                 l4_len0 <
867                 sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
868               vnet_buffer (p0)->map_t.checksum_offset =
869                 vnet_buffer (p0)->map_t.v6.l4_offset + 6;
870               next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
871               src_port0 =
872                 (i32) *
873                 ((u16 *)
874                  u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
875             }
876           else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
877                    IP_PROTOCOL_ICMP6)
878             {
879               error0 =
880                 l4_len0 <
881                 sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
882               next0 = IP6_MAPT_NEXT_MAPT_ICMP;
883               if (((icmp46_header_t *)
884                    u8_ptr_add (ip60,
885                                vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
886                   ICMP6_echo_reply
887                   || ((icmp46_header_t *)
888                       u8_ptr_add (ip60,
889                                   vnet_buffer (p0)->map_t.v6.
890                                   l4_offset))->code == ICMP6_echo_request)
891                 src_port0 =
892                   (i32) *
893                   ((u16 *)
894                    u8_ptr_add (ip60,
895                                vnet_buffer (p0)->map_t.v6.l4_offset + 6));
896             }
897           else
898             {
899               //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
900               error0 = MAP_ERROR_BAD_PROTOCOL;
901             }
902
903           //Security check
904           if (PREDICT_FALSE
905               ((src_port0 != -1)
906                && (ip60->src_address.as_u64[0] !=
907                    map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
908                                     src_port0)
909                    || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
910                                                                       vnet_buffer
911                                                                       (p0)->map_t.v6.saddr,
912                                                                       src_port0))))
913             {
914               //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
915               error0 = MAP_ERROR_SEC_CHECK;
916             }
917
918           //Fragmented first packet needs to be cached for following packets
919           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
920                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
921                                                    u8_ptr_add (ip60,
922                                                                vnet_buffer
923                                                                (p0)->map_t.
924                                                                v6.frag_offset)))
925               && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
926               && (error0 == MAP_ERROR_NONE))
927             {
928               ip6_map_fragment_cache (ip60,
929                                       (ip6_frag_hdr_t *) u8_ptr_add (ip60,
930                                                                      vnet_buffer
931                                                                      (p0)->map_t.
932                                                                      v6.frag_offset),
933                                       d0, src_port0);
934             }
935
936           if (PREDICT_TRUE
937               (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
938             {
939               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
940                                                thread_index,
941                                                vnet_buffer (p0)->
942                                                map_t.map_domain_index, 1,
943                                                clib_net_to_host_u16
944                                                (ip60->payload_length));
945             }
946
947           next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
948           p0->error = error_node->errors[error0];
949           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
950                                            to_next, n_left_to_next, pi0,
951                                            next0);
952         }
953       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
954     }
955   return frame->n_vectors;
956 }
957
/*
 * Error counter strings for all ip6-map-t graph nodes.
 * Expands the string column of the foreach_map_error X-macro into a
 * table indexed by the MAP error enum; every node registration below
 * points its .error_strings at this table.
 */
static char *map_t_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};
963
964 /* *INDENT-OFF* */
/*
 * "ip6-map-t-fragmented": graph node reached from ip6-map-t (see the
 * IP6_MAPT_NEXT_MAPT_FRAGMENTED arc below) to translate non-first /
 * fragment-bearing packets.  Output goes to IPv4 lookup, the IPv4
 * fragmentation node, or error-drop.
 */
VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
  .function = ip6_map_t_fragmented,
  .name = "ip6-map-t-fragmented",
  .vector_size = sizeof (u32),              /* frame carries buffer indices */
  .format_trace = format_map_trace,         /* shared MAP trace formatter */
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,     /* shared table defined above */

  .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};
982 /* *INDENT-ON* */
983
984 /* *INDENT-OFF* */
/*
 * "ip6-map-t-icmp": graph node reached from ip6-map-t (see the
 * IP6_MAPT_NEXT_MAPT_ICMP arc below) for ICMPv6 packets.  Output goes
 * to IPv4 lookup, the IPv4 fragmentation node, or error-drop.
 */
VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
  .function = ip6_map_t_icmp,
  .name = "ip6-map-t-icmp",
  .vector_size = sizeof (u32),              /* frame carries buffer indices */
  .format_trace = format_map_trace,         /* shared MAP trace formatter */
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,     /* shared table defined above */

  .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};
1002 /* *INDENT-ON* */
1003
1004 /* *INDENT-OFF* */
/*
 * "ip6-map-t-tcp-udp": graph node reached from ip6-map-t (see the
 * IP6_MAPT_NEXT_MAPT_TCP_UDP arc below) for TCP and UDP packets.
 * Output goes to IPv4 lookup, the IPv4 fragmentation node, or
 * error-drop.
 */
VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
  .function = ip6_map_t_tcp_udp,
  .name = "ip6-map-t-tcp-udp",
  .vector_size = sizeof (u32),              /* frame carries buffer indices */
  .format_trace = format_map_trace,         /* shared MAP trace formatter */
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,     /* shared table defined above */

  .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};
1022 /* *INDENT-ON* */
1023
1024 /* *INDENT-OFF* */
/*
 * "ip6-map-t": entry/classifier node for IPv6 MAP-T translation.
 * Parses each packet's L4 protocol and fragment state and dispatches
 * to the per-protocol translation nodes registered above
 * (ip6-map-t-tcp-udp, ip6-map-t-icmp, ip6-map-t-fragmented), or to
 * error-drop when classification fails.
 */
VLIB_REGISTER_NODE(ip6_map_t_node) = {
  .function = ip6_map_t,
  .name = "ip6-map-t",
  .vector_size = sizeof(u32),               /* frame carries buffer indices */
  .format_trace = format_map_trace,         /* shared MAP trace formatter */
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,     /* shared table defined above */

  .n_next_nodes = IP6_MAPT_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
      [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
      [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
      [IP6_MAPT_NEXT_DROP] = "error-drop",
  },
};
1043 /* *INDENT-ON* */
1044
1045 /*
1046  * fd.io coding-style-patch-verification: ON
1047  *
1048  * Local Variables:
1049  * eval: (c-set-style "gnu")
1050  * End:
1051  */