avoid using thread local storage for thread index
[vpp.git] / src / plugins / map / ip6_map_t.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip6_to_ip4.h>
19 #include <vnet/ip/ip4_to_ip6.h>
20
/* Enable the two-packets-per-iteration fast path in the nodes below. */
#define IP6_MAP_T_DUAL_LOOP

/* Dispositions out of the ip6-map-t classifier node: hand the packet to
 * the TCP/UDP, ICMP or fragment translation node, or drop it. */
typedef enum
{
  IP6_MAPT_NEXT_MAPT_TCP_UDP,
  IP6_MAPT_NEXT_MAPT_ICMP,
  IP6_MAPT_NEXT_MAPT_FRAGMENTED,
  IP6_MAPT_NEXT_DROP,
  IP6_MAPT_N_NEXT
} ip6_mapt_next_t;

/* Next-node indices used by ip6_map_t_icmp(). */
typedef enum
{
  IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
  IP6_MAPT_ICMP_NEXT_IP4_FRAG,
  IP6_MAPT_ICMP_NEXT_DROP,
  IP6_MAPT_ICMP_N_NEXT
} ip6_mapt_icmp_next_t;

/* Next-node indices used by ip6_map_t_tcp_udp(). */
typedef enum
{
  IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
  IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
  IP6_MAPT_TCP_UDP_NEXT_DROP,
  IP6_MAPT_TCP_UDP_N_NEXT
} ip6_mapt_tcp_udp_next_t;

/* Next-node indices used by ip6_map_t_fragmented(). */
typedef enum
{
  IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
  IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
  IP6_MAPT_FRAGMENTED_NEXT_DROP,
  IP6_MAPT_FRAGMENTED_N_NEXT
} ip6_mapt_fragmented_next_t;
55
56 static_always_inline int
57 ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
58                         map_domain_t * d, u16 port)
59 {
60   u32 *ignore = NULL;
61   map_ip4_reass_lock ();
62   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
63                                                        d->flags),
64                                           ip6_map_t_embedded_address (d,
65                                                                       &ip6->
66                                                                       dst_address),
67                                           frag_id_6to4 (frag->identification),
68                                           (ip6->protocol ==
69                                            IP_PROTOCOL_ICMP6) ?
70                                           IP_PROTOCOL_ICMP : ip6->protocol,
71                                           &ignore);
72   if (r)
73     r->port = port;
74
75   map_ip4_reass_unlock ();
76   return !r;
77 }
78
79 /* Returns the associated port or -1 */
80 static_always_inline i32
81 ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
82                       map_domain_t * d)
83 {
84   u32 *ignore = NULL;
85   map_ip4_reass_lock ();
86   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
87                                                        d->flags),
88                                           ip6_map_t_embedded_address (d,
89                                                                       &ip6->
90                                                                       dst_address),
91                                           frag_id_6to4 (frag->identification),
92                                           (ip6->protocol ==
93                                            IP_PROTOCOL_ICMP6) ?
94                                           IP_PROTOCOL_ICMP : ip6->protocol,
95                                           &ignore);
96   i32 ret = r ? r->port : -1;
97   map_ip4_reass_unlock ();
98   return ret;
99 }
100
/*
 * Context handed to the icmp6_to_icmp() callbacks: the MAP domain the
 * packet matched and the transport identifier (port or ICMP id, as
 * extracted by ip6_get_port) used for the MAP security check.
 */
typedef struct
{
  map_domain_t *d;	/* matched MAP domain */
  u16 id;		/* port / ICMP identifier for pfx/sfx validation */
} icmp6_to_icmp_ctx_t;
106
107 static int
108 ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
109 {
110   icmp6_to_icmp_ctx_t *ctx = arg;
111   map_main_t *mm = &map_main;
112
113   if (mm->is_ce)
114     {
115       u32 ip4_dadr;
116
117       //Security check
118       //Note that this prevents an intermediate IPv6 router from answering the request
119       ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
120       if (ip6->dst_address.as_u64[0] !=
121           map_get_pfx_net (ctx->d, ip4_dadr, ctx->id)
122           || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_dadr,
123                                                             ctx->id))
124         return -1;
125
126       ip4->src_address.as_u32 =
127         ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
128       ip4->dst_address.as_u32 = ip4_dadr;
129     }
130   else
131     {
132       u32 ip4_sadr;
133
134       //Security check
135       //Note that this prevents an intermediate IPv6 router from answering the request
136       ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
137       if (ip6->src_address.as_u64[0] !=
138           map_get_pfx_net (ctx->d, ip4_sadr, ctx->id)
139           || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
140                                                             ctx->id))
141         return -1;
142
143       ip4->dst_address.as_u32 =
144         ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
145       ip4->src_address.as_u32 = ip4_sadr;
146     }
147
148   return 0;
149 }
150
151 static int
152 ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
153                               void *arg)
154 {
155   icmp6_to_icmp_ctx_t *ctx = arg;
156   map_main_t *mm = &map_main;
157
158   if (mm->is_ce)
159     {
160       u32 inner_ip4_sadr;
161
162       //Security check of inner packet
163       inner_ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
164       if (ip6->src_address.as_u64[0] !=
165           map_get_pfx_net (ctx->d, inner_ip4_sadr, ctx->id)
166           || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d,
167                                                             inner_ip4_sadr,
168                                                             ctx->id))
169         return -1;
170
171       ip4->src_address.as_u32 = inner_ip4_sadr;
172       ip4->dst_address.as_u32 =
173         ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
174     }
175   else
176     {
177       u32 inner_ip4_dadr;
178
179       //Security check of inner packet
180       inner_ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
181       if (ip6->dst_address.as_u64[0] !=
182           map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->id)
183           || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
184                                                             inner_ip4_dadr,
185                                                             ctx->id))
186         return -1;
187
188       ip4->dst_address.as_u32 = inner_ip4_dadr;
189       ip4->src_address.as_u32 =
190         ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
191     }
192
193   return 0;
194 }
195
/*
 * Node function translating ICMPv6 packets to ICMPv4 for MAP-T.
 * For each buffer: resolve the MAP domain stored by the classifier,
 * extract the port/id, run the full icmp6_to_icmp() translation (outer
 * and inner headers via the callbacks above), and either forward to
 * ip4-lookup, divert to ip4-frag when the result exceeds the domain MTU,
 * or drop on error.  RX domain counters are bumped per successfully
 * translated packet.
 */
static uword
ip6_map_t_icmp (vlib_main_t * vm,
		vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  /* Thread index read from vm rather than thread-local storage. */
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  u8 error0;
	  ip6_mapt_icmp_next_t next0;
	  map_domain_t *d0;
	  u16 len0;
	  icmp6_to_icmp_ctx_t ctx0;
	  ip6_header_t *ip60;
	  icmp46_header_t *icmp0;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  error0 = MAP_ERROR_NONE;
	  next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip60 = vlib_buffer_get_current (p0);
	  /* Accounted length is the original IPv6 payload length. */
	  len0 = clib_net_to_host_u16 (ip60->payload_length);
	  icmp0 = (icmp46_header_t *) (ip60 + 1);
	  /* Domain was resolved by the classifier node. */
	  d0 =
	    pool_elt_at_index (map_main.domains,
			       vnet_buffer (p0)->map_t.map_domain_index);

	  /* For echo requests the id is on the "sender" side of the
	     ICMP header; ip6_get_port handles both directions. */
	  ctx0.id =
	    ip6_get_port (ip60, icmp0->type == ICMP6_echo_request,
			  p0->current_length);
	  ctx0.d = d0;
	  if (ctx0.id == 0)
	    {
	      // In case of 1:1 mapping, we don't care about the port
	      if (!(d0->ea_bits_len == 0 && d0->rules))
		{
		  error0 = MAP_ERROR_ICMP;
		  goto err0;
		}
	    }

	  if (icmp6_to_icmp
	      (p0, ip6_to_ip4_set_icmp_cb, &ctx0,
	       ip6_to_ip4_set_inner_icmp_cb, &ctx0))
	    {
	      error0 = MAP_ERROR_ICMP;
	      goto err0;
	    }

	  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
	    {
	      //Send to fragmentation node if necessary
	      vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
	      vnet_buffer (p0)->ip_frag.header_offset = 0;
	      vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
	      next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
	    }
	err0:
	  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
	    {
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
					       thread_index,
					       vnet_buffer (p0)->
					       map_t.map_domain_index, 1,
					       len0);
	    }
	  else
	    {
	      next0 = IP6_MAPT_ICMP_NEXT_DROP;
	    }

	  p0->error = error_node->errors[error0];
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
294
295 static int
296 ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx)
297 {
298   vlib_buffer_t *p = ctx;
299
300   ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
301   ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;
302
303   return 0;
304 }
305
306 static uword
307 ip6_map_t_fragmented (vlib_main_t * vm,
308                       vlib_node_runtime_t * node, vlib_frame_t * frame)
309 {
310   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
311   from = vlib_frame_vector_args (frame);
312   n_left_from = frame->n_vectors;
313   next_index = node->cached_next_index;
314   vlib_node_runtime_t *error_node =
315     vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);
316
317   while (n_left_from > 0)
318     {
319       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
320
321 #ifdef IP6_MAP_T_DUAL_LOOP
322       while (n_left_from >= 4 && n_left_to_next >= 2)
323         {
324           u32 pi0, pi1;
325           vlib_buffer_t *p0, *p1;
326           u32 next0, next1;
327
328           pi0 = to_next[0] = from[0];
329           pi1 = to_next[1] = from[1];
330           from += 2;
331           n_left_from -= 2;
332           to_next += 2;
333           n_left_to_next -= 2;
334
335           next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
336           next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
337           p0 = vlib_get_buffer (vm, pi0);
338           p1 = vlib_get_buffer (vm, pi1);
339
340           if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
341             {
342               p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
343               next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
344             }
345           else
346             {
347               if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
348                 {
349                   //Send to fragmentation node if necessary
350                   vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
351                   vnet_buffer (p0)->ip_frag.header_offset = 0;
352                   vnet_buffer (p0)->ip_frag.next_index =
353                     IP4_FRAG_NEXT_IP4_LOOKUP;
354                   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
355                 }
356             }
357
358           if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1))
359             {
360               p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
361               next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
362             }
363           else
364             {
365               if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
366                 {
367                   //Send to fragmentation node if necessary
368                   vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
369                   vnet_buffer (p1)->ip_frag.header_offset = 0;
370                   vnet_buffer (p1)->ip_frag.next_index =
371                     IP4_FRAG_NEXT_IP4_LOOKUP;
372                   next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
373                 }
374             }
375
376           vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
377                                            to_next, n_left_to_next, pi0, pi1,
378                                            next0, next1);
379         }
380 #endif
381
382       while (n_left_from > 0 && n_left_to_next > 0)
383         {
384           u32 pi0;
385           vlib_buffer_t *p0;
386           u32 next0;
387
388           pi0 = to_next[0] = from[0];
389           from += 1;
390           n_left_from -= 1;
391           to_next += 1;
392           n_left_to_next -= 1;
393
394           next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
395           p0 = vlib_get_buffer (vm, pi0);
396
397           if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
398             {
399               p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
400               next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
401             }
402           else
403             {
404               if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
405                 {
406                   //Send to fragmentation node if necessary
407                   vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
408                   vnet_buffer (p0)->ip_frag.header_offset = 0;
409                   vnet_buffer (p0)->ip_frag.next_index =
410                     IP4_FRAG_NEXT_IP4_LOOKUP;
411                   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
412                 }
413             }
414
415           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
416                                            to_next, n_left_to_next, pi0,
417                                            next0);
418         }
419       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
420     }
421   return frame->n_vectors;
422 }
423
/*
 * Node function translating IPv6 TCP/UDP packets to IPv4 for MAP-T.
 * Each packet goes through ip6_to_ip4_tcp_udp() (with checksum update,
 * last argument = 1) using the addresses pre-computed by the classifier;
 * on success it is sent to ip4-lookup, or to ip4-frag when the result
 * exceeds the domain MTU; on failure it is dropped with
 * MAP_ERROR_UNKNOWN.
 */
static uword
ip6_map_t_tcp_udp (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP6_MAP_T_DUAL_LOOP
      /* Dual loop: translate two packets per iteration. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 pi0, pi1;
	  vlib_buffer_t *p0, *p1;
	  ip6_mapt_tcp_udp_next_t next0, next1;

	  pi0 = to_next[0] = from[0];
	  pi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
	  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);
	  p1 = vlib_get_buffer (vm, pi1);

	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.header_offset = 0;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1))
	    {
	      p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next1 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
		  vnet_buffer (p1)->ip_frag.header_offset = 0;
		  vnet_buffer (p1)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
					   n_left_to_next, pi0, pi1, next0,
					   next1);
	}
#endif

      /* Single loop: remaining packets, one at a time. */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  ip6_mapt_tcp_udp_next_t next0;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);

	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.header_offset = 0;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
541
542 static_always_inline void
543 ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
544                     map_domain_t * d0, i32 * map_port0,
545                     u8 * error0, ip6_mapt_next_t * next0,
546                     u32 l4_len0, ip6_frag_hdr_t * frag0)
547 {
548   map_main_t *mm = &map_main;
549   u32 port_offset;
550
551   if (mm->is_ce)
552     port_offset = 2;
553   else
554     port_offset = 0;
555
556   if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
557                      ip6_frag_hdr_offset (frag0)))
558     {
559       *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
560       if (d0->ea_bits_len == 0 && d0->rules)
561         {
562           *map_port0 = 0;
563         }
564       else
565         {
566           *map_port0 = ip6_map_fragment_get (ip60, frag0, d0);
567           *error0 = (*map_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
568         }
569     }
570   else
571     if (PREDICT_TRUE
572         (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
573     {
574       *error0 =
575         l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
576       vnet_buffer (p0)->map_t.checksum_offset =
577         vnet_buffer (p0)->map_t.v6.l4_offset + 16;
578       *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
579       *map_port0 =
580         (i32) *
581         ((u16 *)
582          u8_ptr_add (ip60,
583                      vnet_buffer (p0)->map_t.v6.l4_offset + port_offset));
584     }
585   else
586     if (PREDICT_TRUE
587         (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
588     {
589       *error0 =
590         l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
591       vnet_buffer (p0)->map_t.checksum_offset =
592         vnet_buffer (p0)->map_t.v6.l4_offset + 6;
593       *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
594       *map_port0 =
595         (i32) *
596         ((u16 *)
597          u8_ptr_add (ip60,
598                      vnet_buffer (p0)->map_t.v6.l4_offset + port_offset));
599     }
600   else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
601     {
602       *error0 =
603         l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
604       *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
605       if (d0->ea_bits_len == 0 && d0->rules)
606         {
607           *map_port0 = 0;
608         }
609       else
610         if (((icmp46_header_t *)
611              u8_ptr_add (ip60,
612                          vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
613             ICMP6_echo_reply
614             || ((icmp46_header_t *)
615                 u8_ptr_add (ip60,
616                             vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
617             ICMP6_echo_request)
618         {
619           *map_port0 =
620             (i32) *
621             ((u16 *)
622              u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
623         }
624     }
625   else
626     {
627       //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
628       *error0 = MAP_ERROR_BAD_PROTOCOL;
629     }
630 }
631
632 static uword
633 ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
634 {
635   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
636   vlib_node_runtime_t *error_node =
637     vlib_node_get_runtime (vm, ip6_map_t_node.index);
638   map_main_t *mm = &map_main;
639   vlib_combined_counter_main_t *cm = map_main.domain_counters;
640   u32 thread_index = vm->thread_index;
641
642   from = vlib_frame_vector_args (frame);
643   n_left_from = frame->n_vectors;
644   next_index = node->cached_next_index;
645   while (n_left_from > 0)
646     {
647       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
648
649 #ifdef IP6_MAP_T_DUAL_LOOP
650       while (n_left_from >= 4 && n_left_to_next >= 2)
651         {
652           u32 pi0, pi1;
653           vlib_buffer_t *p0, *p1;
654           ip6_header_t *ip60, *ip61;
655           u8 error0, error1;
656           ip6_mapt_next_t next0, next1;
657           u32 l4_len0, l4_len1;
658           i32 map_port0, map_port1;
659           map_domain_t *d0, *d1;
660           ip6_frag_hdr_t *frag0, *frag1;
661           next0 = next1 = 0;    //Because compiler whines
662
663           pi0 = to_next[0] = from[0];
664           pi1 = to_next[1] = from[1];
665           from += 2;
666           n_left_from -= 2;
667           to_next += 2;
668           n_left_to_next -= 2;
669
670           error0 = MAP_ERROR_NONE;
671           error1 = MAP_ERROR_NONE;
672
673           p0 = vlib_get_buffer (vm, pi0);
674           p1 = vlib_get_buffer (vm, pi1);
675           ip60 = vlib_buffer_get_current (p0);
676           ip61 = vlib_buffer_get_current (p1);
677
678           if (mm->is_ce)
679             {
680               u32 daddr0, daddr1;
681               daddr0 = 0;       /* TODO */
682               daddr1 = 0;       /* TODO */
683               /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
684
685               daddr0 = map_get_ip4 (&ip60->dst_address, 0 /*TODO*/);
686               daddr1 = map_get_ip4 (&ip61->dst_address, 0 /*TODO*/);
687               d0 =
688                 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
689                                     (ip4_address_t *) & daddr0,
690                                     &vnet_buffer (p0)->map_t.map_domain_index,
691                                     &error0);
692               d1 =
693                 ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
694                                     (ip4_address_t *) & daddr1,
695                                     &vnet_buffer (p1)->map_t.map_domain_index,
696                                     &error1);
697
698               daddr0 = map_get_ip4 (&ip60->dst_address, d0->flags);
699               daddr1 = map_get_ip4 (&ip61->dst_address, d1->flags);
700
701               vnet_buffer (p0)->map_t.v6.daddr = daddr0;
702               vnet_buffer (p1)->map_t.v6.daddr = daddr1;
703               vnet_buffer (p0)->map_t.v6.saddr =
704                 ip6_map_t_embedded_address (d0, &ip60->src_address);
705               vnet_buffer (p1)->map_t.v6.saddr =
706                 ip6_map_t_embedded_address (d1, &ip61->src_address);
707             }
708           else
709             {
710               u32 saddr0, saddr1;
711               saddr0 = 0;       /* TODO */
712               saddr1 = 0;       /* TODO */
713               /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
714
715               saddr0 = map_get_ip4 (&ip60->src_address, 0 /*TODO*/);
716               saddr1 = map_get_ip4 (&ip61->src_address, 0 /*TODO*/);
717               d0 =
718                 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
719                                     (ip4_address_t *) & saddr0,
720                                     &vnet_buffer (p0)->map_t.map_domain_index,
721                                     &error0);
722               d1 =
723                 ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
724                                     (ip4_address_t *) & saddr1,
725                                     &vnet_buffer (p1)->map_t.map_domain_index,
726                                     &error1);
727
728               saddr0 = map_get_ip4 (&ip60->src_address, d0->flags);
729               saddr1 = map_get_ip4 (&ip61->src_address, d1->flags);
730
731               vnet_buffer (p0)->map_t.v6.saddr = saddr0;
732               vnet_buffer (p1)->map_t.v6.saddr = saddr1;
733               vnet_buffer (p0)->map_t.v6.daddr =
734                 ip6_map_t_embedded_address (d0, &ip60->dst_address);
735               vnet_buffer (p1)->map_t.v6.daddr =
736                 ip6_map_t_embedded_address (d1, &ip61->dst_address);
737             }
738
739           vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
740           vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
741
742           if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
743                                         &(vnet_buffer (p0)->map_t.
744                                           v6.l4_protocol),
745                                         &(vnet_buffer (p0)->map_t.
746                                           v6.l4_offset),
747                                         &(vnet_buffer (p0)->map_t.
748                                           v6.frag_offset))))
749             {
750               error0 = MAP_ERROR_MALFORMED;
751               next0 = IP6_MAPT_NEXT_DROP;
752             }
753
754           if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
755                                         &(vnet_buffer (p1)->map_t.
756                                           v6.l4_protocol),
757                                         &(vnet_buffer (p1)->map_t.
758                                           v6.l4_offset),
759                                         &(vnet_buffer (p1)->map_t.
760                                           v6.frag_offset))))
761             {
762               error1 = MAP_ERROR_MALFORMED;
763               next1 = IP6_MAPT_NEXT_DROP;
764             }
765
766           map_port0 = map_port1 = -1;
767           l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
768             sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
769           l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
770             sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
771           frag0 =
772             (ip6_frag_hdr_t *) u8_ptr_add (ip60,
773                                            vnet_buffer (p0)->map_t.
774                                            v6.frag_offset);
775           frag1 =
776             (ip6_frag_hdr_t *) u8_ptr_add (ip61,
777                                            vnet_buffer (p1)->map_t.
778                                            v6.frag_offset);
779
780           ip6_map_t_classify (p0, ip60, d0, &map_port0, &error0, &next0,
781                               l4_len0, frag0);
782           ip6_map_t_classify (p1, ip61, d1, &map_port1, &error1, &next1,
783                               l4_len1, frag1);
784
785           if (PREDICT_FALSE
786               ((map_port0 != -1)
787                && (ip60->src_address.as_u64[0] !=
788                    map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
789                                     map_port0)
790                    || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
791                                                                       vnet_buffer
792                                                                       (p0)->map_t.v6.saddr,
793                                                                       map_port0))))
794             {
795               error0 = MAP_ERROR_SEC_CHECK;
796             }
797
798           if (PREDICT_FALSE
799               ((map_port1 != -1)
800                && (ip61->src_address.as_u64[0] !=
801                    map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
802                                     map_port1)
803                    || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
804                                                                       vnet_buffer
805                                                                       (p1)->map_t.v6.saddr,
806                                                                       map_port1))))
807             {
808               error1 = MAP_ERROR_SEC_CHECK;
809             }
810
811           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
812                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
813                                                    u8_ptr_add (ip60,
814                                                                vnet_buffer
815                                                                (p0)->map_t.
816                                                                v6.frag_offset)))
817               && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
818               && (error0 == MAP_ERROR_NONE))
819             {
820               ip6_map_fragment_cache (ip60,
821                                       (ip6_frag_hdr_t *) u8_ptr_add (ip60,
822                                                                      vnet_buffer
823                                                                      (p0)->map_t.
824                                                                      v6.frag_offset),
825                                       d0, map_port0);
826             }
827
828           if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
829                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
830                                                    u8_ptr_add (ip61,
831                                                                vnet_buffer
832                                                                (p1)->map_t.
833                                                                v6.frag_offset)))
834               && (map_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
835               && (error1 == MAP_ERROR_NONE))
836             {
837               ip6_map_fragment_cache (ip61,
838                                       (ip6_frag_hdr_t *) u8_ptr_add (ip61,
839                                                                      vnet_buffer
840                                                                      (p1)->map_t.
841                                                                      v6.frag_offset),
842                                       d1, map_port1);
843             }
844
845           if (PREDICT_TRUE
846               (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
847             {
848               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
849                                                thread_index,
850                                                vnet_buffer (p0)->
851                                                map_t.map_domain_index, 1,
852                                                clib_net_to_host_u16
853                                                (ip60->payload_length));
854             }
855
856           if (PREDICT_TRUE
857               (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
858             {
859               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
860                                                thread_index,
861                                                vnet_buffer (p1)->
862                                                map_t.map_domain_index, 1,
863                                                clib_net_to_host_u16
864                                                (ip61->payload_length));
865             }
866
867           next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
868           next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
869           p0->error = error_node->errors[error0];
870           p1->error = error_node->errors[error1];
871           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
872                                            n_left_to_next, pi0, pi1, next0,
873                                            next1);
874         }
875 #endif
876
877       while (n_left_from > 0 && n_left_to_next > 0)
878         {
879           u32 pi0;
880           vlib_buffer_t *p0;
881           ip6_header_t *ip60;
882           u8 error0;
883           u32 l4_len0;
884           i32 map_port0;
885           map_domain_t *d0;
886           ip6_frag_hdr_t *frag0;
887           u32 port_offset;
888           ip6_mapt_next_t next0 = 0;
889
890           pi0 = to_next[0] = from[0];
891           from += 1;
892           n_left_from -= 1;
893           to_next += 1;
894           n_left_to_next -= 1;
895           error0 = MAP_ERROR_NONE;
896
897           p0 = vlib_get_buffer (vm, pi0);
898           ip60 = vlib_buffer_get_current (p0);
899
900           if (mm->is_ce)
901             {
902               u32 daddr;
903               //Save daddr in a different variable to not overwrite ip.adj_index
904               daddr = 0;        /* TODO */
905               /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
906
907               daddr = map_get_ip4 (&ip60->dst_address, 0 /*TODO*/);
908               d0 =
909                 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
910                                     (ip4_address_t *) & daddr,
911                                     &vnet_buffer (p0)->map_t.map_domain_index,
912                                     &error0);
913
914               daddr = map_get_ip4 (&ip60->dst_address, d0->flags);
915
916               //FIXME: What if d0 is null
917               vnet_buffer (p0)->map_t.v6.daddr = daddr;
918               vnet_buffer (p0)->map_t.v6.saddr =
919                 ip6_map_t_embedded_address (d0, &ip60->src_address);
920
921               port_offset = 2;
922             }
923           else
924             {
925               u32 saddr;
926               //Save saddr in a different variable to not overwrite ip.adj_index
927               saddr = 0;        /* TODO */
928               /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
929
930               saddr = map_get_ip4 (&ip60->src_address, 0 /*TODO*/);
931               d0 =
932                 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
933                                     (ip4_address_t *) & saddr,
934                                     &vnet_buffer (p0)->map_t.map_domain_index,
935                                     &error0);
936
937               saddr = map_get_ip4 (&ip60->src_address, d0->flags);
938
939               //FIXME: What if d0 is null
940               vnet_buffer (p0)->map_t.v6.saddr = saddr;
941               vnet_buffer (p0)->map_t.v6.daddr =
942                 ip6_map_t_embedded_address (d0, &ip60->dst_address);
943
944               port_offset = 0;
945             }
946
947           vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
948
949           if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
950                                         &(vnet_buffer (p0)->map_t.
951                                           v6.l4_protocol),
952                                         &(vnet_buffer (p0)->map_t.
953                                           v6.l4_offset),
954                                         &(vnet_buffer (p0)->map_t.
955                                           v6.frag_offset))))
956             {
957               error0 = MAP_ERROR_MALFORMED;
958               next0 = IP6_MAPT_NEXT_DROP;
959             }
960
961           map_port0 = -1;
962           l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
963             sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
964           frag0 =
965             (ip6_frag_hdr_t *) u8_ptr_add (ip60,
966                                            vnet_buffer (p0)->map_t.
967                                            v6.frag_offset);
968
969
970           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
971                              ip6_frag_hdr_offset (frag0)))
972             {
973               map_port0 = ip6_map_fragment_get (ip60, frag0, d0);
974               error0 = (map_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
975               next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
976             }
977           else
978             if (PREDICT_TRUE
979                 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
980             {
981               error0 =
982                 l4_len0 <
983                 sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
984               vnet_buffer (p0)->map_t.checksum_offset =
985                 vnet_buffer (p0)->map_t.v6.l4_offset + 16;
986               next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
987               map_port0 =
988                 (i32) *
989                 ((u16 *)
990                  u8_ptr_add (ip60,
991                              vnet_buffer (p0)->map_t.v6.l4_offset +
992                              port_offset));
993             }
994           else
995             if (PREDICT_TRUE
996                 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
997             {
998               error0 =
999                 l4_len0 <
1000                 sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
1001               vnet_buffer (p0)->map_t.checksum_offset =
1002                 vnet_buffer (p0)->map_t.v6.l4_offset + 6;
1003               next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
1004               map_port0 =
1005                 (i32) *
1006                 ((u16 *)
1007                  u8_ptr_add (ip60,
1008                              vnet_buffer (p0)->map_t.v6.l4_offset +
1009                              port_offset));
1010             }
1011           else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
1012                    IP_PROTOCOL_ICMP6)
1013             {
1014               error0 =
1015                 l4_len0 <
1016                 sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
1017               next0 = IP6_MAPT_NEXT_MAPT_ICMP;
1018               if (((icmp46_header_t *)
1019                    u8_ptr_add (ip60,
1020                                vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
1021                   ICMP6_echo_reply
1022                   || ((icmp46_header_t *)
1023                       u8_ptr_add (ip60,
1024                                   vnet_buffer (p0)->map_t.v6.
1025                                   l4_offset))->code == ICMP6_echo_request)
1026                 map_port0 =
1027                   (i32) *
1028                   ((u16 *)
1029                    u8_ptr_add (ip60,
1030                                vnet_buffer (p0)->map_t.v6.l4_offset + 6));
1031             }
1032           else
1033             {
1034               //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
1035               error0 = MAP_ERROR_BAD_PROTOCOL;
1036             }
1037
1038           //Security check
1039           if (PREDICT_FALSE
1040               ((!mm->is_ce) && (map_port0 != -1)
1041                && (ip60->src_address.as_u64[0] !=
1042                    map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
1043                                     map_port0)
1044                    || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
1045                                                                       vnet_buffer
1046                                                                       (p0)->map_t.v6.saddr,
1047                                                                       map_port0))))
1048             {
1049               //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
1050               error0 = MAP_ERROR_SEC_CHECK;
1051             }
1052
1053           //Fragmented first packet needs to be cached for following packets
1054           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
1055                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
1056                                                    u8_ptr_add (ip60,
1057                                                                vnet_buffer
1058                                                                (p0)->map_t.
1059                                                                v6.frag_offset)))
1060               && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
1061               && (error0 == MAP_ERROR_NONE))
1062             {
1063               ip6_map_fragment_cache (ip60,
1064                                       (ip6_frag_hdr_t *) u8_ptr_add (ip60,
1065                                                                      vnet_buffer
1066                                                                      (p0)->map_t.
1067                                                                      v6.frag_offset),
1068                                       d0, map_port0);
1069             }
1070
1071           if (PREDICT_TRUE
1072               (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
1073             {
1074               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
1075                                                thread_index,
1076                                                vnet_buffer (p0)->
1077                                                map_t.map_domain_index, 1,
1078                                                clib_net_to_host_u16
1079                                                (ip60->payload_length));
1080             }
1081
1082           next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
1083           p0->error = error_node->errors[error0];
1084           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1085                                            to_next, n_left_to_next, pi0,
1086                                            next0);
1087         }
1088       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1089     }
1090   return frame->n_vectors;
1091 }
1092
1093 static char *map_t_error_strings[] = {
1094 #define _(sym,string) string,
1095   foreach_map_error
1096 #undef _
1097 };
1098
1099 /* *INDENT-OFF* */
1100 VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
1101   .function = ip6_map_t_fragmented,
1102   .name = "ip6-map-t-fragmented",
1103   .vector_size = sizeof (u32),
1104   .format_trace = format_map_trace,
1105   .type = VLIB_NODE_TYPE_INTERNAL,
1106
1107   .n_errors = MAP_N_ERROR,
1108   .error_strings = map_t_error_strings,
1109
1110   .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
1111   .next_nodes = {
1112       [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
1113       [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1114       [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
1115   },
1116 };
1117 /* *INDENT-ON* */
1118
1119 /* *INDENT-OFF* */
1120 VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
1121   .function = ip6_map_t_icmp,
1122   .name = "ip6-map-t-icmp",
1123   .vector_size = sizeof (u32),
1124   .format_trace = format_map_trace,
1125   .type = VLIB_NODE_TYPE_INTERNAL,
1126
1127   .n_errors = MAP_N_ERROR,
1128   .error_strings = map_t_error_strings,
1129
1130   .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
1131   .next_nodes = {
1132       [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1133       [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1134       [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
1135   },
1136 };
1137 /* *INDENT-ON* */
1138
1139 /* *INDENT-OFF* */
1140 VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
1141   .function = ip6_map_t_tcp_udp,
1142   .name = "ip6-map-t-tcp-udp",
1143   .vector_size = sizeof (u32),
1144   .format_trace = format_map_trace,
1145   .type = VLIB_NODE_TYPE_INTERNAL,
1146
1147   .n_errors = MAP_N_ERROR,
1148   .error_strings = map_t_error_strings,
1149
1150   .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
1151   .next_nodes = {
1152       [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1153       [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1154       [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
1155   },
1156 };
1157 /* *INDENT-ON* */
1158
1159 /* *INDENT-OFF* */
1160 VLIB_REGISTER_NODE(ip6_map_t_node) = {
1161   .function = ip6_map_t,
1162   .name = "ip6-map-t",
1163   .vector_size = sizeof(u32),
1164   .format_trace = format_map_trace,
1165   .type = VLIB_NODE_TYPE_INTERNAL,
1166
1167   .n_errors = MAP_N_ERROR,
1168   .error_strings = map_t_error_strings,
1169
1170   .n_next_nodes = IP6_MAPT_N_NEXT,
1171   .next_nodes = {
1172       [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
1173       [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
1174       [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
1175       [IP6_MAPT_NEXT_DROP] = "error-drop",
1176   },
1177 };
1178 /* *INDENT-ON* */
1179
1180 /*
1181  * fd.io coding-style-patch-verification: ON
1182  *
1183  * Local Variables:
1184  * eval: (c-set-style "gnu")
1185  * End:
1186  */