IPIP and IPv6 fragmentation
[vpp.git] / src / plugins / map / ip6_map_t.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip6_to_ip4.h>
19 #include <vnet/ip/ip4_to_ip6.h>
20
21 #define IP6_MAP_T_DUAL_LOOP
22
/* Next-node indices for the top-level ip6-map-t classifier node. */
typedef enum
{
  IP6_MAPT_NEXT_MAPT_TCP_UDP,	/* translate TCP/UDP payload */
  IP6_MAPT_NEXT_MAPT_ICMP,	/* translate ICMPv6 payload */
  IP6_MAPT_NEXT_MAPT_FRAGMENTED,	/* non-first IPv6 fragment */
  IP6_MAPT_NEXT_DROP,		/* drop the packet */
  IP6_MAPT_N_NEXT
} ip6_mapt_next_t;
31
/* Next-node indices for the ip6-map-t-icmp node. */
typedef enum
{
  IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,	/* translated packet fits the MTU */
  IP6_MAPT_ICMP_NEXT_IP4_FRAG,	/* translated packet needs IPv4 fragmentation */
  IP6_MAPT_ICMP_NEXT_DROP,
  IP6_MAPT_ICMP_N_NEXT
} ip6_mapt_icmp_next_t;
39
/* Next-node indices for the ip6-map-t-tcp-udp node. */
typedef enum
{
  IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,	/* translated packet fits the MTU */
  IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,	/* translated packet needs IPv4 fragmentation */
  IP6_MAPT_TCP_UDP_NEXT_DROP,
  IP6_MAPT_TCP_UDP_N_NEXT
} ip6_mapt_tcp_udp_next_t;
47
/* Next-node indices for the ip6-map-t-fragmented node. */
typedef enum
{
  IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,	/* translated packet fits the MTU */
  IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,	/* translated packet needs IPv4 fragmentation */
  IP6_MAPT_FRAGMENTED_NEXT_DROP,
  IP6_MAPT_FRAGMENTED_N_NEXT
} ip6_mapt_fragmented_next_t;
55
56 static_always_inline int
57 ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
58                         map_domain_t * d, u16 port)
59 {
60   u32 *ignore = NULL;
61   map_ip4_reass_lock ();
62   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
63                                                        d->flags),
64                                           ip6_map_t_embedded_address (d,
65                                                                       &ip6->
66                                                                       dst_address),
67                                           frag_id_6to4 (frag->identification),
68                                           (ip6->protocol ==
69                                            IP_PROTOCOL_ICMP6) ?
70                                           IP_PROTOCOL_ICMP : ip6->protocol,
71                                           &ignore);
72   if (r)
73     r->port = port;
74
75   map_ip4_reass_unlock ();
76   return !r;
77 }
78
79 /* Returns the associated port or -1 */
80 static_always_inline i32
81 ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
82                       map_domain_t * d)
83 {
84   u32 *ignore = NULL;
85   map_ip4_reass_lock ();
86   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
87                                                        d->flags),
88                                           ip6_map_t_embedded_address (d,
89                                                                       &ip6->
90                                                                       dst_address),
91                                           frag_id_6to4 (frag->identification),
92                                           (ip6->protocol ==
93                                            IP_PROTOCOL_ICMP6) ?
94                                           IP_PROTOCOL_ICMP : ip6->protocol,
95                                           &ignore);
96   i32 ret = r ? r->port : -1;
97   map_ip4_reass_unlock ();
98   return ret;
99 }
100
/* Context handed to the ICMPv6->ICMPv4 translation callbacks. */
typedef struct
{
  map_domain_t *d;		/* MAP domain the packet was matched to */
  u16 id;			/* port / ICMP identifier used for the security check */
} icmp6_to_icmp_ctx_t;
106
107 static int
108 ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
109 {
110   icmp6_to_icmp_ctx_t *ctx = arg;
111   map_main_t *mm = &map_main;
112
113   if (mm->is_ce)
114     {
115       u32 ip4_dadr;
116
117       //Security check
118       //Note that this prevents an intermediate IPv6 router from answering the request
119       ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
120       if (ip6->dst_address.as_u64[0] !=
121           map_get_pfx_net (ctx->d, ip4_dadr, ctx->id)
122           || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_dadr,
123                                                             ctx->id))
124         return -1;
125
126       ip4->src_address.as_u32 =
127         ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
128       ip4->dst_address.as_u32 = ip4_dadr;
129     }
130   else
131     {
132       u32 ip4_sadr;
133
134       //Security check
135       //Note that this prevents an intermediate IPv6 router from answering the request
136       ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
137       if (ip6->src_address.as_u64[0] !=
138           map_get_pfx_net (ctx->d, ip4_sadr, ctx->id)
139           || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
140                                                             ctx->id))
141         return -1;
142
143       ip4->dst_address.as_u32 =
144         ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
145       ip4->src_address.as_u32 = ip4_sadr;
146     }
147
148   return 0;
149 }
150
151 static int
152 ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
153                               void *arg)
154 {
155   icmp6_to_icmp_ctx_t *ctx = arg;
156   map_main_t *mm = &map_main;
157
158   if (mm->is_ce)
159     {
160       u32 inner_ip4_sadr;
161
162       //Security check of inner packet
163       inner_ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
164       if (ip6->src_address.as_u64[0] !=
165           map_get_pfx_net (ctx->d, inner_ip4_sadr, ctx->id)
166           || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d,
167                                                             inner_ip4_sadr,
168                                                             ctx->id))
169         return -1;
170
171       ip4->src_address.as_u32 = inner_ip4_sadr;
172       ip4->dst_address.as_u32 =
173         ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
174     }
175   else
176     {
177       u32 inner_ip4_dadr;
178
179       //Security check of inner packet
180       inner_ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
181       if (ip6->dst_address.as_u64[0] !=
182           map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->id)
183           || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
184                                                             inner_ip4_dadr,
185                                                             ctx->id))
186         return -1;
187
188       ip4->dst_address.as_u32 = inner_ip4_dadr;
189       ip4->src_address.as_u32 =
190         ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
191     }
192
193   return 0;
194 }
195
/*
 * ip6-map-t-icmp node: translate ICMPv6 packets to ICMPv4.
 *
 * For each buffer: extract the ICMP identifier (used as the "port" for
 * the MAP security checks), run icmp6_to_icmp() with the outer/inner
 * address callbacks above, and hand the result to ip4-lookup — or to
 * ip4-frag when the translated packet exceeds the domain MTU.
 * Successfully translated packets bump the per-domain RX counter.
 */
static uword
ip6_map_t_icmp (vlib_main_t * vm,
		vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  u8 error0;
	  ip6_mapt_icmp_next_t next0;
	  map_domain_t *d0;
	  u16 len0;
	  icmp6_to_icmp_ctx_t ctx0;
	  ip6_header_t *ip60;
	  icmp46_header_t *icmp0;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  error0 = MAP_ERROR_NONE;
	  next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip60 = vlib_buffer_get_current (p0);
	  /* Byte count for the RX counter, taken before translation. */
	  len0 = clib_net_to_host_u16 (ip60->payload_length);
	  /* NOTE(review): assumes no IPv6 extension headers before ICMP —
	   * the ICMP header is read directly after the fixed header. */
	  icmp0 = (icmp46_header_t *) (ip60 + 1);
	  d0 =
	    pool_elt_at_index (map_main.domains,
			       vnet_buffer (p0)->map_t.map_domain_index);

	  /* ICMP identifier acts as the port for MAP purposes. */
	  ctx0.id =
	    ip6_get_port (ip60, icmp0->type == ICMP6_echo_request,
			  p0->current_length);
	  ctx0.d = d0;
	  if (ctx0.id == 0)
	    {
	      // In case of 1:1 mapping, we don't care about the port
	      if (!(d0->ea_bits_len == 0 && d0->rules))
		{
		  error0 = MAP_ERROR_ICMP;
		  goto err0;
		}
	    }

	  /* Translate header + embedded packet; callbacks do the
	   * address derivation and security checks. */
	  if (icmp6_to_icmp
	      (p0, ip6_to_ip4_set_icmp_cb, &ctx0,
	       ip6_to_ip4_set_inner_icmp_cb, &ctx0))
	    {
	      error0 = MAP_ERROR_ICMP;
	      goto err0;
	    }

	  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
	    {
	      //Send to fragmentation node if necessary
	      vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
	      vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
	      next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
	    }
	err0:
	  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
	    {
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
					       thread_index,
					       vnet_buffer (p0)->
					       map_t.map_domain_index, 1,
					       len0);
	    }
	  else
	    {
	      next0 = IP6_MAPT_ICMP_NEXT_DROP;
	    }

	  p0->error = error_node->errors[error0];
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
293
294 static int
295 ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx)
296 {
297   vlib_buffer_t *p = ctx;
298
299   ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
300   ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;
301
302   return 0;
303 }
304
305 static uword
306 ip6_map_t_fragmented (vlib_main_t * vm,
307                       vlib_node_runtime_t * node, vlib_frame_t * frame)
308 {
309   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
310   from = vlib_frame_vector_args (frame);
311   n_left_from = frame->n_vectors;
312   next_index = node->cached_next_index;
313   vlib_node_runtime_t *error_node =
314     vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);
315
316   while (n_left_from > 0)
317     {
318       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
319
320 #ifdef IP6_MAP_T_DUAL_LOOP
321       while (n_left_from >= 4 && n_left_to_next >= 2)
322         {
323           u32 pi0, pi1;
324           vlib_buffer_t *p0, *p1;
325           u32 next0, next1;
326
327           pi0 = to_next[0] = from[0];
328           pi1 = to_next[1] = from[1];
329           from += 2;
330           n_left_from -= 2;
331           to_next += 2;
332           n_left_to_next -= 2;
333
334           next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
335           next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
336           p0 = vlib_get_buffer (vm, pi0);
337           p1 = vlib_get_buffer (vm, pi1);
338
339           if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
340             {
341               p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
342               next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
343             }
344           else
345             {
346               if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
347                 {
348                   //Send to fragmentation node if necessary
349                   vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
350                   vnet_buffer (p0)->ip_frag.next_index =
351                     IP4_FRAG_NEXT_IP4_LOOKUP;
352                   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
353                 }
354             }
355
356           if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1))
357             {
358               p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
359               next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
360             }
361           else
362             {
363               if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
364                 {
365                   //Send to fragmentation node if necessary
366                   vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
367                   vnet_buffer (p1)->ip_frag.next_index =
368                     IP4_FRAG_NEXT_IP4_LOOKUP;
369                   next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
370                 }
371             }
372
373           vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
374                                            to_next, n_left_to_next, pi0, pi1,
375                                            next0, next1);
376         }
377 #endif
378
379       while (n_left_from > 0 && n_left_to_next > 0)
380         {
381           u32 pi0;
382           vlib_buffer_t *p0;
383           u32 next0;
384
385           pi0 = to_next[0] = from[0];
386           from += 1;
387           n_left_from -= 1;
388           to_next += 1;
389           n_left_to_next -= 1;
390
391           next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
392           p0 = vlib_get_buffer (vm, pi0);
393
394           if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
395             {
396               p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
397               next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
398             }
399           else
400             {
401               if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
402                 {
403                   //Send to fragmentation node if necessary
404                   vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
405                   vnet_buffer (p0)->ip_frag.next_index =
406                     IP4_FRAG_NEXT_IP4_LOOKUP;
407                   next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
408                 }
409             }
410
411           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
412                                            to_next, n_left_to_next, pi0,
413                                            next0);
414         }
415       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
416     }
417   return frame->n_vectors;
418 }
419
/*
 * ip6-map-t-tcp-udp node: translate IPv6 TCP/UDP packets to IPv4.
 *
 * The IPv4 addresses were precomputed by the classifier and are copied
 * in by ip6_to_ip4_set_cb.  Translated packets go to ip4-lookup, or to
 * ip4-frag when the result exceeds the domain MTU; failed translations
 * are dropped with MAP_ERROR_UNKNOWN.
 */
static uword
ip6_map_t_tcp_udp (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP6_MAP_T_DUAL_LOOP
      /* Dual-loop: process two buffers per iteration. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 pi0, pi1;
	  vlib_buffer_t *p0, *p1;
	  ip6_mapt_tcp_udp_next_t next0, next1;

	  pi0 = to_next[0] = from[0];
	  pi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
	  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);
	  p1 = vlib_get_buffer (vm, pi1);

	  /* Last argument (1): also update the L4 checksum. */
	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1))
	    {
	      p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next1 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
		  vnet_buffer (p1)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
					   n_left_to_next, pi0, pi1, next0,
					   next1);
	}
#endif

      /* Single-loop: remaining buffers. */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  ip6_mapt_tcp_udp_next_t next0;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);

	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
534
535 static_always_inline void
536 ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
537                     map_domain_t * d0, i32 * map_port0,
538                     u8 * error0, ip6_mapt_next_t * next0,
539                     u32 l4_len0, ip6_frag_hdr_t * frag0)
540 {
541   map_main_t *mm = &map_main;
542   u32 port_offset;
543
544   if (mm->is_ce)
545     port_offset = 2;
546   else
547     port_offset = 0;
548
549   if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
550                      ip6_frag_hdr_offset (frag0)))
551     {
552       *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
553       if (d0->ea_bits_len == 0 && d0->rules)
554         {
555           *map_port0 = 0;
556         }
557       else
558         {
559           *map_port0 = ip6_map_fragment_get (ip60, frag0, d0);
560           *error0 = (*map_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
561         }
562     }
563   else
564     if (PREDICT_TRUE
565         (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
566     {
567       *error0 =
568         l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
569       vnet_buffer (p0)->map_t.checksum_offset =
570         vnet_buffer (p0)->map_t.v6.l4_offset + 16;
571       *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
572       *map_port0 =
573         (i32) *
574         ((u16 *)
575          u8_ptr_add (ip60,
576                      vnet_buffer (p0)->map_t.v6.l4_offset + port_offset));
577     }
578   else
579     if (PREDICT_TRUE
580         (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
581     {
582       *error0 =
583         l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
584       vnet_buffer (p0)->map_t.checksum_offset =
585         vnet_buffer (p0)->map_t.v6.l4_offset + 6;
586       *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
587       *map_port0 =
588         (i32) *
589         ((u16 *)
590          u8_ptr_add (ip60,
591                      vnet_buffer (p0)->map_t.v6.l4_offset + port_offset));
592     }
593   else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
594     {
595       *error0 =
596         l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
597       *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
598       if (d0->ea_bits_len == 0 && d0->rules)
599         {
600           *map_port0 = 0;
601         }
602       else
603         if (((icmp46_header_t *)
604              u8_ptr_add (ip60,
605                          vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
606             ICMP6_echo_reply
607             || ((icmp46_header_t *)
608                 u8_ptr_add (ip60,
609                             vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
610             ICMP6_echo_request)
611         {
612           *map_port0 =
613             (i32) *
614             ((u16 *)
615              u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
616         }
617     }
618   else
619     {
620       //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
621       *error0 = MAP_ERROR_BAD_PROTOCOL;
622     }
623 }
624
625 static uword
626 ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
627 {
628   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
629   vlib_node_runtime_t *error_node =
630     vlib_node_get_runtime (vm, ip6_map_t_node.index);
631   map_main_t *mm = &map_main;
632   vlib_combined_counter_main_t *cm = map_main.domain_counters;
633   u32 thread_index = vm->thread_index;
634
635   from = vlib_frame_vector_args (frame);
636   n_left_from = frame->n_vectors;
637   next_index = node->cached_next_index;
638   while (n_left_from > 0)
639     {
640       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
641
642 #ifdef IP6_MAP_T_DUAL_LOOP
643       while (n_left_from >= 4 && n_left_to_next >= 2)
644         {
645           u32 pi0, pi1;
646           vlib_buffer_t *p0, *p1;
647           ip6_header_t *ip60, *ip61;
648           u8 error0, error1;
649           ip6_mapt_next_t next0, next1;
650           u32 l4_len0, l4_len1;
651           i32 map_port0, map_port1;
652           map_domain_t *d0, *d1;
653           ip6_frag_hdr_t *frag0, *frag1;
654           next0 = next1 = 0;    //Because compiler whines
655
656           pi0 = to_next[0] = from[0];
657           pi1 = to_next[1] = from[1];
658           from += 2;
659           n_left_from -= 2;
660           to_next += 2;
661           n_left_to_next -= 2;
662
663           error0 = MAP_ERROR_NONE;
664           error1 = MAP_ERROR_NONE;
665
666           p0 = vlib_get_buffer (vm, pi0);
667           p1 = vlib_get_buffer (vm, pi1);
668           ip60 = vlib_buffer_get_current (p0);
669           ip61 = vlib_buffer_get_current (p1);
670
671           if (mm->is_ce)
672             {
673               u32 daddr0, daddr1;
674               daddr0 = 0;       /* TODO */
675               daddr1 = 0;       /* TODO */
676               /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
677
678               daddr0 = map_get_ip4 (&ip60->dst_address, 0 /*TODO*/);
679               daddr1 = map_get_ip4 (&ip61->dst_address, 0 /*TODO*/);
680               d0 =
681                 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
682                                     (ip4_address_t *) & daddr0,
683                                     &vnet_buffer (p0)->map_t.map_domain_index,
684                                     &error0);
685               d1 =
686                 ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
687                                     (ip4_address_t *) & daddr1,
688                                     &vnet_buffer (p1)->map_t.map_domain_index,
689                                     &error1);
690
691               daddr0 = map_get_ip4 (&ip60->dst_address, d0->flags);
692               daddr1 = map_get_ip4 (&ip61->dst_address, d1->flags);
693
694               vnet_buffer (p0)->map_t.v6.daddr = daddr0;
695               vnet_buffer (p1)->map_t.v6.daddr = daddr1;
696               vnet_buffer (p0)->map_t.v6.saddr =
697                 ip6_map_t_embedded_address (d0, &ip60->src_address);
698               vnet_buffer (p1)->map_t.v6.saddr =
699                 ip6_map_t_embedded_address (d1, &ip61->src_address);
700             }
701           else
702             {
703               u32 saddr0, saddr1;
704               saddr0 = 0;       /* TODO */
705               saddr1 = 0;       /* TODO */
706               /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
707
708               saddr0 = map_get_ip4 (&ip60->src_address, 0 /*TODO*/);
709               saddr1 = map_get_ip4 (&ip61->src_address, 0 /*TODO*/);
710               d0 =
711                 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
712                                     (ip4_address_t *) & saddr0,
713                                     &vnet_buffer (p0)->map_t.map_domain_index,
714                                     &error0);
715               d1 =
716                 ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
717                                     (ip4_address_t *) & saddr1,
718                                     &vnet_buffer (p1)->map_t.map_domain_index,
719                                     &error1);
720
721               saddr0 = map_get_ip4 (&ip60->src_address, d0->flags);
722               saddr1 = map_get_ip4 (&ip61->src_address, d1->flags);
723
724               vnet_buffer (p0)->map_t.v6.saddr = saddr0;
725               vnet_buffer (p1)->map_t.v6.saddr = saddr1;
726               vnet_buffer (p0)->map_t.v6.daddr =
727                 ip6_map_t_embedded_address (d0, &ip60->dst_address);
728               vnet_buffer (p1)->map_t.v6.daddr =
729                 ip6_map_t_embedded_address (d1, &ip61->dst_address);
730             }
731
732           vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
733           vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
734
735           if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
736                                         &(vnet_buffer (p0)->map_t.
737                                           v6.l4_protocol),
738                                         &(vnet_buffer (p0)->map_t.
739                                           v6.l4_offset),
740                                         &(vnet_buffer (p0)->map_t.
741                                           v6.frag_offset))))
742             {
743               error0 = MAP_ERROR_MALFORMED;
744               next0 = IP6_MAPT_NEXT_DROP;
745             }
746
747           if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
748                                         &(vnet_buffer (p1)->map_t.
749                                           v6.l4_protocol),
750                                         &(vnet_buffer (p1)->map_t.
751                                           v6.l4_offset),
752                                         &(vnet_buffer (p1)->map_t.
753                                           v6.frag_offset))))
754             {
755               error1 = MAP_ERROR_MALFORMED;
756               next1 = IP6_MAPT_NEXT_DROP;
757             }
758
759           map_port0 = map_port1 = -1;
760           l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
761             sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
762           l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
763             sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
764           frag0 =
765             (ip6_frag_hdr_t *) u8_ptr_add (ip60,
766                                            vnet_buffer (p0)->map_t.
767                                            v6.frag_offset);
768           frag1 =
769             (ip6_frag_hdr_t *) u8_ptr_add (ip61,
770                                            vnet_buffer (p1)->map_t.
771                                            v6.frag_offset);
772
773           ip6_map_t_classify (p0, ip60, d0, &map_port0, &error0, &next0,
774                               l4_len0, frag0);
775           ip6_map_t_classify (p1, ip61, d1, &map_port1, &error1, &next1,
776                               l4_len1, frag1);
777
778           if (PREDICT_FALSE
779               ((map_port0 != -1)
780                && (ip60->src_address.as_u64[0] !=
781                    map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
782                                     map_port0)
783                    || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
784                                                                       vnet_buffer
785                                                                       (p0)->map_t.v6.saddr,
786                                                                       map_port0))))
787             {
788               error0 = MAP_ERROR_SEC_CHECK;
789             }
790
791           if (PREDICT_FALSE
792               ((map_port1 != -1)
793                && (ip61->src_address.as_u64[0] !=
794                    map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
795                                     map_port1)
796                    || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
797                                                                       vnet_buffer
798                                                                       (p1)->map_t.v6.saddr,
799                                                                       map_port1))))
800             {
801               error1 = MAP_ERROR_SEC_CHECK;
802             }
803
804           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
805                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
806                                                    u8_ptr_add (ip60,
807                                                                vnet_buffer
808                                                                (p0)->map_t.
809                                                                v6.frag_offset)))
810               && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
811               && (error0 == MAP_ERROR_NONE))
812             {
813               ip6_map_fragment_cache (ip60,
814                                       (ip6_frag_hdr_t *) u8_ptr_add (ip60,
815                                                                      vnet_buffer
816                                                                      (p0)->map_t.
817                                                                      v6.frag_offset),
818                                       d0, map_port0);
819             }
820
821           if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
822                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
823                                                    u8_ptr_add (ip61,
824                                                                vnet_buffer
825                                                                (p1)->map_t.
826                                                                v6.frag_offset)))
827               && (map_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
828               && (error1 == MAP_ERROR_NONE))
829             {
830               ip6_map_fragment_cache (ip61,
831                                       (ip6_frag_hdr_t *) u8_ptr_add (ip61,
832                                                                      vnet_buffer
833                                                                      (p1)->map_t.
834                                                                      v6.frag_offset),
835                                       d1, map_port1);
836             }
837
838           if (PREDICT_TRUE
839               (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
840             {
841               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
842                                                thread_index,
843                                                vnet_buffer (p0)->
844                                                map_t.map_domain_index, 1,
845                                                clib_net_to_host_u16
846                                                (ip60->payload_length));
847             }
848
849           if (PREDICT_TRUE
850               (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
851             {
852               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
853                                                thread_index,
854                                                vnet_buffer (p1)->
855                                                map_t.map_domain_index, 1,
856                                                clib_net_to_host_u16
857                                                (ip61->payload_length));
858             }
859
860           next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
861           next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
862           p0->error = error_node->errors[error0];
863           p1->error = error_node->errors[error1];
864           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
865                                            n_left_to_next, pi0, pi1, next0,
866                                            next1);
867         }
868 #endif
869
      /* Scalar (single-buffer) loop: classify each IPv6 packet by L4
       * protocol / fragment state, derive the MAP IPv4 addresses, run the
       * anti-spoof check, and dispatch to the tcp-udp / icmp / fragmented
       * translation nodes. */
870       while (n_left_from > 0 && n_left_to_next > 0)
871         {
872           u32 pi0;
873           vlib_buffer_t *p0;
874           ip6_header_t *ip60;
875           u8 error0;
876           u32 l4_len0;
877           i32 map_port0;
878           map_domain_t *d0;
879           ip6_frag_hdr_t *frag0;
880           u32 port_offset;
881           ip6_mapt_next_t next0 = 0;
882
883           pi0 = to_next[0] = from[0];
884           from += 1;
885           n_left_from -= 1;
886           to_next += 1;
887           n_left_to_next -= 1;
888           error0 = MAP_ERROR_NONE;
889
890           p0 = vlib_get_buffer (vm, pi0);
891           ip60 = vlib_buffer_get_current (p0);
892
          /* CE mode: the IPv4 destination is extracted from the packet and
           * the IPv4 source is the MAP-embedded address; the interesting L4
           * port is the destination port (offset 2).  BR mode (else branch)
           * is the mirror image with the source port (offset 0). */
893           if (mm->is_ce)
894             {
895               u32 daddr;
896               //Save daddr in a different variable to not overwrite ip.adj_index
897               daddr = 0;        /* TODO */
898               /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
899
900               daddr = map_get_ip4 (&ip60->dst_address, 0 /*TODO*/);
901               d0 =
902                 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
903                                     (ip4_address_t *) & daddr,
904                                     &vnet_buffer (p0)->map_t.map_domain_index,
905                                     &error0);
906
              /* Re-derive with the real domain flags now that d0 is known. */
907               daddr = map_get_ip4 (&ip60->dst_address, d0->flags);
908
909               //FIXME: What if d0 is null
910               vnet_buffer (p0)->map_t.v6.daddr = daddr;
911               vnet_buffer (p0)->map_t.v6.saddr =
912                 ip6_map_t_embedded_address (d0, &ip60->src_address);
913
914               port_offset = 2;
915             }
916           else
917             {
918               u32 saddr;
919               //Save saddr in a different variable to not overwrite ip.adj_index
920               saddr = 0;        /* TODO */
921               /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
922
923               saddr = map_get_ip4 (&ip60->src_address, 0 /*TODO*/);
924               d0 =
925                 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
926                                     (ip4_address_t *) & saddr,
927                                     &vnet_buffer (p0)->map_t.map_domain_index,
928                                     &error0);
929
930               saddr = map_get_ip4 (&ip60->src_address, d0->flags);
931
932               //FIXME: What if d0 is null
933               vnet_buffer (p0)->map_t.v6.saddr = saddr;
934               vnet_buffer (p0)->map_t.v6.daddr =
935                 ip6_map_t_embedded_address (d0, &ip60->dst_address);
936
937               port_offset = 0;
938             }
939
          /* ~0 means "no MTU limit" downstream. */
940           vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
941
          /* Walk the IPv6 extension-header chain; fills l4_protocol,
           * l4_offset and frag_offset in the buffer metadata.
           * NOTE(review): on parse failure l4_offset/frag_offset may be
           * stale and are still read below — harmless only because error0
           * forces IP6_MAPT_NEXT_DROP at the end; verify nothing below can
           * fault on the stale offsets. */
942           if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
943                                         &(vnet_buffer (p0)->map_t.
944                                           v6.l4_protocol),
945                                         &(vnet_buffer (p0)->map_t.
946                                           v6.l4_offset),
947                                         &(vnet_buffer (p0)->map_t.
948                                           v6.frag_offset))))
949             {
950               error0 = MAP_ERROR_MALFORMED;
951               next0 = IP6_MAPT_NEXT_DROP;
952             }
953
954           map_port0 = -1;
955           l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
956             sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
          /* frag0 is only dereferenced when frag_offset != 0. */
957           frag0 =
958             (ip6_frag_hdr_t *) u8_ptr_add (ip60,
959                                            vnet_buffer (p0)->map_t.
960                                            v6.frag_offset);
961
962
          /* Non-first fragment: no L4 header, recover the port from the
           * reassembly cache populated by the first fragment. */
963           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
964                              ip6_frag_hdr_offset (frag0)))
965             {
966               map_port0 = ip6_map_fragment_get (ip60, frag0, d0);
967               error0 = (map_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
968               next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
969             }
970           else
971             if (PREDICT_TRUE
972                 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
973             {
              /* checksum_offset = l4_offset + 16: TCP checksum field. */
974               error0 =
975                 l4_len0 <
976                 sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
977               vnet_buffer (p0)->map_t.checksum_offset =
978                 vnet_buffer (p0)->map_t.v6.l4_offset + 16;
979               next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
980               map_port0 =
981                 (i32) *
982                 ((u16 *)
983                  u8_ptr_add (ip60,
984                              vnet_buffer (p0)->map_t.v6.l4_offset +
985                              port_offset));
986             }
987           else
988             if (PREDICT_TRUE
989                 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
990             {
              /* checksum_offset = l4_offset + 6: UDP checksum field. */
991               error0 =
992                 l4_len0 <
993                 sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
994               vnet_buffer (p0)->map_t.checksum_offset =
995                 vnet_buffer (p0)->map_t.v6.l4_offset + 6;
996               next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
997               map_port0 =
998                 (i32) *
999                 ((u16 *)
1000                  u8_ptr_add (ip60,
1001                              vnet_buffer (p0)->map_t.v6.l4_offset +
1002                              port_offset));
1003             }
1004           else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
1005                    IP_PROTOCOL_ICMP6)
1006             {
1007               error0 =
1008                 l4_len0 <
1009                 sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
1010               next0 = IP6_MAPT_NEXT_MAPT_ICMP;
              /* For echo request/reply use the ICMPv6 identifier
               * (l4_offset + 6) as the MAP port.
               * NOTE(review): this compares the ->code field against
               * ICMP6_echo_reply/ICMP6_echo_request, but echo request and
               * reply are ICMPv6 *type* values (RFC 4443), not codes —
               * this looks like it should test ->type; confirm and fix. */
1011               if (((icmp46_header_t *)
1012                    u8_ptr_add (ip60,
1013                                vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
1014                   ICMP6_echo_reply
1015                   || ((icmp46_header_t *)
1016                       u8_ptr_add (ip60,
1017                                   vnet_buffer (p0)->map_t.v6.
1018                                   l4_offset))->code == ICMP6_echo_request)
1019                 map_port0 =
1020                   (i32) *
1021                   ((u16 *)
1022                    u8_ptr_add (ip60,
1023                                vnet_buffer (p0)->map_t.v6.l4_offset + 6));
1024             }
1025           else
1026             {
1027               //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
1028               error0 = MAP_ERROR_BAD_PROTOCOL;
1029             }
1030
          //Security check: source must embed the MAP prefix/suffix for
          //(saddr, port); skipped on the CE since the source is the BR.
1031           //Security check
1032           if (PREDICT_FALSE
1033               ((!mm->is_ce) && (map_port0 != -1)
1034                && (ip60->src_address.as_u64[0] !=
1035                    map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
1036                                     map_port0)
1037                    || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
1038                                                                       vnet_buffer
1039                                                                       (p0)->map_t.v6.saddr,
1040                                                                       map_port0))))
1041             {
1042               //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
1043               error0 = MAP_ERROR_SEC_CHECK;
1044             }
1045
1046           //Fragmented first packet needs to be cached for following packets
1047           if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
1048                              !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
1049                                                    u8_ptr_add (ip60,
1050                                                                vnet_buffer
1051                                                                (p0)->map_t.
1052                                                                v6.frag_offset)))
1053               && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
1054               && (error0 == MAP_ERROR_NONE))
1055             {
1056               ip6_map_fragment_cache (ip60,
1057                                       (ip6_frag_hdr_t *) u8_ptr_add (ip60,
1058                                                                      vnet_buffer
1059                                                                      (p0)->map_t.
1060                                                                      v6.frag_offset),
1061                                       d0, map_port0);
1062             }
1063
          /* Per-domain RX accounting; ICMP is counted in its own node. */
1064           if (PREDICT_TRUE
1065               (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
1066             {
1067               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
1068                                                thread_index,
1069                                                vnet_buffer (p0)->
1070                                                map_t.map_domain_index, 1,
1071                                                clib_net_to_host_u16
1072                                                (ip60->payload_length));
1073             }
1074
          /* Any recorded error overrides the classified next node. */
1075           next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
1076           p0->error = error_node->errors[error0];
1077           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1078                                            to_next, n_left_to_next, pi0,
1079                                            next0);
1080         }
1081       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1082     }
1083   return frame->n_vectors;
1084 }
1085
/* Human-readable counter strings for the MAP-T error enum, expanded from
 * foreach_map_error in the same order as the error codes; shared by all
 * node registrations below via .error_strings. */
1086 static char *map_t_error_strings[] = {
1087 #define _(sym,string) string,
1088   foreach_map_error
1089 #undef _
1090 };
1091
1092 /* *INDENT-OFF* */
/* Node translating non-first IPv6 fragments (no L4 header present);
 * outputs to ip4-lookup, ip4-frag (when re-fragmentation is needed)
 * or error-drop. */
1093 VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
1094   .function = ip6_map_t_fragmented,
1095   .name = "ip6-map-t-fragmented",
1096   .vector_size = sizeof (u32),
1097   .format_trace = format_map_trace,
1098   .type = VLIB_NODE_TYPE_INTERNAL,
1099
1100   .n_errors = MAP_N_ERROR,
1101   .error_strings = map_t_error_strings,
1102
1103   .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
1104   .next_nodes = {
1105       [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
1106       [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1107       [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
1108   },
1109 };
1110 /* *INDENT-ON* */
1111
1112 /* *INDENT-OFF* */
/* Node translating ICMPv6 packets to ICMPv4; outputs to ip4-lookup,
 * ip4-frag or error-drop. */
1113 VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
1114   .function = ip6_map_t_icmp,
1115   .name = "ip6-map-t-icmp",
1116   .vector_size = sizeof (u32),
1117   .format_trace = format_map_trace,
1118   .type = VLIB_NODE_TYPE_INTERNAL,
1119
1120   .n_errors = MAP_N_ERROR,
1121   .error_strings = map_t_error_strings,
1122
1123   .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
1124   .next_nodes = {
1125       [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1126       [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1127       [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
1128   },
1129 };
1130 /* *INDENT-ON* */
1131
1132 /* *INDENT-OFF* */
/* Node translating TCP/UDP packets (and first fragments with an L4
 * header); outputs to ip4-lookup, ip4-frag or error-drop. */
1133 VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
1134   .function = ip6_map_t_tcp_udp,
1135   .name = "ip6-map-t-tcp-udp",
1136   .vector_size = sizeof (u32),
1137   .format_trace = format_map_trace,
1138   .type = VLIB_NODE_TYPE_INTERNAL,
1139
1140   .n_errors = MAP_N_ERROR,
1141   .error_strings = map_t_error_strings,
1142
1143   .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
1144   .next_nodes = {
1145       [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1146       [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1147       [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
1148   },
1149 };
1150 /* *INDENT-ON* */
1151
1152 /* *INDENT-OFF* */
/* Entry node for IPv6->IPv4 MAP-T: classifies packets and dispatches
 * to the tcp-udp / icmp / fragmented translation nodes registered
 * above, or drops on error. */
1153 VLIB_REGISTER_NODE(ip6_map_t_node) = {
1154   .function = ip6_map_t,
1155   .name = "ip6-map-t",
1156   .vector_size = sizeof(u32),
1157   .format_trace = format_map_trace,
1158   .type = VLIB_NODE_TYPE_INTERNAL,
1159
1160   .n_errors = MAP_N_ERROR,
1161   .error_strings = map_t_error_strings,
1162
1163   .n_next_nodes = IP6_MAPT_N_NEXT,
1164   .next_nodes = {
1165       [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
1166       [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
1167       [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
1168       [IP6_MAPT_NEXT_DROP] = "error-drop",
1169   },
1170 };
1171 /* *INDENT-ON* */
1172
1173 /*
1174  * fd.io coding-style-patch-verification: ON
1175  *
1176  * Local Variables:
1177  * eval: (c-set-style "gnu")
1178  * End:
1179  */