MAP: Convert from DPO to input feature.
[vpp.git] / src / plugins / map / ip6_map.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
19 #include <vnet/ip/ip6_to_ip4.h>
20
/* Next-node indices for the ip6-map decapsulation node. */
enum ip6_map_next_e
{
  IP6_MAP_NEXT_IP4_LOOKUP,	/* decapsulated IPv4 goes to FIB lookup */
#ifdef MAP_SKIP_IP6_LOOKUP
  IP6_MAP_NEXT_IP4_REWRITE,	/* pre-resolved adjacency, lookup bypassed */
#endif
  IP6_MAP_NEXT_IP6_REASS,	/* outer IPv6 fragment -> IPv6 virtual reassembly */
  IP6_MAP_NEXT_IP4_REASS,	/* inner IPv4 fragment -> IPv4 virtual reassembly */
  IP6_MAP_NEXT_IP4_FRAGMENT,	/* inner packet exceeds domain MTU */
  IP6_MAP_NEXT_IP6_ICMP_RELAY,	/* ICMPv6 error relayed as ICMPv4 */
  IP6_MAP_NEXT_IP6_LOCAL,	/* ICMPv6 echo handled locally */
  IP6_MAP_NEXT_DROP,
  IP6_MAP_NEXT_ICMP,		/* generate ICMPv6 error (e.g. policy failure) */
  IP6_MAP_N_NEXT,
};
36
/* Next-node indices for the IPv6 virtual-reassembly node. */
enum ip6_map_ip6_reass_next_e
{
  IP6_MAP_IP6_REASS_NEXT_IP6_MAP,	/* reassembled fragments re-enter ip6-map */
  IP6_MAP_IP6_REASS_NEXT_DROP,
  IP6_MAP_IP6_REASS_N_NEXT,
};
43
/* Next-node indices for the IPv4 virtual-reassembly node. */
enum ip6_map_ip4_reass_next_e
{
  IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP,
  IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT,
  IP6_MAP_IP4_REASS_NEXT_DROP,
  IP6_MAP_IP4_REASS_N_NEXT,
};
51
/* Next-node indices for the ICMPv6-to-ICMPv4 relay node. */
enum ip6_icmp_relay_next_e
{
  IP6_ICMP_RELAY_NEXT_IP4_LOOKUP,	/* translated ICMPv4 error to FIB lookup */
  IP6_ICMP_RELAY_NEXT_DROP,
  IP6_ICMP_RELAY_N_NEXT,
};
58
59 vlib_node_registration_t ip6_map_ip4_reass_node;
60 vlib_node_registration_t ip6_map_ip6_reass_node;
61 static vlib_node_registration_t ip6_map_icmp_relay_node;
62
/* Per-packet trace record for the MAP IPv4 virtual-reassembly node. */
typedef struct
{
  u32 map_domain_index;		/* index of the matched MAP domain */
  u16 port;			/* L4 port recovered from the inner IPv4 packet */
  u8 cached;			/* 1 = fragment cached awaiting port, 0 = forwarded */
} map_ip6_map_ip4_reass_trace_t;
69
70 u8 *
71 format_ip6_map_ip4_reass_trace (u8 * s, va_list * args)
72 {
73   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
74   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
75   map_ip6_map_ip4_reass_trace_t *t =
76     va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
77   return format (s, "MAP domain index: %d L4 port: %u Status: %s",
78                  t->map_domain_index, t->port,
79                  t->cached ? "cached" : "forwarded");
80 }
81
/* Per-packet trace record for the MAP IPv6 virtual-reassembly node. */
typedef struct
{
  u16 offset;			/* fragment offset in bytes */
  u16 frag_len;			/* fragment payload length */
  u8 out;			/* 1 = leaving reassembly, 0 = entering */
} map_ip6_map_ip6_reass_trace_t;
88
89 u8 *
90 format_ip6_map_ip6_reass_trace (u8 * s, va_list * args)
91 {
92   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
93   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
94   map_ip6_map_ip6_reass_trace_t *t =
95     va_arg (*args, map_ip6_map_ip6_reass_trace_t *);
96   return format (s, "Offset: %d Fragment length: %d Status: %s", t->offset,
97                  t->frag_len, t->out ? "out" : "in");
98 }
99
100 /*
101  * ip6_map_sec_check
102  */
103 static_always_inline bool
104 ip6_map_sec_check (map_domain_t * d, u16 port, ip4_header_t * ip4,
105                    ip6_header_t * ip6)
106 {
107   u16 sp4 = clib_net_to_host_u16 (port);
108   u32 sa4 = clib_net_to_host_u32 (ip4->src_address.as_u32);
109   u64 sal6 = map_get_pfx (d, sa4, sp4);
110   u64 sar6 = map_get_sfx (d, sa4, sp4);
111
112   if (PREDICT_FALSE
113       (sal6 != clib_net_to_host_u64 (ip6->src_address.as_u64[0])
114        || sar6 != clib_net_to_host_u64 (ip6->src_address.as_u64[1])))
115     return (false);
116   return (true);
117 }
118
119 static_always_inline void
120 ip6_map_security_check (map_domain_t * d, ip4_header_t * ip4,
121                         ip6_header_t * ip6, u32 * next, u8 * error)
122 {
123   map_main_t *mm = &map_main;
124   if (d->ea_bits_len || d->rules)
125     {
126       if (d->psid_length > 0)
127         {
128           if (!ip4_is_fragment (ip4))
129             {
130               u16 port = ip4_get_port (ip4, 1);
131               if (port)
132                 {
133                   if (mm->sec_check)
134                     *error =
135                       ip6_map_sec_check (d, port, ip4,
136                                          ip6) ? MAP_ERROR_NONE :
137                       MAP_ERROR_DECAP_SEC_CHECK;
138                 }
139               else
140                 {
141                   *error = MAP_ERROR_BAD_PROTOCOL;
142                 }
143             }
144           else
145             {
146               *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next;
147             }
148         }
149     }
150 }
151
152 static_always_inline bool
153 ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
154 {
155 #ifdef MAP_SKIP_IP6_LOOKUP
156   if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP4].fei)
157     {
158       vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
159         pre_resolved[FIB_PROTOCOL_IP4].dpo.dpoi_index;
160       return (true);
161     }
162 #endif
163   return (false);
164 }
165
/*
 * ip6_map
 *
 * Main MAP decapsulation node for IPv6 ingress.  Classifies each packet:
 *   - IPv4-in-IPv6: domain lookup, security check, then ip4-lookup /
 *     rewrite bypass / fragmentation depending on MTU and configuration
 *   - ICMPv6 echo: handed to ip6-local; other ICMPv6: relayed to ICMPv4
 *   - IPv6 fragment: sent to IPv6 virtual reassembly
 *   - anything else: dropped (dual loop) or handed to the next input
 *     feature (single loop)
 */
static uword
ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_node.index);
  map_main_t *mm = &map_main;
  vlib_combined_counter_main_t *cm = mm->domain_counters;
  u32 thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual loop */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 pi0, pi1;
	  vlib_buffer_t *p0, *p1;
	  u8 error0 = MAP_ERROR_NONE;
	  u8 error1 = MAP_ERROR_NONE;
	  map_domain_t *d0 = 0, *d1 = 0;
	  ip4_header_t *ip40, *ip41;
	  ip6_header_t *ip60, *ip61;
	  /* NOTE(review): port0/port1 are never assigned in this loop, so
	     the trace below always records port 0 — confirm intended. */
	  u16 port0 = 0, port1 = 0;
	  u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
	  u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
	  u32 next1 = IP6_MAP_NEXT_IP4_LOOKUP;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);

	    /* IPv6 + IPv4 header + 8 bytes of ULP */
	    CLIB_PREFETCH (p2->data, 68, LOAD);
	    CLIB_PREFETCH (p3->data, 68, LOAD);
	  }

	  pi0 = to_next[0] = from[0];
	  pi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  /* Strip the outer IPv6 header; current now points at inner IPv4. */
	  p0 = vlib_get_buffer (vm, pi0);
	  p1 = vlib_get_buffer (vm, pi1);
	  ip60 = vlib_buffer_get_current (p0);
	  ip61 = vlib_buffer_get_current (p1);
	  vlib_buffer_advance (p0, sizeof (ip6_header_t));
	  vlib_buffer_advance (p1, sizeof (ip6_header_t));
	  ip40 = vlib_buffer_get_current (p0);
	  ip41 = vlib_buffer_get_current (p1);

	  /*
	   * Encapsulated IPv4 packet
	   *   - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
	   *   - Lookup/Rewrite or Fragment node in case of packet > MTU
	   * Fragmented IPv6 packet
	   * ICMP IPv6 packet
	   *   - Error -> Pass to ICMPv6/ICMPv4 relay
	   *   - Info -> Pass to IPv6 local
	   * Anything else -> drop
	   */
	  /* payload_length > 20 guarantees a full inner IPv4 header. */
	  if (PREDICT_TRUE
	      (ip60->protocol == IP_PROTOCOL_IP_IN_IP
	       && clib_net_to_host_u16 (ip60->payload_length) > 20))
	    {
	      d0 =
		ip4_map_get_domain ((ip4_address_t *) & ip40->
				    src_address.as_u32, &map_domain_index0,
				    &error0);
	    }
	  else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
		   clib_net_to_host_u16 (ip60->payload_length) >
		   sizeof (icmp46_header_t))
	    {
	      icmp46_header_t *icmp = (void *) (ip60 + 1);
	      next0 = (icmp->type == ICMP6_echo_request
		       || icmp->type ==
		       ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
		IP6_MAP_NEXT_IP6_ICMP_RELAY;
	    }
	  else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
	    {
	      next0 = IP6_MAP_NEXT_IP6_REASS;
	    }
	  else
	    {
	      error0 = MAP_ERROR_BAD_PROTOCOL;
	    }
	  if (PREDICT_TRUE
	      (ip61->protocol == IP_PROTOCOL_IP_IN_IP
	       && clib_net_to_host_u16 (ip61->payload_length) > 20))
	    {
	      d1 =
		ip4_map_get_domain ((ip4_address_t *) & ip41->
				    src_address.as_u32, &map_domain_index1,
				    &error1);
	    }
	  else if (ip61->protocol == IP_PROTOCOL_ICMP6 &&
		   clib_net_to_host_u16 (ip61->payload_length) >
		   sizeof (icmp46_header_t))
	    {
	      icmp46_header_t *icmp = (void *) (ip61 + 1);
	      next1 = (icmp->type == ICMP6_echo_request
		       || icmp->type ==
		       ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
		IP6_MAP_NEXT_IP6_ICMP_RELAY;
	    }
	  else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
	    {
	      next1 = IP6_MAP_NEXT_IP6_REASS;
	    }
	  else
	    {
	      error1 = MAP_ERROR_BAD_PROTOCOL;
	    }

	  if (d0)
	    {
	      /* MAP inbound security check */
	      ip6_map_security_check (d0, ip40, ip60, &next0, &error0);

	      if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
				next0 == IP6_MAP_NEXT_IP4_LOOKUP))
		{
		  /* Fragment if the inner packet exceeds the domain MTU,
		     else optionally bypass ip4-lookup via pre-resolved
		     adjacency. */
		  if (PREDICT_FALSE
		      (d0->mtu
		       && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
		    {
		      vnet_buffer (p0)->ip_frag.flags = 0;
		      vnet_buffer (p0)->ip_frag.next_index =
			IP4_FRAG_NEXT_IP4_LOOKUP;
		      vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
		      next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
		    }
		  else
		    {
		      next0 =
			ip6_map_ip4_lookup_bypass (p0,
						   ip40) ?
			IP6_MAP_NEXT_IP4_REWRITE : next0;
		    }
		  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
						   thread_index,
						   map_domain_index0, 1,
						   clib_net_to_host_u16
						   (ip40->length));
		}
	    }
	  if (d1)
	    {
	      /* MAP inbound security check */
	      ip6_map_security_check (d1, ip41, ip61, &next1, &error1);

	      if (PREDICT_TRUE (error1 == MAP_ERROR_NONE &&
				next1 == IP6_MAP_NEXT_IP4_LOOKUP))
		{
		  if (PREDICT_FALSE
		      (d1->mtu
		       && (clib_host_to_net_u16 (ip41->length) > d1->mtu)))
		    {
		      vnet_buffer (p1)->ip_frag.flags = 0;
		      vnet_buffer (p1)->ip_frag.next_index =
			IP4_FRAG_NEXT_IP4_LOOKUP;
		      vnet_buffer (p1)->ip_frag.mtu = d1->mtu;
		      next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
		    }
		  else
		    {
		      next1 =
			ip6_map_ip4_lookup_bypass (p1,
						   ip41) ?
			IP6_MAP_NEXT_IP4_REWRITE : next1;
		    }
		  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
						   thread_index,
						   map_domain_index1, 1,
						   clib_net_to_host_u16
						   (ip41->length));
		}
	    }

	  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
	      tr->map_domain_index = map_domain_index0;
	      tr->port = port0;
	    }

	  if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
	      tr->map_domain_index = map_domain_index1;
	      tr->port = port1;
	    }

	  /* Security-check failures generate an ICMPv6 policy error when
	     enabled; any other error drops the packet. */
	  if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
	    {
	      /* Set ICMP parameters */
	      vlib_buffer_advance (p0, -sizeof (ip6_header_t));
	      icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
					   ICMP6_destination_unreachable_source_address_failed_policy,
					   0);
	      next0 = IP6_MAP_NEXT_ICMP;
	    }
	  else
	    {
	      next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
	    }

	  if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
	    {
	      /* Set ICMP parameters */
	      vlib_buffer_advance (p1, -sizeof (ip6_header_t));
	      icmp6_error_set_vnet_buffer (p1, ICMP6_destination_unreachable,
					   ICMP6_destination_unreachable_source_address_failed_policy,
					   0);
	      next1 = IP6_MAP_NEXT_ICMP;
	    }
	  else
	    {
	      next1 = (error1 == MAP_ERROR_NONE) ? next1 : IP6_MAP_NEXT_DROP;
	    }

	  /* Reset packet: ip6-local expects the outer IPv6 header back. */
	  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
	    vlib_buffer_advance (p0, -sizeof (ip6_header_t));
	  if (next1 == IP6_MAP_NEXT_IP6_LOCAL)
	    vlib_buffer_advance (p1, -sizeof (ip6_header_t));

	  p0->error = error_node->errors[error0];
	  p1->error = error_node->errors[error1];
	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
					   n_left_to_next, pi0, pi1, next0,
					   next1);
	}

      /* Single loop */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  u8 error0 = MAP_ERROR_NONE;
	  map_domain_t *d0 = 0;
	  ip4_header_t *ip40;
	  ip6_header_t *ip60;
	  /* NOTE(review): port0 is never assigned here either — the trace
	     always records 0. */
	  i32 port0 = 0;
	  u32 map_domain_index0 = ~0;
	  u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip60 = vlib_buffer_get_current (p0);
	  vlib_buffer_advance (p0, sizeof (ip6_header_t));
	  ip40 = vlib_buffer_get_current (p0);

	  /*
	   * Encapsulated IPv4 packet
	   *   - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
	   *   - Lookup/Rewrite or Fragment node in case of packet > MTU
	   * Fragmented IPv6 packet
	   * ICMP IPv6 packet
	   *   - Error -> Pass to ICMPv6/ICMPv4 relay
	   *   - Info -> Pass to IPv6 local
	   * Anything else -> drop
	   */
	  if (PREDICT_TRUE
	      (ip60->protocol == IP_PROTOCOL_IP_IN_IP
	       && clib_net_to_host_u16 (ip60->payload_length) > 20))
	    {
	      d0 =
		ip4_map_get_domain ((ip4_address_t *) & ip40->
				    src_address.as_u32, &map_domain_index0,
				    &error0);
	    }
	  else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
		   clib_net_to_host_u16 (ip60->payload_length) >
		   sizeof (icmp46_header_t))
	    {
	      icmp46_header_t *icmp = (void *) (ip60 + 1);
	      next0 = (icmp->type == ICMP6_echo_request
		       || icmp->type ==
		       ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
		IP6_MAP_NEXT_IP6_ICMP_RELAY;
	    }
	  /* Unlike the dual loop, only fragments whose next header is
	     IPv4-in-IPv6 go to reassembly here. */
	  else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION &&
		   (((ip6_frag_hdr_t *) (ip60 + 1))->next_hdr ==
		    IP_PROTOCOL_IP_IN_IP))
	    {
	      next0 = IP6_MAP_NEXT_IP6_REASS;
	    }
	  else
	    {
	      /* XXX: Move get_domain to ip6_get_domain lookup on source */
	      //error0 = MAP_ERROR_BAD_PROTOCOL;
	      /* Not MAP traffic: restore the header and let the next input
	         feature handle it. */
	      vlib_buffer_advance (p0, -sizeof (ip6_header_t));
	      vnet_feature_next (&next0, p0);
	    }

	  if (d0)
	    {
	      /* MAP inbound security check */
	      ip6_map_security_check (d0, ip40, ip60, &next0, &error0);

	      if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
				next0 == IP6_MAP_NEXT_IP4_LOOKUP))
		{
		  if (PREDICT_FALSE
		      (d0->mtu
		       && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
		    {
		      vnet_buffer (p0)->ip_frag.flags = 0;
		      vnet_buffer (p0)->ip_frag.next_index =
			IP4_FRAG_NEXT_IP4_LOOKUP;
		      vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
		      next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
		    }
		  else
		    {
		      next0 =
			ip6_map_ip4_lookup_bypass (p0,
						   ip40) ?
			IP6_MAP_NEXT_IP4_REWRITE : next0;
		    }
		  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
						   thread_index,
						   map_domain_index0, 1,
						   clib_net_to_host_u16
						   (ip40->length));
		}
	    }

	  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
	      tr->map_domain_index = map_domain_index0;
	      tr->port = (u16) port0;
	    }

	  if (mm->icmp6_enabled &&
	      (error0 == MAP_ERROR_DECAP_SEC_CHECK
	       || error0 == MAP_ERROR_NO_DOMAIN))
	    {
	      /* Set ICMP parameters */
	      vlib_buffer_advance (p0, -sizeof (ip6_header_t));
	      icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
					   ICMP6_destination_unreachable_source_address_failed_policy,
					   0);
	      next0 = IP6_MAP_NEXT_ICMP;
	    }
	  else
	    {
	      next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
	    }

	  /* Reset packet */
	  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
	    vlib_buffer_advance (p0, -sizeof (ip6_header_t));

	  p0->error = error_node->errors[error0];
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, pi0, next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
553
554
/*
 * Walk a reassembly context and release every fragment that has become
 * forwardable: the inner IPv4 header is known and, for non-last fragments,
 * the first bytes of the following fragment have been cached so the IPv4
 * payload can be padded to an 8-byte-aligned boundary.  Forwardable
 * fragments get a synthesized IPv4 header and are appended to
 * *fragments_ready.
 */
static_always_inline void
ip6_map_ip6_reass_prepare (vlib_main_t * vm, vlib_node_runtime_t * node,
			   map_ip6_reass_t * r, u32 ** fragments_ready,
			   u32 ** fragments_to_drop)
{
  ip4_header_t *ip40;
  ip6_header_t *ip60;
  ip6_frag_hdr_t *frag0;
  vlib_buffer_t *p0;

  /* Nothing can be forwarded until the first fragment supplied the
     inner IPv4 header. */
  if (!r->ip4_header.ip_version_and_header_length)
    return;

  //The IP header is here, we need to check for packets
  //that can be forwarded
  int i;
  for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
    {
      /* Skip empty slots and non-last fragments whose successor data has
         not arrived yet (next_data_offset == 0xffff marks the last one). */
      if (r->fragments[i].pi == ~0 ||
	  ((!r->fragments[i].next_data_len)
	   && (r->fragments[i].next_data_offset != (0xffff))))
	continue;

      p0 = vlib_get_buffer (vm, r->fragments[i].pi);
      ip60 = vlib_buffer_get_current (p0);
      frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
      ip40 = (ip4_header_t *) (frag0 + 1);

      if (ip6_frag_hdr_offset (frag0))
	{
	  //Not first fragment, add the IPv4 header
	  clib_memcpy_fast (ip40, &r->ip4_header, 20);
	}

#ifdef MAP_IP6_REASS_COUNT_BYTES
      r->forwarded +=
	clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
#endif

      if (ip6_frag_hdr_more (frag0))
	{
	  //Not last fragment, we copy end of next
	  /* Append 20 cached bytes of the next fragment so this IPv4
	     fragment's length stays 8-byte aligned. */
	  clib_memcpy_fast (u8_ptr_add (ip60, p0->current_length),
			    r->fragments[i].next_data, 20);
	  p0->current_length += 20;
	  ip60->payload_length = u16_net_add (ip60->payload_length, 20);
	}

      /* Synthesize IPv4 fragmentation fields from the IPv6 fragment
         header; if the inner packet was already an IPv4 fragment, offsets
         are added together. */
      if (!ip4_is_fragment (ip40))
	{
	  ip40->fragment_id = frag_id_6to4 (frag0->identification);
	  ip40->flags_and_fragment_offset =
	    clib_host_to_net_u16 (ip6_frag_hdr_offset (frag0));
	}
      else
	{
	  ip40->flags_and_fragment_offset =
	    clib_host_to_net_u16 (ip4_get_fragment_offset (ip40) +
				  ip6_frag_hdr_offset (frag0));
	}

      if (ip6_frag_hdr_more (frag0))
	ip40->flags_and_fragment_offset |=
	  clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);

      ip40->length =
	clib_host_to_net_u16 (p0->current_length - sizeof (*ip60) -
			      sizeof (*frag0));
      ip40->checksum = ip4_header_checksum (ip40);

      if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  map_ip6_map_ip6_reass_trace_t *tr =
	    vlib_add_trace (vm, node, p0, sizeof (*tr));
	  tr->offset = ip4_get_fragment_offset (ip40);
	  tr->frag_len = clib_net_to_host_u16 (ip40->length) - sizeof (*ip40);
	  tr->out = 1;
	}

      /* Release the slot; the buffer now belongs to fragments_ready. */
      vec_add1 (*fragments_ready, r->fragments[i].pi);
      r->fragments[i].pi = ~0;
      r->fragments[i].next_data_len = 0;
      r->fragments[i].next_data_offset = 0;
      map_main.ip6_reass_buffered_counter--;

      //TODO: Best solution would be that ip6_map handles extension headers
      // and ignores atomic fragment. But in the meantime, let's just copy the header.

      /* Move the outer IPv6 header forward over the fragment header and
         patch its next-header so ip6_map sees plain IPv4-in-IPv6. */
      u8 protocol = frag0->next_hdr;
      memmove (u8_ptr_add (ip40, -sizeof (*ip60)), ip60, sizeof (*ip60));
      ((ip6_header_t *) u8_ptr_add (ip40, -sizeof (*ip60)))->protocol =
	protocol;
      vlib_buffer_advance (p0, sizeof (*frag0));
    }
}
650
651 void
652 map_ip6_drop_pi (u32 pi)
653 {
654   vlib_main_t *vm = vlib_get_main ();
655   vlib_node_runtime_t *n =
656     vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
657   vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP6_REASS_NEXT_DROP, pi);
658 }
659
660 void
661 map_ip4_drop_pi (u32 pi)
662 {
663   vlib_main_t *vm = vlib_get_main ();
664   vlib_node_runtime_t *n =
665     vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
666   vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP4_REASS_NEXT_DROP, pi);
667 }
668
/*
 * ip6_reass
 *
 * Virtual reassembly of IPv6-fragmented MAP traffic: fragments are cached
 * per flow until enough context exists (inner IPv4 header plus a peek at
 * the following fragment) to forward each one individually back into
 * ip6-map — the packet is never fully reassembled.
 *
 * TODO: We should count the number of successfully
 * transmitted fragment bytes and compare that to the last fragment
 * offset such that we can free the reassembly structure when all fragments
 * have been forwarded.
 */
static uword
ip6_map_ip6_reass (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
  u32 *fragments_to_drop = NULL;	/* buffers dropped at the end of the frame */
  u32 *fragments_ready = NULL;	/* buffers forwardable back into ip6-map */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Single loop */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  u8 error0 = MAP_ERROR_NONE;
	  ip6_header_t *ip60;
	  ip6_frag_hdr_t *frag0;
	  u16 offset;
	  u16 next_offset;
	  u16 frag_len;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip60 = vlib_buffer_get_current (p0);
	  frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
	  /* Mask out the flag bits; offset is in bytes here.  0xffff marks
	     the last fragment (no successor data needed). */
	  offset =
	    clib_host_to_net_u16 (frag0->fragment_offset_and_more) & (~7);
	  frag_len =
	    clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
	  next_offset =
	    ip6_frag_hdr_more (frag0) ? (offset + frag_len) : (0xffff);

	  //FIXME: Support other extension headers, maybe

	  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      map_ip6_map_ip6_reass_trace_t *tr =
		vlib_add_trace (vm, node, p0, sizeof (*tr));
	      tr->offset = offset;
	      tr->frag_len = frag_len;
	      tr->out = 0;
	    }

	  map_ip6_reass_lock ();
	  map_ip6_reass_t *r =
	    map_ip6_reass_get (&ip60->src_address, &ip60->dst_address,
			       frag0->identification, frag0->next_hdr,
			       &fragments_to_drop);
	  //FIXME: Use better error codes
	  if (PREDICT_FALSE (!r))
	    {
	      // Could not create a caching entry
	      error0 = MAP_ERROR_FRAGMENT_MEMORY;
	    }
	  else if (PREDICT_FALSE ((frag_len <= 20 &&
				   (ip6_frag_hdr_more (frag0) || (!offset)))))
	    {
	      //Very small fragment are restricted to the last one and
	      //can't be the first one
	      error0 = MAP_ERROR_FRAGMENT_MALFORMED;
	    }
	  else
	    if (map_ip6_reass_add_fragment
		(r, pi0, offset, next_offset, (u8 *) (frag0 + 1), frag_len))
	    {
	      /* Caching failed: tear down the whole reassembly. */
	      map_ip6_reass_free (r, &fragments_to_drop);
	      error0 = MAP_ERROR_FRAGMENT_MEMORY;
	    }
	  else
	    {
#ifdef MAP_IP6_REASS_COUNT_BYTES
	      if (!ip6_frag_hdr_more (frag0))
		r->expected_total = offset + frag_len;
#endif
	      /* Flush any fragments this arrival has made forwardable. */
	      ip6_map_ip6_reass_prepare (vm, node, r, &fragments_ready,
					 &fragments_to_drop);
#ifdef MAP_IP6_REASS_COUNT_BYTES
	      if (r->forwarded >= r->expected_total)
		map_ip6_reass_free (r, &fragments_to_drop);
#endif
	    }
	  map_ip6_reass_unlock ();

	  if (error0 == MAP_ERROR_NONE)
	    {
	      if (frag_len > 20)
		{
		  //Dequeue the packet
		  /* Ownership moved to the reassembly context; un-enqueue
		     it from this frame. */
		  n_left_to_next++;
		  to_next--;
		}
	      else
		{
		  //All data from that packet was copied no need to keep it, but this is not an error
		  p0->error = error_node->errors[MAP_ERROR_NONE];
		  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
						   to_next, n_left_to_next,
						   pi0,
						   IP6_MAP_IP6_REASS_NEXT_DROP);
		}
	    }
	  else
	    {
	      p0->error = error_node->errors[error0];
	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					       n_left_to_next, pi0,
					       IP6_MAP_IP6_REASS_NEXT_DROP);
	    }
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Flush accumulated ready/drop vectors in bulk. */
  map_send_all_to_node (vm, fragments_ready, node,
			&error_node->errors[MAP_ERROR_NONE],
			IP6_MAP_IP6_REASS_NEXT_IP6_MAP);
  map_send_all_to_node (vm, fragments_to_drop, node,
			&error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
			IP6_MAP_IP6_REASS_NEXT_DROP);

  vec_free (fragments_to_drop);
  vec_free (fragments_ready);
  return frame->n_vectors;
}
812
813 /*
814  * ip6_map_ip4_reass
815  */
816 static uword
817 ip6_map_ip4_reass (vlib_main_t * vm,
818                    vlib_node_runtime_t * node, vlib_frame_t * frame)
819 {
820   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
821   vlib_node_runtime_t *error_node =
822     vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
823   map_main_t *mm = &map_main;
824   vlib_combined_counter_main_t *cm = mm->domain_counters;
825   u32 thread_index = vm->thread_index;
826   u32 *fragments_to_drop = NULL;
827   u32 *fragments_to_loopback = NULL;
828
829   from = vlib_frame_vector_args (frame);
830   n_left_from = frame->n_vectors;
831   next_index = node->cached_next_index;
832   while (n_left_from > 0)
833     {
834       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
835
836       /* Single loop */
837       while (n_left_from > 0 && n_left_to_next > 0)
838         {
839           u32 pi0;
840           vlib_buffer_t *p0;
841           u8 error0 = MAP_ERROR_NONE;
842           map_domain_t *d0;
843           ip4_header_t *ip40;
844           ip6_header_t *ip60;
845           i32 port0 = 0;
846           u32 map_domain_index0 = ~0;
847           u32 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP;
848           u8 cached = 0;
849
850           pi0 = to_next[0] = from[0];
851           from += 1;
852           n_left_from -= 1;
853           to_next += 1;
854           n_left_to_next -= 1;
855
856           p0 = vlib_get_buffer (vm, pi0);
857           ip40 = vlib_buffer_get_current (p0);
858           ip60 = ((ip6_header_t *) ip40) - 1;
859
860           d0 =
861             ip4_map_get_domain ((ip4_address_t *) & ip40->src_address.as_u32,
862                                 &map_domain_index0, &error0);
863
864           map_ip4_reass_lock ();
865           //This node only deals with fragmented ip4
866           map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32,
867                                                   ip40->dst_address.as_u32,
868                                                   ip40->fragment_id,
869                                                   ip40->protocol,
870                                                   &fragments_to_drop);
871           if (PREDICT_FALSE (!r))
872             {
873               // Could not create a caching entry
874               error0 = MAP_ERROR_FRAGMENT_MEMORY;
875             }
876           else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
877             {
878               // This is a fragment
879               if (r->port >= 0)
880                 {
881                   // We know the port already
882                   port0 = r->port;
883                 }
884               else if (map_ip4_reass_add_fragment (r, pi0))
885                 {
886                   // Not enough space for caching
887                   error0 = MAP_ERROR_FRAGMENT_MEMORY;
888                   map_ip4_reass_free (r, &fragments_to_drop);
889                 }
890               else
891                 {
892                   cached = 1;
893                 }
894             }
895           else if ((port0 = ip4_get_port (ip40, 1)) == 0)
896             {
897               // Could not find port from first fragment. Stop reassembling.
898               error0 = MAP_ERROR_BAD_PROTOCOL;
899               port0 = 0;
900               map_ip4_reass_free (r, &fragments_to_drop);
901             }
902           else
903             {
904               // Found port. Remember it and loopback saved fragments
905               r->port = port0;
906               map_ip4_reass_get_fragments (r, &fragments_to_loopback);
907             }
908
909 #ifdef MAP_IP4_REASS_COUNT_BYTES
910           if (!cached && r)
911             {
912               r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
913               if (!ip4_get_fragment_more (ip40))
914                 r->expected_total =
915                   ip4_get_fragment_offset (ip40) * 8 +
916                   clib_host_to_net_u16 (ip40->length) - 20;
917               if (r->forwarded >= r->expected_total)
918                 map_ip4_reass_free (r, &fragments_to_drop);
919             }
920 #endif
921
922           map_ip4_reass_unlock ();
923
924           if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
925             error0 =
926               ip6_map_sec_check (d0, port0, ip40,
927                                  ip60) ? MAP_ERROR_NONE :
928               MAP_ERROR_DECAP_SEC_CHECK;
929
930           if (PREDICT_FALSE
931               (d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu)
932                && error0 == MAP_ERROR_NONE && !cached))
933             {
934               vnet_buffer (p0)->ip_frag.flags = 0;
935               vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
936               vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
937               next0 = IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT;
938             }
939
940           if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
941             {
942               map_ip6_map_ip4_reass_trace_t *tr =
943                 vlib_add_trace (vm, node, p0, sizeof (*tr));
944               tr->map_domain_index = map_domain_index0;
945               tr->port = port0;
946               tr->cached = cached;
947             }
948
949           if (cached)
950             {
951               //Dequeue the packet
952               n_left_to_next++;
953               to_next--;
954             }
955           else
956             {
957               if (error0 == MAP_ERROR_NONE)
958                 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
959                                                  thread_index,
960                                                  map_domain_index0, 1,
961                                                  clib_net_to_host_u16
962                                                  (ip40->length));
963               next0 =
964                 (error0 ==
965                  MAP_ERROR_NONE) ? next0 : IP6_MAP_IP4_REASS_NEXT_DROP;
966               p0->error = error_node->errors[error0];
967               vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
968                                                n_left_to_next, pi0, next0);
969             }
970
971           //Loopback when we reach the end of the inpu vector
972           if (n_left_from == 0 && vec_len (fragments_to_loopback))
973             {
974               from = vlib_frame_vector_args (frame);
975               u32 len = vec_len (fragments_to_loopback);
976               if (len <= VLIB_FRAME_SIZE)
977                 {
978                   clib_memcpy_fast (from, fragments_to_loopback,
979                                     sizeof (u32) * len);
980                   n_left_from = len;
981                   vec_reset_length (fragments_to_loopback);
982                 }
983               else
984                 {
985                   clib_memcpy_fast (from, fragments_to_loopback +
986                                     (len - VLIB_FRAME_SIZE),
987                                     sizeof (u32) * VLIB_FRAME_SIZE);
988                   n_left_from = VLIB_FRAME_SIZE;
989                   _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
990                 }
991             }
992         }
993       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
994     }
995   map_send_all_to_node (vm, fragments_to_drop, node,
996                         &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
997                         IP6_MAP_IP4_REASS_NEXT_DROP);
998
999   vec_free (fragments_to_drop);
1000   vec_free (fragments_to_loopback);
1001   return frame->n_vectors;
1002 }
1003
1004 /*
1005  * ip6_icmp_relay
1006  */
1007 static uword
1008 ip6_map_icmp_relay (vlib_main_t * vm,
1009                     vlib_node_runtime_t * node, vlib_frame_t * frame)
1010 {
1011   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
1012   vlib_node_runtime_t *error_node =
1013     vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index);
1014   map_main_t *mm = &map_main;
1015   u32 thread_index = vm->thread_index;
1016   u16 *fragment_ids, *fid;
1017
1018   from = vlib_frame_vector_args (frame);
1019   n_left_from = frame->n_vectors;
1020   next_index = node->cached_next_index;
1021
1022   /* Get random fragment IDs for replies. */
1023   fid = fragment_ids =
1024     clib_random_buffer_get_data (&vm->random_buffer,
1025                                  n_left_from * sizeof (fragment_ids[0]));
1026
1027   while (n_left_from > 0)
1028     {
1029       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1030
1031       /* Single loop */
1032       while (n_left_from > 0 && n_left_to_next > 0)
1033         {
1034           u32 pi0;
1035           vlib_buffer_t *p0;
1036           u8 error0 = MAP_ERROR_NONE;
1037           ip6_header_t *ip60;
1038           u32 next0 = IP6_ICMP_RELAY_NEXT_IP4_LOOKUP;
1039           u32 mtu;
1040
1041           pi0 = to_next[0] = from[0];
1042           from += 1;
1043           n_left_from -= 1;
1044           to_next += 1;
1045           n_left_to_next -= 1;
1046
1047           p0 = vlib_get_buffer (vm, pi0);
1048           ip60 = vlib_buffer_get_current (p0);
1049           u16 tlen = clib_net_to_host_u16 (ip60->payload_length);
1050
1051           /*
1052            * In:
1053            *  IPv6 header           (40)
1054            *  ICMPv6 header          (8)
1055            *  IPv6 header           (40)
1056            *  Original IPv4 header / packet
1057            * Out:
1058            *  New IPv4 header
1059            *  New ICMP header
1060            *  Original IPv4 header / packet
1061            */
1062
1063           /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */
1064           if (tlen < 76)
1065             {
1066               error0 = MAP_ERROR_ICMP_RELAY;
1067               goto error;
1068             }
1069
1070           icmp46_header_t *icmp60 = (icmp46_header_t *) (ip60 + 1);
1071           ip6_header_t *inner_ip60 = (ip6_header_t *) (icmp60 + 2);
1072
1073           if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP)
1074             {
1075               error0 = MAP_ERROR_ICMP_RELAY;
1076               goto error;
1077             }
1078
1079           ip4_header_t *inner_ip40 = (ip4_header_t *) (inner_ip60 + 1);
1080           vlib_buffer_advance (p0, 60); /* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */
1081           ip4_header_t *new_ip40 = vlib_buffer_get_current (p0);
1082           icmp46_header_t *new_icmp40 = (icmp46_header_t *) (new_ip40 + 1);
1083
1084           /*
1085            * Relay according to RFC2473, section 8.3
1086            */
1087           switch (icmp60->type)
1088             {
1089             case ICMP6_destination_unreachable:
1090             case ICMP6_time_exceeded:
1091             case ICMP6_parameter_problem:
1092               /* Type 3 - destination unreachable, Code 1 - host unreachable */
1093               new_icmp40->type = ICMP4_destination_unreachable;
1094               new_icmp40->code =
1095                 ICMP4_destination_unreachable_destination_unreachable_host;
1096               break;
1097
1098             case ICMP6_packet_too_big:
1099               /* Type 3 - destination unreachable, Code 4 - packet too big */
1100               /* Potential TODO: Adjust domain tunnel MTU based on the value received here */
1101               mtu = clib_net_to_host_u32 (*((u32 *) (icmp60 + 1)));
1102
1103               /* Check DF flag */
1104               if (!
1105                   (inner_ip40->flags_and_fragment_offset &
1106                    clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT)))
1107                 {
1108                   error0 = MAP_ERROR_ICMP_RELAY;
1109                   goto error;
1110                 }
1111
1112               new_icmp40->type = ICMP4_destination_unreachable;
1113               new_icmp40->code =
1114                 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set;
1115               *((u32 *) (new_icmp40 + 1)) =
1116                 clib_host_to_net_u32 (mtu < 1280 ? 1280 : mtu);
1117               break;
1118
1119             default:
1120               error0 = MAP_ERROR_ICMP_RELAY;
1121               break;
1122             }
1123
1124           /*
1125            * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812)
1126            */
1127           new_ip40->ip_version_and_header_length = 0x45;
1128           new_ip40->tos = 0;
1129           u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20;
1130           new_ip40->length = clib_host_to_net_u16 (nlen);
1131           new_ip40->fragment_id = fid[0];
1132           fid++;
1133           new_ip40->ttl = 64;
1134           new_ip40->protocol = IP_PROTOCOL_ICMP;
1135           new_ip40->src_address = mm->icmp4_src_address;
1136           new_ip40->dst_address = inner_ip40->src_address;
1137           new_ip40->checksum = ip4_header_checksum (new_ip40);
1138
1139           new_icmp40->checksum = 0;
1140           ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20);
1141           new_icmp40->checksum = ~ip_csum_fold (sum);
1142
1143           vlib_increment_simple_counter (&mm->icmp_relayed, thread_index, 0,
1144                                          1);
1145
1146         error:
1147           if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
1148             {
1149               map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
1150               tr->map_domain_index = 0;
1151               tr->port = 0;
1152             }
1153
1154           next0 =
1155             (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP;
1156           p0->error = error_node->errors[error0];
1157           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1158                                            n_left_to_next, pi0, next0);
1159         }
1160       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1161     }
1162
1163   return frame->n_vectors;
1164
1165 }
1166
/* Human-readable error counter strings, expanded from the
 * foreach_map_error list in map.h; shared by all MAP nodes below. */
static char *map_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};
1172
1173 /* *INDENT-OFF* */
1174 VNET_FEATURE_INIT (ip6_map_feature, static) =
1175 {
1176   .arc_name = "ip6-unicast",
1177   .node_name = "ip6-map",
1178   .runs_before = VNET_FEATURES ("ip6-flow-classify"),
1179 };
1180
1181 VLIB_REGISTER_NODE(ip6_map_node) = {
1182   .function = ip6_map,
1183   .name = "ip6-map",
1184   .vector_size = sizeof(u32),
1185   .format_trace = format_map_trace,
1186   .type = VLIB_NODE_TYPE_INTERNAL,
1187
1188   .n_errors = MAP_N_ERROR,
1189   .error_strings = map_error_strings,
1190
1191   .n_next_nodes = IP6_MAP_N_NEXT,
1192   .next_nodes = {
1193     [IP6_MAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1194 #ifdef MAP_SKIP_IP6_LOOKUP
1195     [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-load-balance",
1196 #endif
1197     [IP6_MAP_NEXT_IP6_REASS] = "ip6-map-ip6-reass",
1198     [IP6_MAP_NEXT_IP4_REASS] = "ip6-map-ip4-reass",
1199     [IP6_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
1200     [IP6_MAP_NEXT_IP6_ICMP_RELAY] = "ip6-map-icmp-relay",
1201     [IP6_MAP_NEXT_IP6_LOCAL] = "ip6-local",
1202     [IP6_MAP_NEXT_DROP] = "error-drop",
1203     [IP6_MAP_NEXT_ICMP] = "ip6-icmp-error",
1204   },
1205 };
1206 /* *INDENT-ON* */
1207
1208 /* *INDENT-OFF* */
1209 VLIB_REGISTER_NODE(ip6_map_ip6_reass_node) = {
1210   .function = ip6_map_ip6_reass,
1211   .name = "ip6-map-ip6-reass",
1212   .vector_size = sizeof(u32),
1213   .format_trace = format_ip6_map_ip6_reass_trace,
1214   .type = VLIB_NODE_TYPE_INTERNAL,
1215   .n_errors = MAP_N_ERROR,
1216   .error_strings = map_error_strings,
1217   .n_next_nodes = IP6_MAP_IP6_REASS_N_NEXT,
1218   .next_nodes = {
1219     [IP6_MAP_IP6_REASS_NEXT_IP6_MAP] = "ip6-map",
1220     [IP6_MAP_IP6_REASS_NEXT_DROP] = "error-drop",
1221   },
1222 };
1223 /* *INDENT-ON* */
1224
1225 /* *INDENT-OFF* */
1226 VLIB_REGISTER_NODE(ip6_map_ip4_reass_node) = {
1227   .function = ip6_map_ip4_reass,
1228   .name = "ip6-map-ip4-reass",
1229   .vector_size = sizeof(u32),
1230   .format_trace = format_ip6_map_ip4_reass_trace,
1231   .type = VLIB_NODE_TYPE_INTERNAL,
1232   .n_errors = MAP_N_ERROR,
1233   .error_strings = map_error_strings,
1234   .n_next_nodes = IP6_MAP_IP4_REASS_N_NEXT,
1235   .next_nodes = {
1236     [IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup",
1237     [IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
1238     [IP6_MAP_IP4_REASS_NEXT_DROP] = "error-drop",
1239   },
1240 };
1241 /* *INDENT-ON* */
1242
1243 /* *INDENT-OFF* */
1244 VLIB_REGISTER_NODE(ip6_map_icmp_relay_node, static) = {
1245   .function = ip6_map_icmp_relay,
1246   .name = "ip6-map-icmp-relay",
1247   .vector_size = sizeof(u32),
1248   .format_trace = format_map_trace, //FIXME
1249   .type = VLIB_NODE_TYPE_INTERNAL,
1250   .n_errors = MAP_N_ERROR,
1251   .error_strings = map_error_strings,
1252   .n_next_nodes = IP6_ICMP_RELAY_N_NEXT,
1253   .next_nodes = {
1254     [IP6_ICMP_RELAY_NEXT_IP4_LOOKUP] = "ip4-lookup",
1255     [IP6_ICMP_RELAY_NEXT_DROP] = "error-drop",
1256   },
1257 };
1258 /* *INDENT-ON* */
1259
1260 /*
1261  * fd.io coding-style-patch-verification: ON
1262  *
1263  * Local Variables:
1264  * eval: (c-set-style "gnu")
1265  * End:
1266  */