src/plugins/map/ip6_map_t.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "map.h"

#include <vnet/ip/ip4_to_ip6.h>
#include <vnet/ip/ip6_to_ip4.h>
#include <vnet/ip/ip_frag.h>

typedef enum
{
  IP6_MAPT_NEXT_MAPT_TCP_UDP,
  IP6_MAPT_NEXT_MAPT_ICMP,
  IP6_MAPT_NEXT_MAPT_FRAGMENTED,
  IP6_MAPT_NEXT_DROP,
  IP6_MAPT_NEXT_ICMP,
  IP6_MAPT_N_NEXT
} ip6_mapt_next_t;

typedef enum
{
  IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
  IP6_MAPT_ICMP_NEXT_IP4_REWRITE,
  IP6_MAPT_ICMP_NEXT_IP4_FRAG,
  IP6_MAPT_ICMP_NEXT_DROP,
  IP6_MAPT_ICMP_N_NEXT
} ip6_mapt_icmp_next_t;

typedef enum
{
  IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
  IP6_MAPT_TCP_UDP_NEXT_IP4_REWRITE,
  IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
  IP6_MAPT_TCP_UDP_NEXT_DROP,
  IP6_MAPT_TCP_UDP_N_NEXT
} ip6_mapt_tcp_udp_next_t;

typedef enum
{
  IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
  IP6_MAPT_FRAGMENTED_NEXT_IP4_REWRITE,
  IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
  IP6_MAPT_FRAGMENTED_NEXT_DROP,
  IP6_MAPT_FRAGMENTED_N_NEXT
} ip6_mapt_fragmented_next_t;

typedef struct
{
  map_domain_t *d;
  u16 sender_port;
} icmp6_to_icmp_ctx_t;

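/*
 * icmp6_to_icmp() callback for the outer header: verifies that the IPv6
 * source address is consistent with the MAP domain rules for the embedded
 * IPv4 source and the sender port, then writes the translated IPv4 source
 * and destination addresses.
 */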
static int
ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
{
  icmp6_to_icmp_ctx_t *ctx = arg;
  u32 ip4_sadr;

  // Security check
  // Note that this prevents an intermediate IPv6 router from answering
  // the request.
  ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->ip6_src_len);
  if (ip6->src_address.as_u64[0] !=
      map_get_pfx_net (ctx->d, ip4_sadr, ctx->sender_port)
      || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
                                                        ctx->sender_port))
    return -1;

  ip4->dst_address.as_u32 =
    ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
  ip4->src_address.as_u32 = ip4_sadr;

  return 0;
}

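/*
 * icmp6_to_icmp() callback for the inner (quoted) packet of an ICMPv6
 * error: applies the same security check to the inner IPv6 destination,
 * then writes the translated IPv4 addresses of the inner header.
 */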
static int
ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
                              void *arg)
{
  icmp6_to_icmp_ctx_t *ctx = arg;
  u32 inner_ip4_dadr;

  // Security check of inner packet
  inner_ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->ip6_src_len);
  if (ip6->dst_address.as_u64[0] !=
      map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->sender_port)
      || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
                                                        inner_ip4_dadr,
                                                        ctx->sender_port))
    return -1;

  ip4->dst_address.as_u32 = inner_ip4_dadr;
  ip4->src_address.as_u32 =
    ip6_map_t_embedded_address (ctx->d, &ip6->src_address);

  return 0;
}

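/*
 * ICMP translation node: looks up the MAP domain recorded by ip6-map-t,
 * extracts the sender port, translates the ICMPv6 message to ICMPv4 via
 * icmp6_to_icmp() with the callbacks above, updates the domain RX counters
 * and forwards to ip4-lookup, ip4-frag or the lookup-bypass path.
 */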
static uword
ip6_map_t_icmp (vlib_main_t * vm,
                vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          u8 error0;
          ip6_mapt_icmp_next_t next0;
          map_domain_t *d0;
          u16 len0;
          icmp6_to_icmp_ctx_t ctx0;
          ip6_header_t *ip60;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          error0 = MAP_ERROR_NONE;
          next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;

          p0 = vlib_get_buffer (vm, pi0);
          ip60 = vlib_buffer_get_current (p0);
          len0 = clib_net_to_host_u16 (ip60->payload_length);
          d0 =
            pool_elt_at_index (map_main.domains,
                               vnet_buffer (p0)->map_t.map_domain_index);
          ctx0.d = d0;
          ctx0.sender_port = 0;
          if (!ip6_get_port
              (vm, p0, ip60, p0->current_length, NULL, &ctx0.sender_port,
               NULL, NULL, NULL, NULL))
            {
              // In case of 1:1 mapping, we don't care about the port
              if (!(d0->ea_bits_len == 0 && d0->rules))
                {
                  error0 = MAP_ERROR_ICMP;
                  goto err0;
                }
            }

          if (icmp6_to_icmp (vm, p0, ip6_to_ip4_set_icmp_cb, &ctx0,
                             ip6_to_ip4_set_inner_icmp_cb, &ctx0))
            {
              error0 = MAP_ERROR_ICMP;
              goto err0;
            }

          if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
            {
              // Send to fragmentation node if necessary
              vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
              vnet_buffer (p0)->ip_frag.next_index = IP_FRAG_NEXT_IP4_LOOKUP;
              next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
            }
          else
            {
              next0 = ip6_map_ip4_lookup_bypass (p0, NULL) ?
                IP6_MAPT_ICMP_NEXT_IP4_REWRITE : next0;
            }
        err0:
          if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
                                               thread_index,
                                               vnet_buffer (p0)->
                                               map_t.map_domain_index, 1,
                                               len0);
            }
          else
            {
              next0 = IP6_MAPT_ICMP_NEXT_DROP;
            }

          p0->error = error_node->errors[error0];
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

/*
 * Translate IPv6 fragmented packet to IPv4.
 */
always_inline int
map_ip6_to_ip4_fragmented (vlib_main_t * vm, vlib_buffer_t * p)
{
  ip6_header_t *ip6;
  ip6_frag_hdr_t *frag;
  ip4_header_t *ip4;
  u16 frag_id;
  u8 frag_more;
  u16 frag_offset;
  u8 l4_protocol;
  u16 l4_offset;

  ip6 = vlib_buffer_get_current (p);

  if (ip6_parse
      (vm, p, ip6, p->current_length, &l4_protocol, &l4_offset, &frag_offset))
    return -1;

  frag = (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_offset);
  ip4 = (ip4_header_t *) u8_ptr_add (ip6, l4_offset - sizeof (*ip4));
  vlib_buffer_advance (p, l4_offset - sizeof (*ip4));

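  /* Carry the fragmentation state over from the IPv6 fragment header;
   * frag_id_6to4() derives the 16-bit IPv4 identification from the IPv6
   * identification field. */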
  frag_id = frag_id_6to4 (frag->identification);
  frag_more = ip6_frag_hdr_more (frag);
  frag_offset = ip6_frag_hdr_offset (frag);

  ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
  ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;

  ip4->ip_version_and_header_length =
    IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
  ip4->tos = ip6_translate_tos (ip6->ip_version_traffic_class_and_flow_label);
  ip4->length =
    u16_net_add (ip6->payload_length,
                 sizeof (*ip4) - l4_offset + sizeof (*ip6));
  ip4->fragment_id = frag_id;
  ip4->flags_and_fragment_offset =
    clib_host_to_net_u16 (frag_offset |
                          (frag_more ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0));
  ip4->ttl = ip6->hop_limit;
  ip4->protocol =
    (l4_protocol == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : l4_protocol;
  ip4->checksum = ip4_header_checksum (ip4);

  return 0;
}

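/*
 * Fragment translation node: handles the IPv6 fragments dispatched by
 * ip6-map-t, rewriting each one to IPv4 with map_ip6_to_ip4_fragmented()
 * and forwarding it to ip4-lookup, ip4-frag or the lookup-bypass path;
 * packets that fail translation are dropped.
 */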
static uword
ip6_map_t_fragmented (vlib_main_t * vm,
                      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          u32 next0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
          p0 = vlib_get_buffer (vm, pi0);

          if (map_ip6_to_ip4_fragmented (vm, p0))
            {
              p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
              next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  // Send to fragmentation node if necessary
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP_FRAG_NEXT_IP4_LOOKUP;
                  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
                }
              else
                {
                  next0 = ip6_map_ip4_lookup_bypass (p0, NULL) ?
                    IP6_MAPT_FRAGMENTED_NEXT_IP4_REWRITE : next0;
                }
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

/*
 * Translate IPv6 UDP/TCP packet to IPv4.
 * Returns 0 on success.
 * Returns a non-zero error code on error.
 */
always_inline int
map_ip6_to_ip4_tcp_udp (vlib_main_t * vm, vlib_buffer_t * p,
                        bool udp_checksum)
{
  map_main_t *mm = &map_main;
  ip6_header_t *ip6;
  u16 *checksum;
  ip_csum_t csum = 0;
  ip4_header_t *ip4;
  u16 fragment_id;
  u16 flags;
  u16 frag_offset;
  u8 l4_protocol;
  u16 l4_offset;
  ip6_address_t old_src, old_dst;

  ip6 = vlib_buffer_get_current (p);

  if (ip6_parse
      (vm, p, ip6, p->current_length, &l4_protocol, &l4_offset, &frag_offset))
    return -1;

  if (l4_protocol == IP_PROTOCOL_TCP)
    {
      tcp_header_t *tcp = (tcp_header_t *) u8_ptr_add (ip6, l4_offset);
      if (mm->tcp_mss > 0)
        {
          csum = tcp->checksum;
          map_mss_clamping (tcp, &csum, mm->tcp_mss);
          tcp->checksum = ip_csum_fold (csum);
        }
      checksum = &tcp->checksum;
    }
  else
    {
      udp_header_t *udp = (udp_header_t *) u8_ptr_add (ip6, l4_offset);
      checksum = &udp->checksum;
    }

  old_src.as_u64[0] = ip6->src_address.as_u64[0];
  old_src.as_u64[1] = ip6->src_address.as_u64[1];
  old_dst.as_u64[0] = ip6->dst_address.as_u64[0];
  old_dst.as_u64[1] = ip6->dst_address.as_u64[1];

  ip4 = (ip4_header_t *) u8_ptr_add (ip6, l4_offset - sizeof (*ip4));

  vlib_buffer_advance (p, l4_offset - sizeof (*ip4));

  if (PREDICT_FALSE (frag_offset))
    {
      // Only the first fragment
      ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_offset);
      fragment_id = frag_id_6to4 (hdr->identification);
      flags = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
    }
  else
    {
      fragment_id = 0;
      flags = 0;
    }

  ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
  ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;

  /*
   * Drop spoofed packets that come from a known domain source.
   */
  u32 map_domain_index = -1;
  u8 error = 0;

  ip4_map_get_domain (&ip4->src_address, &map_domain_index, &error);
  if (error)
    return error;

  ip4->ip_version_and_header_length =
    IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
  ip4->tos = ip6_translate_tos (ip6->ip_version_traffic_class_and_flow_label);
  ip4->length =
    u16_net_add (ip6->payload_length,
                 sizeof (*ip4) + sizeof (*ip6) - l4_offset);
  ip4->fragment_id = fragment_id;
  ip4->flags_and_fragment_offset = flags;
  ip4->ttl = ip6->hop_limit;
  ip4->protocol = l4_protocol;
  ip4->checksum = ip4_header_checksum (ip4);

  // UDP checksum is optional over IPv4
  if (!udp_checksum && l4_protocol == IP_PROTOCOL_UDP)
    {
      *checksum = 0;
    }
  else
    {
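      /* Incrementally update the L4 checksum: subtract the old IPv6
       * addresses from the pseudo-header contribution and add the new
       * IPv4 addresses. */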
      csum = ip_csum_sub_even (*checksum, old_src.as_u64[0]);
      csum = ip_csum_sub_even (csum, old_src.as_u64[1]);
      csum = ip_csum_sub_even (csum, old_dst.as_u64[0]);
      csum = ip_csum_sub_even (csum, old_dst.as_u64[1]);
      csum = ip_csum_add_even (csum, ip4->dst_address.as_u32);
      csum = ip_csum_add_even (csum, ip4->src_address.as_u32);
      *checksum = ip_csum_fold (csum);
    }

  return 0;
}

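/*
 * TCP/UDP translation node: rewrites the IPv6 header to IPv4 with
 * map_ip6_to_ip4_tcp_udp() and forwards the packet to ip4-lookup,
 * ip4-frag or the lookup-bypass path; failed translations are dropped.
 */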
static uword
ip6_map_t_tcp_udp (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip6_mapt_tcp_udp_next_t next0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

          p0 = vlib_get_buffer (vm, pi0);

          if (map_ip6_to_ip4_tcp_udp (vm, p0, true))
            {
              p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  // Send to fragmentation node if necessary
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP_FRAG_NEXT_IP4_LOOKUP;
                  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
                }
              else
                {
                  next0 = ip6_map_ip4_lookup_bypass (p0, NULL) ?
                    IP6_MAPT_TCP_UDP_NEXT_IP4_REWRITE : next0;
                }
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

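/*
 * MAP-T classification node on the ip6-unicast arc: finds the MAP domain
 * from the IPv6 destination, precomputes the translated IPv4 addresses and
 * MTU, runs the per-domain security check on the source address and port,
 * and dispatches the packet to the TCP/UDP, ICMP or fragment translation
 * node.
 */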
static uword
ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_node.index);
  map_main_t *mm = &map_main;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip6_header_t *ip60;
          u8 error0;
          u32 l4_len0;
          i32 map_port0;
          map_domain_t *d0;
          ip6_frag_hdr_t *frag0;
          ip6_mapt_next_t next0 = 0;
          u32 saddr;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          error0 = MAP_ERROR_NONE;
          p0 = vlib_get_buffer (vm, pi0);
          u16 l4_src_port = vnet_buffer (p0)->ip.reass.l4_src_port;

          ip60 = vlib_buffer_get_current (p0);

          d0 =
            ip6_map_get_domain (&ip60->dst_address,
                                &vnet_buffer (p0)->map_t.map_domain_index,
                                &error0);
          if (!d0)
            {                   /* Guess it wasn't for us */
              vnet_feature_next (&next0, p0);
              goto exit;
            }

          saddr = map_get_ip4 (&ip60->src_address, d0->ip6_src_len);
          vnet_buffer (p0)->map_t.v6.saddr = saddr;
          vnet_buffer (p0)->map_t.v6.daddr =
            ip6_map_t_embedded_address (d0, &ip60->dst_address);
          vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;

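          /* A port of -1 means no source port is available for the
           * security check below. */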
          map_port0 = -1;

          if (PREDICT_FALSE (ip60->hop_limit == 1))
            {
              icmp6_error_set_vnet_buffer (p0, ICMP6_time_exceeded,
                                           ICMP6_time_exceeded_ttl_exceeded_in_transit,
                                           0);
              p0->error = error_node->errors[MAP_ERROR_TIME_EXCEEDED];
              next0 = IP6_MAPT_NEXT_ICMP;
              goto trace;
            }

          if (PREDICT_FALSE
              (ip6_parse (vm, p0, ip60, p0->current_length,
                          &(vnet_buffer (p0)->map_t.v6.l4_protocol),
                          &(vnet_buffer (p0)->map_t.v6.l4_offset),
                          &(vnet_buffer (p0)->map_t.v6.frag_offset))))
            {
              error0 =
                error0 == MAP_ERROR_NONE ? MAP_ERROR_MALFORMED : error0;
            }

          l4_len0 =
            (u32) clib_net_to_host_u16 (ip60->payload_length) +
            sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
          frag0 =
            (ip6_frag_hdr_t *) u8_ptr_add (ip60,
                                           vnet_buffer (p0)->map_t.v6.
                                           frag_offset);

          if (PREDICT_FALSE
              (vnet_buffer (p0)->map_t.v6.frag_offset
               && ip6_frag_hdr_offset (frag0)))
            {
              map_port0 = l4_src_port;
              next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
            }
          else
            if (PREDICT_TRUE
                (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
            {
              error0 =
                l4_len0 <
                sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
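              /* The TCP checksum field lives 16 bytes into the TCP header. */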
              vnet_buffer (p0)->map_t.checksum_offset =
                vnet_buffer (p0)->map_t.v6.l4_offset + 16;
              next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
              map_port0 = l4_src_port;
            }
          else
            if (PREDICT_TRUE
                (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
            {
              error0 =
                l4_len0 <
                sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
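              /* The UDP checksum field lives 6 bytes into the UDP header. */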
              vnet_buffer (p0)->map_t.checksum_offset =
                vnet_buffer (p0)->map_t.v6.l4_offset + 6;
              next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
              map_port0 = l4_src_port;
            }
          else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
                   IP_PROTOCOL_ICMP6)
            {
              error0 =
                l4_len0 <
                sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
              next0 = IP6_MAPT_NEXT_MAPT_ICMP;
              if (((icmp46_header_t *)
                   u8_ptr_add (ip60,
                               vnet_buffer (p0)->map_t.v6.l4_offset))->type ==
                  ICMP6_echo_reply
                  || ((icmp46_header_t *)
                      u8_ptr_add (ip60,
                                  vnet_buffer (p0)->map_t.v6.l4_offset))->
                  type == ICMP6_echo_request)
                map_port0 = l4_src_port;
            }
          else
            {
              // TODO: In case of 1:1 mapping, it might be possible to
              // do something with those packets.
              error0 = MAP_ERROR_BAD_PROTOCOL;
            }

          if (PREDICT_FALSE (map_port0 != -1) &&
              (ip60->src_address.as_u64[0] !=
               map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
                                map_port0)
               || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
                                                                  vnet_buffer
                                                                  (p0)->map_t.
                                                                  v6.saddr,
                                                                  map_port0)))
            {
              // Security check when a source port is known (map_port0 is
              // not -1): non-first fragment, TCP, UDP or ICMP echo
              error0 =
                error0 == MAP_ERROR_NONE ? MAP_ERROR_SEC_CHECK : error0;
            }

          if (PREDICT_TRUE
              (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
                                               thread_index,
                                               vnet_buffer (p0)->map_t.
                                               map_domain_index, 1,
                                               clib_net_to_host_u16 (ip60->
                                                                     payload_length));
            }

          if (PREDICT_FALSE
              (error0 == MAP_ERROR_SEC_CHECK && mm->icmp6_enabled))
            {
              icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
                                           ICMP6_destination_unreachable_source_address_failed_policy,
                                           0);
              next0 = IP6_MAPT_NEXT_ICMP;
            }
          else
            {
              next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
            }

          p0->error = error_node->errors[error0];
        trace:
          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              map_add_trace (vm, node, p0,
                             vnet_buffer (p0)->map_t.map_domain_index,
                             map_port0);
            }
        exit:
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
  .function = ip6_map_t_fragmented,
  .name = "ip6-map-t-fragmented",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_counters = map_error_counters,

  .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes =
  {
    [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_MAPT_FRAGMENTED_NEXT_IP4_REWRITE] = "ip4-load-balance",
    [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
    [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
  .function = ip6_map_t_icmp,
  .name = "ip6-map-t-icmp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_counters = map_error_counters,

  .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
  .next_nodes =
  {
    [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_MAPT_ICMP_NEXT_IP4_REWRITE] = "ip4-load-balance",
    [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
    [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
  .function = ip6_map_t_tcp_udp,
  .name = "ip6-map-t-tcp-udp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_counters = map_error_counters,

  .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
  .next_nodes =
  {
    [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_MAPT_TCP_UDP_NEXT_IP4_REWRITE] = "ip4-load-balance",
    [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
    [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
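/*
 * Hook ip6-map-t into the ip6-unicast feature arc. It must run after
 * shallow virtual reassembly so that the L4 source port is already
 * available in the buffer metadata.
 */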
VNET_FEATURE_INIT (ip6_map_t_feature, static) = {
    .arc_name = "ip6-unicast",
    .node_name = "ip6-map-t",
    .runs_before = VNET_FEATURES ("ip6-flow-classify"),
    .runs_after = VNET_FEATURES ("ip6-sv-reassembly-feature"),
};

VLIB_REGISTER_NODE(ip6_map_t_node) = {
  .function = ip6_map_t,
  .name = "ip6-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_counters = map_error_counters,

  .n_next_nodes = IP6_MAPT_N_NEXT,
  .next_nodes =
  {
    [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
    [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
    [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
    [IP6_MAPT_NEXT_DROP] = "error-drop",
    [IP6_MAPT_NEXT_ICMP] = "ip6-icmp-error",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */