API: Change ip4_address and ip6_address to use type alias.
[vpp.git] / src / plugins / map / ip4_map_t.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
19
20 #define IP4_MAP_T_DUAL_LOOP 1
21
//Next nodes of the ip4-map-t classifier node; dispatch is per L4 protocol.
typedef enum
{
  IP4_MAPT_NEXT_MAPT_TCP_UDP,   //TCP/UDP translation node
  IP4_MAPT_NEXT_MAPT_ICMP,      //ICMP -> ICMP6 translation node
  IP4_MAPT_NEXT_MAPT_FRAGMENTED,        //Non-first fragments (no L4 header)
  IP4_MAPT_NEXT_DROP,
  IP4_MAPT_N_NEXT
} ip4_mapt_next_t;
30
//Next nodes of the ip4-map-t-icmp translation node.
typedef enum
{
  IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
  IP4_MAPT_ICMP_NEXT_IP6_FRAG,  //Taken when translated packet exceeds domain MTU
  IP4_MAPT_ICMP_NEXT_DROP,
  IP4_MAPT_ICMP_N_NEXT
} ip4_mapt_icmp_next_t;
38
//Next nodes of the ip4-map-t-tcp-udp translation node.
typedef enum
{
  IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
  IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,       //Taken when translated packet exceeds domain MTU
  IP4_MAPT_TCP_UDP_NEXT_DROP,
  IP4_MAPT_TCP_UDP_N_NEXT
} ip4_mapt_tcp_udp_next_t;
46
//Next nodes of the ip4-map-t-fragmented translation node.
typedef enum
{
  IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
  IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,    //Taken when translated packet exceeds domain MTU
  IP4_MAPT_FRAGMENTED_NEXT_DROP,
  IP4_MAPT_FRAGMENTED_N_NEXT
} ip4_mapt_fragmented_next_t;
54
55 //This is used to pass information within the buffer data.
56 //Buffer structure being too small to contain big structures like this.
57 /* *INDENT-OFF* */
//Prepended in front of the IPv4 packet by the ip4-map-t classifier and
//consumed by the per-protocol translation nodes (see ip4_to_ip6_set_cb).
typedef CLIB_PACKED (struct {
  ip6_address_t daddr;  //Pre-computed IPv6 destination (MAP prefix+suffix)
  ip6_address_t saddr;  //Pre-computed IPv6 source (embedded IPv4 address)
  //IPv6 header + Fragmentation header will be here
  //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
  u8 unused[28];
}) ip4_mapt_pseudo_header_t;
65 /* *INDENT-ON* */
66
67
68 static_always_inline int
69 ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
70 {
71   u32 *ignore = NULL;
72   map_ip4_reass_lock ();
73   map_ip4_reass_t *r =
74     map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
75                        ip4->fragment_id,
76                        (ip4->protocol ==
77                         IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
78                        &ignore);
79   if (r)
80     r->port = port;
81
82   map_ip4_reass_unlock ();
83   return !r;
84 }
85
86 static_always_inline i32
87 ip4_map_fragment_get_port (ip4_header_t * ip4)
88 {
89   u32 *ignore = NULL;
90   map_ip4_reass_lock ();
91   map_ip4_reass_t *r =
92     map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
93                        ip4->fragment_id,
94                        (ip4->protocol ==
95                         IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
96                        &ignore);
97   i32 ret = r ? r->port : -1;
98   map_ip4_reass_unlock ();
99   return ret;
100 }
101
//Context handed to the icmp_to_icmp6() address-rewrite callbacks below.
typedef struct
{
  map_domain_t *d;              //MAP domain the packet was matched against
  u16 recv_port;                //Port (ICMP identifier) used to derive the MAP address
} icmp_to_icmp6_ctx_t;
107
108 static int
109 ip4_to_ip6_set_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *arg)
110 {
111   icmp_to_icmp6_ctx_t *ctx = arg;
112
113   ip4_map_t_embedded_address (ctx->d, &ip6->src_address, &ip4->src_address);
114   ip6->dst_address.as_u64[0] =
115     map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
116   ip6->dst_address.as_u64[1] =
117     map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
118
119   return 0;
120 }
121
122 static int
123 ip4_to_ip6_set_inner_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6,
124                               void *arg)
125 {
126   icmp_to_icmp6_ctx_t *ctx = arg;
127
128   //Note that the source address is within the domain
129   //while the destination address is the one outside the domain
130   ip4_map_t_embedded_address (ctx->d, &ip6->dst_address, &ip4->dst_address);
131   ip6->src_address.as_u64[0] =
132     map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
133   ip6->src_address.as_u64[1] =
134     map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
135
136   return 0;
137 }
138
/*
 * Graph node: translate IPv4 ICMP packets to ICMP6.
 * Input buffers start with the MAP-T pseudo-header prepended by ip4-map-t;
 * it is skipped here because ICMP translation recomputes the IPv6 addresses
 * through the callbacks above. On success the TX domain counter is bumped
 * and the packet goes to ip6-lookup (or ip6-frag if it exceeds the MTU).
 */
static uword
ip4_map_t_icmp (vlib_main_t * vm,
                vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_icmp_next_t next0;
          u8 error0;
          map_domain_t *d0;
          u16 len0;
          icmp_to_icmp6_ctx_t ctx0;
          ip4_header_t *ip40;

          next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          error0 = MAP_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t));  //The pseudo-header is not used
          //Byte count for the TX counter, taken before translation mutates the packet
          len0 =
            clib_net_to_host_u16 (((ip4_header_t *)
                                   vlib_buffer_get_current (p0))->length);
          d0 =
            pool_elt_at_index (map_main.domains,
                               vnet_buffer (p0)->map_t.map_domain_index);

          ip40 = vlib_buffer_get_current (p0);
          ctx0.recv_port = ip4_get_port (ip40, 1);
          ctx0.d = d0;
          if (ctx0.recv_port == 0)
            {
              // In case of 1:1 mapping, we don't care about the port
              if (!(d0->ea_bits_len == 0 && d0->rules))
                {
                  error0 = MAP_ERROR_ICMP;
                  goto err0;
                }
            }

          if (icmp_to_icmp6
              (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
               ip4_to_ip6_set_inner_icmp_cb, &ctx0))
            {
              error0 = MAP_ERROR_ICMP;
              goto err0;
            }

          //Translated packet larger than the domain MTU: hand off to ip6-frag
          if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
            {
              vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
              vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
              next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
            }
        err0:
          if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                               thread_index,
                                               vnet_buffer (p0)->
                                               map_t.map_domain_index, 1,
                                               len0);
            }
          else
            {
              next0 = IP4_MAPT_ICMP_NEXT_DROP;
            }
          p0->error = error_node->errors[error0];
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
233
234 static int
235 ip4_to_ip6_set_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *ctx)
236 {
237   ip4_mapt_pseudo_header_t *pheader = ctx;
238
239   ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
240   ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
241   ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
242   ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
243
244   return 0;
245 }
246
/*
 * Graph node: translate non-first IPv4 fragments to IPv6.
 * Such fragments carry no L4 header; the IPv6 addresses come from the
 * MAP-T pseudo-header prepended by ip4-map-t (see ip4_to_ip6_set_cb).
 * Failed translations are counted as MAP_ERROR_FRAGMENT_DROPPED.
 */
static uword
ip4_map_t_fragmented (vlib_main_t * vm,
                      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_pseudo_header_t *pheader0;
          ip4_mapt_fragmented_next_t next0;

          next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);

          //Accessing pseudo header
          pheader0 = vlib_buffer_get_current (p0);
          vlib_buffer_advance (p0, sizeof (*pheader0));

          if (ip4_to_ip6_fragmented (p0, ip4_to_ip6_set_cb, pheader0))
            {
              p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
              next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
            }
          else
            {
              //Translated packet larger than the domain MTU: hand off to ip6-frag
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
                }
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
306
/*
 * Graph node: translate IPv4 TCP/UDP packets to IPv6.
 * The IPv6 addresses come from the MAP-T pseudo-header prepended by
 * ip4-map-t (see ip4_to_ip6_set_cb). A dual-packet loop is compiled in
 * when IP4_MAP_T_DUAL_LOOP is defined; the single-packet loop below it
 * handles the remainder and is behaviorally identical per packet.
 */
static uword
ip4_map_t_tcp_udp (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);


  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP4_MAP_T_DUAL_LOOP
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 pi0, pi1;
          vlib_buffer_t *p0, *p1;
          ip4_mapt_pseudo_header_t *pheader0, *pheader1;
          ip4_mapt_tcp_udp_next_t next0, next1;

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
          next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          //Accessing pseudo header
          pheader0 = vlib_buffer_get_current (p0);
          pheader1 = vlib_buffer_get_current (p1);
          vlib_buffer_advance (p0, sizeof (*pheader0));
          vlib_buffer_advance (p1, sizeof (*pheader1));

          if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
            {
              p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  //Send to fragmentation node if necessary
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
                }
            }

          if (ip4_to_ip6_tcp_udp (p1, ip4_to_ip6_set_cb, pheader1))
            {
              p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next1 = IP4_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
                {
                  //Send to fragmentation node if necessary
                  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
                  vnet_buffer (p1)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
                }
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0, pi1,
                                           next0, next1);
        }
#endif

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_pseudo_header_t *pheader0;
          ip4_mapt_tcp_udp_next_t next0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
          p0 = vlib_get_buffer (vm, pi0);

          //Accessing pseudo header
          pheader0 = vlib_buffer_get_current (p0);
          vlib_buffer_advance (p0, sizeof (*pheader0));

          if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
            {
              p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  //Send to fragmentation node if necessary
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
                }
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
434
435 static_always_inline void
436 ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
437                     ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
438                     u8 * error0, ip4_mapt_next_t * next0)
439 {
440   if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
441     {
442       *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
443       if (d0->ea_bits_len == 0 && d0->rules)
444         {
445           *dst_port0 = 0;
446         }
447       else
448         {
449           *dst_port0 = ip4_map_fragment_get_port (ip40);
450           *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
451         }
452     }
453   else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
454     {
455       vnet_buffer (p0)->map_t.checksum_offset = 36;
456       *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
457       *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
458       *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
459     }
460   else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
461     {
462       vnet_buffer (p0)->map_t.checksum_offset = 26;
463       *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
464       *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
465       *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
466     }
467   else if (ip40->protocol == IP_PROTOCOL_ICMP)
468     {
469       *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
470       if (d0->ea_bits_len == 0 && d0->rules)
471         *dst_port0 = 0;
472       else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
473                == ICMP4_echo_reply
474                || ((icmp46_header_t *)
475                    u8_ptr_add (ip40,
476                                sizeof (*ip40)))->code == ICMP4_echo_request)
477         *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
478     }
479   else
480     {
481       *error0 = MAP_ERROR_BAD_PROTOCOL;
482     }
483 }
484
485 static uword
486 ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
487 {
488   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
489   vlib_node_runtime_t *error_node =
490     vlib_node_get_runtime (vm, ip4_map_t_node.index);
491   from = vlib_frame_vector_args (frame);
492   n_left_from = frame->n_vectors;
493   next_index = node->cached_next_index;
494   vlib_combined_counter_main_t *cm = map_main.domain_counters;
495   u32 thread_index = vm->thread_index;
496
497   while (n_left_from > 0)
498     {
499       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
500
501 #ifdef IP4_MAP_T_DUAL_LOOP
502       while (n_left_from >= 4 && n_left_to_next >= 2)
503         {
504           u32 pi0, pi1;
505           vlib_buffer_t *p0, *p1;
506           ip4_header_t *ip40, *ip41;
507           map_domain_t *d0, *d1;
508           ip4_mapt_next_t next0 = 0, next1 = 0;
509           u16 ip4_len0, ip4_len1;
510           u8 error0, error1;
511           i32 dst_port0, dst_port1;
512           ip4_mapt_pseudo_header_t *pheader0, *pheader1;
513
514           pi0 = to_next[0] = from[0];
515           pi1 = to_next[1] = from[1];
516           from += 2;
517           n_left_from -= 2;
518           to_next += 2;
519           n_left_to_next -= 2;
520           error0 = MAP_ERROR_NONE;
521           error1 = MAP_ERROR_NONE;
522
523           p0 = vlib_get_buffer (vm, pi0);
524           p1 = vlib_get_buffer (vm, pi1);
525           ip40 = vlib_buffer_get_current (p0);
526           ip41 = vlib_buffer_get_current (p1);
527           ip4_len0 = clib_host_to_net_u16 (ip40->length);
528           ip4_len1 = clib_host_to_net_u16 (ip41->length);
529
530           if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
531                              ip40->ip_version_and_header_length != 0x45))
532             {
533               error0 = MAP_ERROR_UNKNOWN;
534               next0 = IP4_MAPT_NEXT_DROP;
535             }
536
537           if (PREDICT_FALSE (p1->current_length < ip4_len1 ||
538                              ip41->ip_version_and_header_length != 0x45))
539             {
540               error1 = MAP_ERROR_UNKNOWN;
541               next1 = IP4_MAPT_NEXT_DROP;
542             }
543
544           vnet_buffer (p0)->map_t.map_domain_index =
545             vnet_buffer (p0)->ip.adj_index[VLIB_TX];
546           d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);
547           vnet_buffer (p1)->map_t.map_domain_index =
548             vnet_buffer (p1)->ip.adj_index[VLIB_TX];
549           d1 = ip4_map_get_domain (vnet_buffer (p1)->map_t.map_domain_index);
550
551           vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
552           vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
553
554           dst_port0 = -1;
555           dst_port1 = -1;
556
557           ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
558                               &next0);
559           ip4_map_t_classify (p1, d1, ip41, ip4_len1, &dst_port1, &error1,
560                               &next1);
561
562           //Add MAP-T pseudo header in front of the packet
563           vlib_buffer_advance (p0, -sizeof (*pheader0));
564           vlib_buffer_advance (p1, -sizeof (*pheader1));
565           pheader0 = vlib_buffer_get_current (p0);
566           pheader1 = vlib_buffer_get_current (p1);
567
568           //Save addresses within the packet
569           ip4_map_t_embedded_address (d0, &pheader0->saddr,
570                                       &ip40->src_address);
571           ip4_map_t_embedded_address (d1, &pheader1->saddr,
572                                       &ip41->src_address);
573           pheader0->daddr.as_u64[0] =
574             map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
575           pheader0->daddr.as_u64[1] =
576             map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
577           pheader1->daddr.as_u64[0] =
578             map_get_pfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);
579           pheader1->daddr.as_u64[1] =
580             map_get_sfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);
581
582           if (PREDICT_FALSE
583               (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
584                && (d0->ea_bits_len != 0 || !d0->rules)
585                && ip4_map_fragment_cache (ip40, dst_port0)))
586             {
587               error0 = MAP_ERROR_FRAGMENT_MEMORY;
588             }
589
590           if (PREDICT_FALSE
591               (ip4_is_first_fragment (ip41) && (dst_port1 != -1)
592                && (d1->ea_bits_len != 0 || !d1->rules)
593                && ip4_map_fragment_cache (ip41, dst_port1)))
594             {
595               error1 = MAP_ERROR_FRAGMENT_MEMORY;
596             }
597
598           if (PREDICT_TRUE
599               (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
600             {
601               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
602                                                thread_index,
603                                                vnet_buffer (p0)->
604                                                map_t.map_domain_index, 1,
605                                                clib_net_to_host_u16
606                                                (ip40->length));
607             }
608
609           if (PREDICT_TRUE
610               (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP))
611             {
612               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
613                                                thread_index,
614                                                vnet_buffer (p1)->
615                                                map_t.map_domain_index, 1,
616                                                clib_net_to_host_u16
617                                                (ip41->length));
618             }
619
620           next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
621           next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
622           p0->error = error_node->errors[error0];
623           p1->error = error_node->errors[error1];
624           vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
625                                            n_left_to_next, pi0, pi1, next0,
626                                            next1);
627         }
628 #endif
629
630       while (n_left_from > 0 && n_left_to_next > 0)
631         {
632           u32 pi0;
633           vlib_buffer_t *p0;
634           ip4_header_t *ip40;
635           map_domain_t *d0;
636           ip4_mapt_next_t next0;
637           u16 ip4_len0;
638           u8 error0;
639           i32 dst_port0;
640           ip4_mapt_pseudo_header_t *pheader0;
641
642           pi0 = to_next[0] = from[0];
643           from += 1;
644           n_left_from -= 1;
645           to_next += 1;
646           n_left_to_next -= 1;
647           error0 = MAP_ERROR_NONE;
648
649           p0 = vlib_get_buffer (vm, pi0);
650           ip40 = vlib_buffer_get_current (p0);
651           ip4_len0 = clib_host_to_net_u16 (ip40->length);
652           if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
653                              ip40->ip_version_and_header_length != 0x45))
654             {
655               error0 = MAP_ERROR_UNKNOWN;
656               next0 = IP4_MAPT_NEXT_DROP;
657             }
658
659           vnet_buffer (p0)->map_t.map_domain_index =
660             vnet_buffer (p0)->ip.adj_index[VLIB_TX];
661           d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);
662
663           vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
664
665           dst_port0 = -1;
666           ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
667                               &next0);
668
669           /* Verify that port is not among the well-known ports */
670           if ((d0->psid_length > 0 && d0->psid_offset > 0)
671               && (clib_net_to_host_u16 (dst_port0) <
672                   (0x1 << (16 - d0->psid_offset))))
673             {
674               error0 = MAP_ERROR_SEC_CHECK;
675             }
676
677           //Add MAP-T pseudo header in front of the packet
678           vlib_buffer_advance (p0, -sizeof (*pheader0));
679           pheader0 = vlib_buffer_get_current (p0);
680
681           //Save addresses within the packet
682           ip4_map_t_embedded_address (d0, &pheader0->saddr,
683                                       &ip40->src_address);
684           pheader0->daddr.as_u64[0] =
685             map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
686           pheader0->daddr.as_u64[1] =
687             map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
688
689           //It is important to cache at this stage because the result might be necessary
690           //for packets within the same vector.
691           //Actually, this approach even provides some limited out-of-order fragments support
692           if (PREDICT_FALSE
693               (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
694                && (d0->ea_bits_len != 0 || !d0->rules)
695                && ip4_map_fragment_cache (ip40, dst_port0)))
696             {
697               error0 = MAP_ERROR_UNKNOWN;
698             }
699
700           if (PREDICT_TRUE
701               (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
702             {
703               vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
704                                                thread_index,
705                                                vnet_buffer (p0)->
706                                                map_t.map_domain_index, 1,
707                                                clib_net_to_host_u16
708                                                (ip40->length));
709             }
710
711           next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
712           p0->error = error_node->errors[error0];
713           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
714                                            to_next, n_left_to_next, pi0,
715                                            next0);
716         }
717       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
718     }
719   return frame->n_vectors;
720 }
721
//Human-readable counter strings for MAP errors, expanded from foreach_map_error
//(defined in map.h); indexed by the MAP_ERROR_* enum values.
static char *map_t_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};
727
728 /* *INDENT-OFF* */
//Registration of the node translating non-first IPv4 fragments to IPv6.
VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
  .function = ip4_map_t_fragmented,
  .name = "ip4-map-t-fragmented",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};
746 /* *INDENT-ON* */
747
748 /* *INDENT-OFF* */
//Registration of the node translating IPv4 ICMP packets to ICMP6.
VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
  .function = ip4_map_t_icmp,
  .name = "ip4-map-t-icmp",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};
766 /* *INDENT-ON* */
767
768 /* *INDENT-OFF* */
//Registration of the node translating IPv4 TCP/UDP packets to IPv6.
VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
  .function = ip4_map_t_tcp_udp,
  .name = "ip4-map-t-tcp-udp",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};
786 /* *INDENT-ON* */
787
788 /* *INDENT-OFF* */
//Registration of the MAP-T classifier node; feature entry point that fans
//out to the per-protocol translation nodes above.
VLIB_REGISTER_NODE(ip4_map_t_node) = {
  .function = ip4_map_t,
  .name = "ip4-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
      [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
      [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
      [IP4_MAPT_NEXT_DROP] = "error-drop",
  },
};
807 /* *INDENT-ON* */
808
809 /*
810  * fd.io coding-style-patch-verification: ON
811  *
812  * Local Variables:
813  * eval: (c-set-style "gnu")
814  * End:
815  */