/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "map.h"

#include "../ip/ip_frag.h"
#include <vnet/ip/ip4_to_ip6.h>

#define IP4_MAP_T_DUAL_LOOP 1
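//When IP4_MAP_T_DUAL_LOOP is defined, the ip4-map-t and
//ip4-map-t-tcp-udp nodes additionally compile a dual-buffer loop (see
//the #ifdef IP4_MAP_T_DUAL_LOOP blocks below) that handles two packets
//per iteration before the single-buffer loop drains the remainder.
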
typedef enum
{
  IP4_MAPT_NEXT_MAPT_TCP_UDP,
  IP4_MAPT_NEXT_MAPT_ICMP,
  IP4_MAPT_NEXT_MAPT_FRAGMENTED,
  IP4_MAPT_NEXT_DROP,
  IP4_MAPT_N_NEXT
} ip4_mapt_next_t;

typedef enum
{
  IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
  IP4_MAPT_ICMP_NEXT_IP6_FRAG,
  IP4_MAPT_ICMP_NEXT_DROP,
  IP4_MAPT_ICMP_N_NEXT
} ip4_mapt_icmp_next_t;

typedef enum
{
  IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
  IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
  IP4_MAPT_TCP_UDP_NEXT_DROP,
  IP4_MAPT_TCP_UDP_N_NEXT
} ip4_mapt_tcp_udp_next_t;

typedef enum
{
  IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
  IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
  IP4_MAPT_FRAGMENTED_NEXT_DROP,
  IP4_MAPT_FRAGMENTED_N_NEXT
} ip4_mapt_fragmented_next_t;
//This pseudo-header is used to pass information within the buffer data,
//since the buffer metadata is too small to hold structures of this size.
typedef CLIB_PACKED (struct {
  ip6_address_t daddr;
  ip6_address_t saddr;
  //IPv6 header + Fragmentation header will be here
  //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
  u8 unused[28];
}) ip4_mapt_pseudo_header_t;
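
//MAP-T needs the L4 destination port to build the IPv6 destination
//address, but only the first fragment of an IPv4 datagram carries the
//L4 header. The two helpers below therefore cache the port learned from
//the first fragment in the MAP reassembly table, keyed by addresses,
//fragment id and protocol, so that later fragments of the same datagram
//can still be translated. ICMP entries are keyed under
//IP_PROTOCOL_ICMP6, presumably so the entry matches the protocol seen
//after translation on the IPv6 side.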
static_always_inline int
ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock ();
  map_ip4_reass_t *r =
    map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
                       ip4->fragment_id,
                       (ip4->protocol ==
                        IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
                       &ignore);
  if (r)
    r->port = port;

  map_ip4_reass_unlock ();
  return !r;
}
static_always_inline i32
ip4_map_fragment_get_port (ip4_header_t * ip4)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock ();
  map_ip4_reass_t *r =
    map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
                       ip4->fragment_id,
                       (ip4->protocol ==
                        IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
                       &ignore);
  i32 ret = r ? r->port : -1;
  map_ip4_reass_unlock ();
  return ret;
}
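
//Context passed to the ICMP translation callbacks below: the MAP domain
//and the receive port (for ICMP this is derived from the packet by
//ip4_get_port(), e.g. the echo identifier), both needed to compute the
//MAP IPv6 source and destination addresses.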
typedef struct
{
  map_domain_t *d;
  u16 recv_port;
} icmp_to_icmp6_ctx_t;
static int
ip4_to_ip6_set_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *arg)
{
  icmp_to_icmp6_ctx_t *ctx = arg;

  ip4_map_t_embedded_address (ctx->d, &ip6->src_address, &ip4->src_address);
  ip6->dst_address.as_u64[0] =
    map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
  ip6->dst_address.as_u64[1] =
    map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);

  return 0;
}
static int
ip4_to_ip6_set_inner_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6,
                              void *arg)
{
  icmp_to_icmp6_ctx_t *ctx = arg;

  //Note that the source address is within the domain
  //while the destination address is the one outside the domain
  ip4_map_t_embedded_address (ctx->d, &ip6->dst_address, &ip4->dst_address);
  ip6->src_address.as_u64[0] =
    map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
  ip6->src_address.as_u64[1] =
    map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);

  return 0;
}
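
//ICMP translation node (ip4-map-t-icmp). Rewrites ICMP to ICMPv6 via
//icmp_to_icmp6(), using the callbacks above for the outer header and
//for the IPv4 header embedded in ICMP errors, then hands packets larger
//than the domain MTU to the IPv6 fragmentation node.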
static uword
ip4_map_t_icmp (vlib_main_t * vm,
                vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vlib_get_thread_index ();

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_icmp_next_t next0;
          u8 error0;
          map_domain_t *d0;
          u16 len0;
          icmp_to_icmp6_ctx_t ctx0;
          ip4_header_t *ip40;

          next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          error0 = MAP_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t));  //The pseudo-header is not used
          len0 =
            clib_net_to_host_u16 (((ip4_header_t *)
                                   vlib_buffer_get_current (p0))->length);
          d0 =
            pool_elt_at_index (map_main.domains,
                               vnet_buffer (p0)->map_t.map_domain_index);

          ip40 = vlib_buffer_get_current (p0);
          ctx0.recv_port = ip4_get_port (ip40, 1);
          ctx0.d = d0;
          if (ctx0.recv_port == 0)
            {
              // In case of 1:1 mapping, we don't care about the port
              if (!(d0->ea_bits_len == 0 && d0->rules))
                {
                  error0 = MAP_ERROR_ICMP;
                  goto err0;
                }
            }

          if (icmp_to_icmp6
              (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
               ip4_to_ip6_set_inner_icmp_cb, &ctx0))
            {
              error0 = MAP_ERROR_ICMP;
              goto err0;
            }

          if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
            {
              vnet_buffer (p0)->ip_frag.header_offset = 0;
              vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
              vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
              next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
            }
        err0:
          if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                               thread_index,
                                               vnet_buffer (p0)->
                                               map_t.map_domain_index, 1,
                                               len0);
            }
          else
            {
              next0 = IP4_MAPT_ICMP_NEXT_DROP;
            }
          p0->error = error_node->errors[error0];
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
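
//Address-setting callback shared by the TCP/UDP and fragmented paths:
//the IPv6 addresses were already computed by ip4-map-t and stashed in
//the pseudo-header prepended to the buffer.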
static int
ip4_to_ip6_set_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *ctx)
{
  ip4_mapt_pseudo_header_t *pheader = ctx;

  ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
  ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
  ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
  ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];

  return 0;
}
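
//Translation node for non-first fragments (ip4-map-t-fragmented).
//These packets carry no L4 header; the pseudo-header, filled using the
//port cached when the first fragment was seen, provides the IPv6
//addresses.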
static uword
ip4_map_t_fragmented (vlib_main_t * vm,
                      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_pseudo_header_t *pheader0;
          ip4_mapt_fragmented_next_t next0;

          next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);

          //Accessing pseudo header
          pheader0 = vlib_buffer_get_current (p0);
          vlib_buffer_advance (p0, sizeof (*pheader0));

          if (ip4_to_ip6_fragmented (p0, ip4_to_ip6_set_cb, pheader0))
            {
              p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
              next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  vnet_buffer (p0)->ip_frag.header_offset = 0;
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
                }
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
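
//TCP/UDP translation node (ip4-map-t-tcp-udp). The IPv4 header is
//rewritten into an IPv6 header by ip4_to_ip6_tcp_udp(); addresses come
//from the pseudo-header, and packets exceeding the domain MTU are
//diverted to the IPv6 fragmentation node.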
static uword
ip4_map_t_tcp_udp (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP4_MAP_T_DUAL_LOOP
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 pi0, pi1;
          vlib_buffer_t *p0, *p1;
          ip4_mapt_pseudo_header_t *pheader0, *pheader1;
          ip4_mapt_tcp_udp_next_t next0, next1;

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
          next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          //Accessing pseudo header
          pheader0 = vlib_buffer_get_current (p0);
          pheader1 = vlib_buffer_get_current (p1);
          vlib_buffer_advance (p0, sizeof (*pheader0));
          vlib_buffer_advance (p1, sizeof (*pheader1));

          if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
            {
              p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  //Send to fragmentation node if necessary
                  vnet_buffer (p0)->ip_frag.header_offset = 0;
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
                }
            }

          if (ip4_to_ip6_tcp_udp (p1, ip4_to_ip6_set_cb, pheader1))
            {
              p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next1 = IP4_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
                {
                  //Send to fragmentation node if necessary
                  vnet_buffer (p1)->ip_frag.header_offset = 0;
                  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
                  vnet_buffer (p1)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
                }
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0, pi1,
                                           next0, next1);
        }
#endif

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_mapt_pseudo_header_t *pheader0;
          ip4_mapt_tcp_udp_next_t next0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
          p0 = vlib_get_buffer (vm, pi0);

          //Accessing pseudo header
          pheader0 = vlib_buffer_get_current (p0);
          vlib_buffer_advance (p0, sizeof (*pheader0));

          if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
            {
              p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
              next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
            }
          else
            {
              if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
                {
                  //Send to fragmentation node if necessary
                  vnet_buffer (p0)->ip_frag.header_offset = 0;
                  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
                  vnet_buffer (p0)->ip_frag.next_index =
                    IP6_FRAG_NEXT_IP6_LOOKUP;
                  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
                }
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
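
//Classify an IPv4 packet: choose the next node, extract the L4
//destination port and flag errors. The checksum offsets are absolute
//offsets from the start of the IPv4 header (IHL=5 is enforced by the
//caller): 20 + 16 = 36 for the TCP checksum, 20 + 6 = 26 for the UDP
//checksum. The destination port lives 2 bytes past the IPv4 header
//(just after the source port), and the length checks (40 for TCP, 28
//for UDP) require a complete L4 header to be present.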
static_always_inline void
ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
                    ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
                    u8 * error0, ip4_mapt_next_t * next0)
{
  if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
    {
      //Fragmented, and not the first fragment
      *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
      if (d0->ea_bits_len == 0 && d0->rules)
        {
          *dst_port0 = 0;
        }
      else
        {
          *dst_port0 = ip4_map_fragment_get_port (ip40);
          *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
        }
    }
  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
    {
      vnet_buffer (p0)->map_t.checksum_offset = 36;
      *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
      *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
      *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
    }
  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
    {
      vnet_buffer (p0)->map_t.checksum_offset = 26;
      *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
      *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
      *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
    }
  else if (ip40->protocol == IP_PROTOCOL_ICMP)
    {
      *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
      if (d0->ea_bits_len == 0 && d0->rules)
        *dst_port0 = 0;
      else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
               == ICMP4_echo_reply
               || ((icmp46_header_t *)
                   u8_ptr_add (ip40,
                               sizeof (*ip40)))->code == ICMP4_echo_request)
        *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
    }
  else
    {
      *error0 = MAP_ERROR_BAD_PROTOCOL;
    }
}
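
//Entry node (ip4-map-t): validates the IPv4 header, resolves the MAP
//domain from the TX adjacency, classifies the packet, prepends a
//pseudo-header holding the computed IPv6 source/destination, caches
//first-fragment ports, and dispatches to the per-protocol translation
//nodes.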
static uword
ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vlib_get_thread_index ();

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP4_MAP_T_DUAL_LOOP
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 pi0, pi1;
          vlib_buffer_t *p0, *p1;
          ip4_header_t *ip40, *ip41;
          map_domain_t *d0, *d1;
          ip4_mapt_next_t next0 = 0, next1 = 0;
          u16 ip4_len0, ip4_len1;
          u8 error0, error1;
          i32 dst_port0, dst_port1;
          ip4_mapt_pseudo_header_t *pheader0, *pheader1;

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];
          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;
          error0 = MAP_ERROR_NONE;
          error1 = MAP_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);
          ip40 = vlib_buffer_get_current (p0);
          ip41 = vlib_buffer_get_current (p1);
          ip4_len0 = clib_host_to_net_u16 (ip40->length);
          ip4_len1 = clib_host_to_net_u16 (ip41->length);

          if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
                             ip40->ip_version_and_header_length != 0x45))
            {
              error0 = MAP_ERROR_UNKNOWN;
              next0 = IP4_MAPT_NEXT_DROP;
            }

          if (PREDICT_FALSE (p1->current_length < ip4_len1 ||
                             ip41->ip_version_and_header_length != 0x45))
            {
              error1 = MAP_ERROR_UNKNOWN;
              next1 = IP4_MAPT_NEXT_DROP;
            }

          vnet_buffer (p0)->map_t.map_domain_index =
            vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);
          vnet_buffer (p1)->map_t.map_domain_index =
            vnet_buffer (p1)->ip.adj_index[VLIB_TX];
          d1 = ip4_map_get_domain (vnet_buffer (p1)->map_t.map_domain_index);

          vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
          vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;

          dst_port0 = -1;
          dst_port1 = -1;

          ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
                              &next0);
          ip4_map_t_classify (p1, d1, ip41, ip4_len1, &dst_port1, &error1,
                              &next1);

          //Add MAP-T pseudo header in front of the packet
          vlib_buffer_advance (p0, -sizeof (*pheader0));
          vlib_buffer_advance (p1, -sizeof (*pheader1));
          pheader0 = vlib_buffer_get_current (p0);
          pheader1 = vlib_buffer_get_current (p1);

          //Save addresses within the packet
          ip4_map_t_embedded_address (d0, &pheader0->saddr,
                                      &ip40->src_address);
          ip4_map_t_embedded_address (d1, &pheader1->saddr,
                                      &ip41->src_address);
          pheader0->daddr.as_u64[0] =
            map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
          pheader0->daddr.as_u64[1] =
            map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
          pheader1->daddr.as_u64[0] =
            map_get_pfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);
          pheader1->daddr.as_u64[1] =
            map_get_sfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);

          if (PREDICT_FALSE
              (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
               && (d0->ea_bits_len != 0 || !d0->rules)
               && ip4_map_fragment_cache (ip40, dst_port0)))
            {
              error0 = MAP_ERROR_FRAGMENT_MEMORY;
            }

          if (PREDICT_FALSE
              (ip4_is_first_fragment (ip41) && (dst_port1 != -1)
               && (d1->ea_bits_len != 0 || !d1->rules)
               && ip4_map_fragment_cache (ip41, dst_port1)))
            {
              error1 = MAP_ERROR_FRAGMENT_MEMORY;
            }

          if (PREDICT_TRUE
              (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                               thread_index,
                                               vnet_buffer (p0)->
                                               map_t.map_domain_index, 1,
                                               clib_net_to_host_u16
                                               (ip40->length));
            }

          if (PREDICT_TRUE
              (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                               thread_index,
                                               vnet_buffer (p1)->
                                               map_t.map_domain_index, 1,
                                               clib_net_to_host_u16
                                               (ip41->length));
            }

          next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
          next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
          p0->error = error_node->errors[error0];
          p1->error = error_node->errors[error1];
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                           n_left_to_next, pi0, pi1, next0,
                                           next1);
        }
#endif

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0;
          vlib_buffer_t *p0;
          ip4_header_t *ip40;
          map_domain_t *d0;
          ip4_mapt_next_t next0;
          u16 ip4_len0;
          u8 error0;
          i32 dst_port0;
          ip4_mapt_pseudo_header_t *pheader0;

          pi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;
          error0 = MAP_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          ip40 = vlib_buffer_get_current (p0);
          ip4_len0 = clib_host_to_net_u16 (ip40->length);
          if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
                             ip40->ip_version_and_header_length != 0x45))
            {
              error0 = MAP_ERROR_UNKNOWN;
              next0 = IP4_MAPT_NEXT_DROP;
            }

          vnet_buffer (p0)->map_t.map_domain_index =
            vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);

          vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;

          dst_port0 = -1;
          ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
                              &next0);

          //Add MAP-T pseudo header in front of the packet
          vlib_buffer_advance (p0, -sizeof (*pheader0));
          pheader0 = vlib_buffer_get_current (p0);

          //Save addresses within the packet
          ip4_map_t_embedded_address (d0, &pheader0->saddr,
                                      &ip40->src_address);
          pheader0->daddr.as_u64[0] =
            map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
          pheader0->daddr.as_u64[1] =
            map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);

          //Caching must happen at this stage because the cached port may be
          //needed for later packets in the same vector. As a side effect,
          //this also gives limited support for out-of-order fragments.
          if (PREDICT_FALSE
              (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
               && (d0->ea_bits_len != 0 || !d0->rules)
               && ip4_map_fragment_cache (ip40, dst_port0)))
            {
              error0 = MAP_ERROR_FRAGMENT_MEMORY;
            }

          if (PREDICT_TRUE
              (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
            {
              vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
                                               thread_index,
                                               vnet_buffer (p0)->
                                               map_t.map_domain_index, 1,
                                               clib_net_to_host_u16
                                               (ip40->length));
            }

          next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
          p0->error = error_node->errors[error0];
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, pi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
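
//Error strings are expanded from the foreach_map_error list (defined
//in map.h).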
static char *map_t_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};
VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
  .function = ip4_map_t_fragmented,
  .name = "ip4-map-t-fragmented",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};
VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
  .function = ip4_map_t_icmp,
  .name = "ip4-map-t-icmp",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};
VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
  .function = ip4_map_t_tcp_udp,
  .name = "ip4-map-t-tcp-udp",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};
VLIB_REGISTER_NODE(ip4_map_t_node) = {
  .function = ip4_map_t,
  .name = "ip4-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
      [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
      [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
      [IP4_MAPT_NEXT_DROP] = "error-drop",
  },
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */