2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
/* Next-node indices for the ip4-map-t classifier node; each value maps to a
   node name in the VLIB_REGISTER_NODE (ip4_map_t_node) registration below. */
22 IP4_MAPT_NEXT_MAPT_TCP_UDP,
23 IP4_MAPT_NEXT_MAPT_ICMP,
24 IP4_MAPT_NEXT_MAPT_FRAGMENTED,
25 IP4_MAPT_NEXT_ICMP_ERROR,
/* Next-node indices for the ip4-map-t-icmp node (see its registration below). */
32 IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
33 IP4_MAPT_ICMP_NEXT_IP6_REWRITE,
34 IP4_MAPT_ICMP_NEXT_IP6_FRAG,
35 IP4_MAPT_ICMP_NEXT_DROP,
37 } ip4_mapt_icmp_next_t;
/* Next-node indices for the ip4-map-t-tcp-udp node (see its registration below). */
41 IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
42 IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE,
43 IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
44 IP4_MAPT_TCP_UDP_NEXT_DROP,
45 IP4_MAPT_TCP_UDP_N_NEXT
46 } ip4_mapt_tcp_udp_next_t;
/* Next-node indices for the ip4-map-t-fragmented node (see its registration below). */
50 IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
51 IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE,
52 IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
53 IP4_MAPT_FRAGMENTED_NEXT_DROP,
54 IP4_MAPT_FRAGMENTED_N_NEXT
55 } ip4_mapt_fragmented_next_t;
57 //This is used to pass information within the buffer data.
58 //Buffer structure being too small to contain big structures like this.
60 typedef CLIB_PACKED (struct {
/* NOTE(review): the address fields (daddr/saddr, referenced as
   pheader->daddr / pheader->saddr in the translation routines below) fall
   in an elided region of this excerpt — confirm against the full file. */
63 //IPv6 header + Fragmentation header will be here
64 //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
66 }) ip4_mapt_pseudo_header_t;
/* Context handed to the ip4-to-ip6 ICMP translation callbacks; per usage
   below it carries the MAP domain pointer (d) and the received L4 port
   (recv_port). Its opening and field declarations are elided here. */
73 } icmp_to_icmp6_ctx_t;
/* Callback rewriting the outer IP header of an ICMP packet during IPv4->IPv6
   translation: the IPv6 source is the MAP-domain embedded form of the IPv4
   source address; the IPv6 destination is built from the domain's
   prefix/suffix rules keyed by the IPv4 destination and the receive port. */
76 ip4_to_ip6_set_icmp_cb (vlib_buffer_t * b, ip4_header_t * ip4,
77 ip6_header_t * ip6, void *arg)
79 icmp_to_icmp6_ctx_t *ctx = arg;
81 ip4_map_t_embedded_address (ctx->d, &ip6->src_address, &ip4->src_address);
82 ip6->dst_address.as_u64[0] =
83 map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
84 ip6->dst_address.as_u64[1] =
85 map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
/* Callback rewriting the inner (quoted) IP header of an ICMP error payload:
   address roles are mirrored relative to ip4_to_ip6_set_icmp_cb — the inner
   destination is inside the MAP domain, the inner source is outside it. */
91 ip4_to_ip6_set_inner_icmp_cb (vlib_buffer_t * b, ip4_header_t * ip4,
92 ip6_header_t * ip6, void *arg)
94 icmp_to_icmp6_ctx_t *ctx = arg;
96 //Note that the source address is within the domain
97 //while the destination address is the one outside the domain
98 ip4_map_t_embedded_address (ctx->d, &ip6->dst_address, &ip4->dst_address);
99 ip6->src_address.as_u64[0] =
100 map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
101 ip6->src_address.as_u64[1] =
102 map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
/* ip4-map-t-icmp node: translates IPv4 ICMP packets to ICMPv6 using the two
   callbacks above, then sends the result to ip6 fragmentation, lookup bypass
   (rewrite), plain ip6-lookup, or drop. Several statements of this loop fall
   in elided regions of this excerpt. */
108 ip4_map_t_icmp (vlib_main_t * vm,
109 vlib_node_runtime_t * node, vlib_frame_t * frame)
111 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
112 vlib_node_runtime_t *error_node =
113 vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
114 from = vlib_frame_vector_args (frame);
115 n_left_from = frame->n_vectors;
116 next_index = node->cached_next_index;
117 vlib_combined_counter_main_t *cm = map_main.domain_counters;
118 u32 thread_index = vm->thread_index;
120 while (n_left_from > 0)
122 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
124 while (n_left_from > 0 && n_left_to_next > 0)
128 ip4_mapt_icmp_next_t next0;
132 icmp_to_icmp6_ctx_t ctx0;
135 next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
136 pi0 = to_next[0] = from[0];
141 error0 = MAP_ERROR_NONE;
143 p0 = vlib_get_buffer (vm, pi0);
/* The classifier node prepended a pseudo header; skip it — ICMP translation
   recomputes addresses itself via the callbacks. */
144 vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
146 clib_net_to_host_u16 (((ip4_header_t *)
147 vlib_buffer_get_current (p0))->length);
149 pool_elt_at_index (map_main.domains,
150 vnet_buffer (p0)->map_t.map_domain_index);
152 ip40 = vlib_buffer_get_current (p0);
153 ctx0.recv_port = ip4_get_port (ip40, 1);
/* A zero port is only acceptable for 1:1 (ea_bits_len == 0 with rules)
   mappings; otherwise the packet cannot be translated. */
155 if (ctx0.recv_port == 0)
157 // In case of 1:1 mapping, we don't care about the port
158 if (!(d0->ea_bits_len == 0 && d0->rules))
160 error0 = MAP_ERROR_ICMP;
/* Perform the ICMP -> ICMPv6 translation with the outer/inner callbacks. */
166 (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
167 ip4_to_ip6_set_inner_icmp_cb, &ctx0))
169 error0 = MAP_ERROR_ICMP;
/* Resulting IPv6 packet larger than the domain MTU: hand off to the ip6
   fragmentation node. */
173 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
175 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
176 vnet_buffer (p0)->ip_frag.next_index = IP_FRAG_NEXT_IP6_LOOKUP;
177 next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
/* Optional lookup bypass: go straight to ip6 rewrite when possible. */
181 next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
182 IP4_MAPT_ICMP_NEXT_IP6_REWRITE : next0;
185 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
187 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
190 map_t.map_domain_index, 1,
195 next0 = IP4_MAPT_ICMP_NEXT_DROP;
197 p0->error = error_node->errors[error0];
198 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
199 to_next, n_left_to_next, pi0,
202 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
204 return frame->n_vectors;
208 * Translate fragmented IPv4 UDP/TCP packet to IPv6.
211 map_ip4_to_ip6_fragmented (vlib_buffer_t * p,
212 ip4_mapt_pseudo_header_t * pheader)
216 ip6_frag_hdr_t *frag;
218 ip4 = vlib_buffer_get_current (p);
/* Overlay the IPv6 header + IPv6 fragment header in place so that they end
   exactly where the IPv4 header ended; the buffer pointer is then moved back
   by the size difference. */
219 frag = (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
221 (ip6_header_t *) u8_ptr_add (ip4,
222 sizeof (*ip4) - sizeof (*frag) -
224 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
226 //We know that the protocol was one of ICMP, TCP or UDP
227 //because the first fragment was found and cached
/* ICMP becomes ICMP6; TCP/UDP keep their protocol numbers. */
229 (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol;
230 frag->identification = frag_id_4to6 (ip4->fragment_id);
/* Carry over the IPv4 fragment offset and more-fragments flag. */
232 frag->fragment_offset_and_more =
233 ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip4),
235 (ip4->flags_and_fragment_offset) &
236 IP4_HEADER_FLAG_MORE_FRAGMENTS);
/* Build the IPv6 header: version 6, IPv4 TOS mapped to traffic class,
   payload grows by the fragment header, next header is the frag header. */
238 ip6->ip_version_traffic_class_and_flow_label =
239 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
240 ip6->payload_length =
241 clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
242 sizeof (*ip4) + sizeof (*frag));
243 ip6->hop_limit = ip4->ttl;
244 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
/* Source/destination were precomputed by the classifier node and carried
   in the pseudo header. */
246 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
247 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
248 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
249 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
/* ip4-map-t-fragmented node: translates non-first IPv4 fragments to IPv6
   using map_ip4_to_ip6_fragmented(), then routes to ip6 fragmentation,
   lookup bypass, plain ip6-lookup, or drop. Some statements of this loop
   fall in elided regions of this excerpt. */
255 ip4_map_t_fragmented (vlib_main_t * vm,
256 vlib_node_runtime_t * node, vlib_frame_t * frame)
258 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
259 from = vlib_frame_vector_args (frame);
260 n_left_from = frame->n_vectors;
261 next_index = node->cached_next_index;
262 vlib_node_runtime_t *error_node =
263 vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);
265 while (n_left_from > 0)
267 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
269 while (n_left_from > 0 && n_left_to_next > 0)
273 ip4_mapt_pseudo_header_t *pheader0;
274 ip4_mapt_fragmented_next_t next0;
276 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
277 pi0 = to_next[0] = from[0];
283 p0 = vlib_get_buffer (vm, pi0);
285 //Accessing pseudo header
286 pheader0 = vlib_buffer_get_current (p0);
287 vlib_buffer_advance (p0, sizeof (*pheader0));
/* Non-zero return means the fragment could not be translated. */
289 if (map_ip4_to_ip6_fragmented (p0, pheader0))
291 p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
292 next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
/* Re-fragment if the translated packet exceeds the domain MTU. */
296 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
298 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
299 vnet_buffer (p0)->ip_frag.next_index =
300 IP_FRAG_NEXT_IP6_LOOKUP;
301 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
/* Optional lookup bypass: go straight to ip6 rewrite when possible. */
305 next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
306 IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE : next0;
310 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
311 to_next, n_left_to_next, pi0,
314 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
316 return frame->n_vectors;
320 * Translate IPv4 UDP/TCP packet to IPv6.
323 map_ip4_to_ip6_tcp_udp (vlib_buffer_t * p, ip4_mapt_pseudo_header_t * pheader)
325 map_main_t *mm = &map_main;
330 ip6_frag_hdr_t *frag;
332 ip4_address_t old_src, old_dst;
334 ip4 = vlib_buffer_get_current (p);
336 if (ip4->protocol == IP_PROTOCOL_UDP)
338 udp_header_t *udp = ip4_next_header (ip4);
339 checksum = &udp->checksum;
342 * UDP checksum is optional over IPv4 but mandatory for IPv6 We
343 * do not check udp->length sanity but use our safe computed
/* Zero UDP checksum: compute one from scratch (pseudo-header parts added
   below) since IPv6 forbids checksum 0. */
346 if (PREDICT_FALSE (!*checksum))
348 u16 udp_len = clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
349 csum = ip_incremental_checksum (0, udp, udp_len);
350 csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
352 ip_csum_with_carry (csum, clib_host_to_net_u16 (IP_PROTOCOL_UDP));
353 csum = ip_csum_with_carry (csum, *((u64 *) (&ip4->src_address)));
354 *checksum = ~ip_csum_fold (csum);
/* TCP path: clamp MSS per configuration, keeping the checksum consistent. */
359 tcp_header_t *tcp = ip4_next_header (ip4);
362 csum = tcp->checksum;
363 map_mss_clamping (tcp, &csum, mm->tcp_mss);
364 tcp->checksum = ip_csum_fold (csum);
366 checksum = &tcp->checksum;
/* Remember the IPv4 addresses for the incremental checksum fixup below. */
369 old_src.as_u32 = ip4->src_address.as_u32;
370 old_dst.as_u32 = ip4->dst_address.as_u32;
372 /* Deal with fragmented packets */
/* First fragment (more-fragments set): overlay IPv6 header + fragment
   header in place of the IPv4 header. */
373 if (PREDICT_FALSE (ip4->flags_and_fragment_offset &
374 clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
377 (ip6_header_t *) u8_ptr_add (ip4,
378 sizeof (*ip4) - sizeof (*ip6) -
381 (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
382 frag_id = frag_id_4to6 (ip4->fragment_id);
383 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
/* Unfragmented: overlay only the IPv6 header. */
387 ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
388 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
392 ip6->ip_version_traffic_class_and_flow_label =
393 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
394 ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
395 ip6->hop_limit = ip4->ttl;
396 ip6->protocol = ip4->protocol;
397 if (PREDICT_FALSE (frag != NULL))
399 frag->next_hdr = ip6->protocol;
400 frag->identification = frag_id;
/* First fragment: offset 0, more-fragments bit set. */
402 frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more (0, 1);
403 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
404 ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
/* Addresses were precomputed by the classifier node (pseudo header). */
407 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
408 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
409 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
410 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
/* Incremental L4-checksum fixup: remove the IPv4 pseudo-header addresses,
   add the IPv6 ones. */
412 csum = ip_csum_sub_even (*checksum, old_src.as_u32);
413 csum = ip_csum_sub_even (csum, old_dst.as_u32);
414 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
415 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
416 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
417 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
418 *checksum = ip_csum_fold (csum);
/* ip4-map-t-tcp-udp node: translates IPv4 TCP/UDP packets to IPv6 using
   map_ip4_to_ip6_tcp_udp(), then routes to ip6 fragmentation, lookup bypass,
   plain ip6-lookup, or drop. Some statements of this loop fall in elided
   regions of this excerpt. */
424 ip4_map_t_tcp_udp (vlib_main_t * vm,
425 vlib_node_runtime_t * node, vlib_frame_t * frame)
427 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
428 from = vlib_frame_vector_args (frame);
429 n_left_from = frame->n_vectors;
430 next_index = node->cached_next_index;
431 vlib_node_runtime_t *error_node =
432 vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);
435 while (n_left_from > 0)
437 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
439 while (n_left_from > 0 && n_left_to_next > 0)
443 ip4_mapt_pseudo_header_t *pheader0;
444 ip4_mapt_tcp_udp_next_t next0;
446 pi0 = to_next[0] = from[0];
452 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
453 p0 = vlib_get_buffer (vm, pi0);
455 //Accessing pseudo header
456 pheader0 = vlib_buffer_get_current (p0);
457 vlib_buffer_advance (p0, sizeof (*pheader0));
/* Non-zero return means the translation failed. */
459 if (map_ip4_to_ip6_tcp_udp (p0, pheader0))
461 p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
462 next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
466 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
468 //Send to fragmentation node if necessary
469 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
470 vnet_buffer (p0)->ip_frag.next_index =
471 IP_FRAG_NEXT_IP6_LOOKUP;
472 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
/* Optional lookup bypass: go straight to ip6 rewrite when possible. */
476 next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
477 IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE : next0;
480 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
481 to_next, n_left_to_next, pi0,
484 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
487 return frame->n_vectors;
/* Classify an IPv4 packet for MAP-T: pick the translation next-node
   (fragmented / tcp-udp / icmp), record the destination port used for
   address derivation, and flag malformed or unsupported packets. */
490 static_always_inline void
491 ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
492 ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
493 u8 * error0, ip4_mapt_next_t * next0, u16 l4_dst_port)
/* Non-zero fragment offset: a non-first fragment, handled by the
   fragmented path. */
495 if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
497 *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
498 if (d0->ea_bits_len == 0 && d0->rules)
504 *dst_port0 = l4_dst_port;
505 *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
/* NOTE(review): checksum_offset looks like the L4 checksum offset from the
   IPv4 header start (20 + 16 = 36 for TCP, 20 + 6 = 26 for UDP) — confirm
   against the consumer of map_t.checksum_offset. */
508 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
510 vnet_buffer (p0)->map_t.checksum_offset = 36;
511 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
512 *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
513 *dst_port0 = l4_dst_port;
515 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
517 vnet_buffer (p0)->map_t.checksum_offset = 26;
518 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
519 *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
520 *dst_port0 = l4_dst_port;
522 else if (ip40->protocol == IP_PROTOCOL_ICMP)
524 *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
525 if (d0->ea_bits_len == 0 && d0->rules)
/* ICMP echo: the port slot is filled from the L4 (ICMP id) field. */
527 else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
529 || ((icmp46_header_t *)
531 sizeof (*ip40)))->code == ICMP4_echo_request)
532 *dst_port0 = l4_dst_port;
/* Anything else cannot be translated by MAP-T. */
536 *error0 = MAP_ERROR_BAD_PROTOCOL;
/* ip4-map-t classifier node: validates the IPv4 header, finds the MAP
   domain, enforces TTL/DF/MTU and port-security checks, precomputes the
   IPv6 source/destination into a pseudo header prepended to the packet,
   and dispatches to the per-protocol translation nodes. Several statements
   of this loop fall in elided regions of this excerpt. */
541 ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
543 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
544 vlib_node_runtime_t *error_node =
545 vlib_node_get_runtime (vm, ip4_map_t_node.index);
546 from = vlib_frame_vector_args (frame);
547 n_left_from = frame->n_vectors;
548 next_index = node->cached_next_index;
549 vlib_combined_counter_main_t *cm = map_main.domain_counters;
550 u32 thread_index = vm->thread_index;
552 while (n_left_from > 0)
554 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
556 while (n_left_from > 0 && n_left_to_next > 0)
562 ip4_mapt_next_t next0 = 0;
566 ip4_mapt_pseudo_header_t *pheader0;
568 pi0 = to_next[0] = from[0];
573 error0 = MAP_ERROR_NONE;
575 p0 = vlib_get_buffer (vm, pi0);
/* Destination port extracted earlier by shallow-virtual reassembly. */
577 u16 l4_dst_port = vnet_buffer (p0)->ip.reass.l4_dst_port;
579 ip40 = vlib_buffer_get_current (p0);
580 ip4_len0 = clib_host_to_net_u16 (ip40->length);
/* Basic sanity: buffer holds the whole packet and the header is a plain
   20-byte IPv4 header (version 4, IHL 5). */
581 if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
582 ip40->ip_version_and_header_length != 0x45))
584 error0 = MAP_ERROR_UNKNOWN;
587 d0 = ip4_map_get_domain (&ip40->dst_address,
588 &vnet_buffer (p0)->map_t.map_domain_index,
592 { /* Guess it wasn't for us */
593 vnet_feature_next (&next0, p0);
/* TTL about to expire: answer with ICMP time-exceeded. */
599 if (PREDICT_FALSE (ip40->ttl == 1))
601 icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
602 ICMP4_time_exceeded_ttl_exceeded_in_transit,
604 p0->error = error_node->errors[MAP_ERROR_TIME_EXCEEDED];
605 next0 = IP4_MAPT_NEXT_ICMP_ERROR;
610 ip40->flags_and_fragment_offset &
611 clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
613 vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
/* DF set and the translated packet (grown by the IPv6/IPv4 header size
   difference) would exceed the domain MTU: ICMP frag-needed, accounting
   for the header growth in the advertised MTU. */
616 (df0 && !map_main.frag_ignore_df
619 (sizeof (ip6_header_t) - sizeof (ip4_header_t))) >
620 vnet_buffer (p0)->map_t.mtu)))
622 icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
623 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
624 vnet_buffer (p0)->map_t.mtu -
625 (sizeof (ip6_header_t) -
626 sizeof (ip4_header_t)));
627 p0->error = error_node->errors[MAP_ERROR_DF_SET];
628 next0 = IP4_MAPT_NEXT_ICMP_ERROR;
632 ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
633 &next0, l4_dst_port);
635 /* Verify that port is not among the well-known ports */
636 if ((d0->psid_length > 0 && d0->psid_offset > 0)
637 && (clib_net_to_host_u16 (dst_port0) <
638 (0x1 << (16 - d0->psid_offset))))
640 error0 = MAP_ERROR_SEC_CHECK;
643 //Add MAP-T pseudo header in front of the packet
644 vlib_buffer_advance (p0, -sizeof (*pheader0));
645 pheader0 = vlib_buffer_get_current (p0);
647 //Save addresses within the packet
648 ip4_map_t_embedded_address (d0, &pheader0->saddr,
650 pheader0->daddr.as_u64[0] =
651 map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
652 pheader0->daddr.as_u64[1] =
653 map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
/* ICMP packets are counted in the ICMP node instead, to avoid double
   counting. */
656 (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
658 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
661 map_t.map_domain_index, 1,
666 next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
667 p0->error = error_node->errors[error0];
669 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
671 map_add_trace (vm, node, p0, d0 - map_main.domains, dst_port0);
674 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
675 to_next, n_left_to_next, pi0,
678 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
680 return frame->n_vectors;
/* Error strings shared by all four MAP-T nodes, generated from the MAP error
   list macro (the foreach invocation/#undef is elided in this excerpt). */
683 static char *map_t_error_strings[] = {
684 #define _(sym,string) string,
/* Register ip4-map-t as an ip4-unicast feature, ordered after shallow
   virtual reassembly (which supplies ip.reass.l4_dst_port) and before
   ip4-flow-classify. */
690 VNET_FEATURE_INIT (ip4_map_t_feature, static) = {
691 .arc_name = "ip4-unicast",
692 .node_name = "ip4-map-t",
693 .runs_before = VNET_FEATURES ("ip4-flow-classify"),
694 .runs_after = VNET_FEATURES ("ip4-sv-reassembly-feature"),
/* Graph-node registration for the fragmented-packet translation path. */
697 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
698 .function = ip4_map_t_fragmented,
699 .name = "ip4-map-t-fragmented",
700 .vector_size = sizeof(u32),
701 .format_trace = format_map_trace,
702 .type = VLIB_NODE_TYPE_INTERNAL,
704 .n_errors = MAP_N_ERROR,
705 .error_strings = map_t_error_strings,
707 .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
709 [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
710 [IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE] = "ip6-load-balance",
711 [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
712 [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
/* Graph-node registration for the ICMP translation path. */
718 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
719 .function = ip4_map_t_icmp,
720 .name = "ip4-map-t-icmp",
721 .vector_size = sizeof(u32),
722 .format_trace = format_map_trace,
723 .type = VLIB_NODE_TYPE_INTERNAL,
725 .n_errors = MAP_N_ERROR,
726 .error_strings = map_t_error_strings,
728 .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
730 [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
731 [IP4_MAPT_ICMP_NEXT_IP6_REWRITE] = "ip6-load-balance",
732 [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
733 [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
/* Graph-node registration for the TCP/UDP translation path. */
739 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
740 .function = ip4_map_t_tcp_udp,
741 .name = "ip4-map-t-tcp-udp",
742 .vector_size = sizeof(u32),
743 .format_trace = format_map_trace,
744 .type = VLIB_NODE_TYPE_INTERNAL,
746 .n_errors = MAP_N_ERROR,
747 .error_strings = map_t_error_strings,
749 .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
751 [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
752 [IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE] = "ip6-load-balance",
753 [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
754 [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
/* Graph-node registration for the MAP-T entry/classifier node; its next
   nodes are the three per-protocol translators, the ICMP error generator,
   and drop. (The .name initializer is elided in this excerpt.) */
760 VLIB_REGISTER_NODE(ip4_map_t_node) = {
761 .function = ip4_map_t,
763 .vector_size = sizeof(u32),
764 .format_trace = format_map_trace,
765 .type = VLIB_NODE_TYPE_INTERNAL,
767 .n_errors = MAP_N_ERROR,
768 .error_strings = map_t_error_strings,
770 .n_next_nodes = IP4_MAPT_N_NEXT,
772 [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
773 [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
774 [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
775 [IP4_MAPT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
776 [IP4_MAPT_NEXT_DROP] = "error-drop",
782 * fd.io coding-style-patch-verification: ON
785 * eval: (c-set-style "gnu")