2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
/* NOTE(review): this chunk is a numbered extraction with many original
 * lines missing; the enum openings (typedef enum { ...) are not visible. */
/* Next-node indices used by the ip4-map-t classifier node (see the
 * ip4_map_t_node registration at the bottom of this file). */
22 IP4_MAPT_NEXT_MAPT_TCP_UDP,
23 IP4_MAPT_NEXT_MAPT_ICMP,
24 IP4_MAPT_NEXT_MAPT_FRAGMENTED,
/* Next-node indices for the ip4-map-t-icmp node. */
31 IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
32 IP4_MAPT_ICMP_NEXT_IP6_FRAG,
33 IP4_MAPT_ICMP_NEXT_DROP,
35 } ip4_mapt_icmp_next_t;
/* Next-node indices for the ip4-map-t-tcp-udp node (enum opening not
 * visible in this chunk). */
39 IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
40 IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
41 IP4_MAPT_TCP_UDP_NEXT_DROP,
42 IP4_MAPT_TCP_UDP_N_NEXT
43 } ip4_mapt_tcp_udp_next_t;
/* Next-node indices for the ip4-map-t-fragmented node (enum opening not
 * visible in this chunk). */
47 IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
48 IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
49 IP4_MAPT_FRAGMENTED_NEXT_DROP,
50 IP4_MAPT_FRAGMENTED_N_NEXT
51 } ip4_mapt_fragmented_next_t;
53 //This is used to pass information within the buffer data.
54 //Buffer structure being too small to contain big structures like this.
/* Packed pseudo-header prepended to the packet by ip4_map_t() and
 * consumed (then skipped with vlib_buffer_advance) by the downstream
 * translation nodes. Fields saddr/daddr (used below) are in the part
 * of the struct body not visible in this chunk. */
56 typedef CLIB_PACKED (struct {
59 //IPv6 header + Fragmentation header will be here
60 //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
62 }) ip4_mapt_pseudo_header_t;
/* Context handed to the icmp_to_icmp6 translation callbacks below.
 * Usage shows at least fields `d` (MAP domain) and `recv_port`;
 * struct opening/body not visible in this chunk. */
69 } icmp_to_icmp6_ctx_t;
/* Callback used while translating the outer ICMP IPv4 header to IPv6.
 * IPv6 source = MAP-T embedded address of the IPv4 source; IPv6
 * destination = domain prefix/suffix derived from the IPv4 destination
 * and the received port. */
72 ip4_to_ip6_set_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *arg)
74 icmp_to_icmp6_ctx_t *ctx = arg;
76 ip4_map_t_embedded_address (ctx->d, &ip6->src_address, &ip4->src_address);
77 ip6->dst_address.as_u64[0] =
78 map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
79 ip6->dst_address.as_u64[1] =
80 map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
/* Callback for the *inner* (quoted) IP header of an ICMP error packet.
 * Mirrors ip4_to_ip6_set_icmp_cb with src/dst swapped, since the inner
 * header describes the original packet in the opposite direction. */
86 ip4_to_ip6_set_inner_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6,
89 icmp_to_icmp6_ctx_t *ctx = arg;
91 //Note that the source address is within the domain
92 //while the destination address is the one outside the domain
93 ip4_map_t_embedded_address (ctx->d, &ip6->dst_address, &ip4->dst_address);
94 ip6->src_address.as_u64[0] =
95 map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
96 ip6->src_address.as_u64[1] =
97 map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
/* Node function for ip4-map-t-icmp: translates IPv4 ICMP packets to
 * IPv6 ICMPv6 using the two callbacks above, then enqueues to
 * ip6-lookup, ip6-frag (when over the domain MTU) or drop.
 * NOTE(review): several original lines are missing from this chunk
 * (braces, some declarations such as pi0/p0/ip40/d0/error0). */
103 ip4_map_t_icmp (vlib_main_t * vm,
104 vlib_node_runtime_t * node, vlib_frame_t * frame)
106 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
107 vlib_node_runtime_t *error_node =
108 vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
109 from = vlib_frame_vector_args (frame);
110 n_left_from = frame->n_vectors;
111 next_index = node->cached_next_index;
112 vlib_combined_counter_main_t *cm = map_main.domain_counters;
113 u32 thread_index = vm->thread_index;
115 while (n_left_from > 0)
117 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
119 while (n_left_from > 0 && n_left_to_next > 0)
123 ip4_mapt_icmp_next_t next0;
127 icmp_to_icmp6_ctx_t ctx0;
130 next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
131 pi0 = to_next[0] = from[0];
136 error0 = MAP_ERROR_NONE;
138 p0 = vlib_get_buffer (vm, pi0);
139 vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
141 clib_net_to_host_u16 (((ip4_header_t *)
142 vlib_buffer_get_current (p0))->length);
144 pool_elt_at_index (map_main.domains,
145 vnet_buffer (p0)->map_t.map_domain_index);
147 ip40 = vlib_buffer_get_current (p0);
/* ip4_get_port with a non-zero second argument — presumably "receiver
 * side" port extraction; TODO confirm against ip4_to_ip6.h. */
148 ctx0.recv_port = ip4_get_port (ip40, 1);
150 if (ctx0.recv_port == 0)
152 // In case of 1:1 mapping, we don't care about the port
153 if (!(d0->ea_bits_len == 0 && d0->rules))
155 error0 = MAP_ERROR_ICMP;
/* icmp_to_icmp6 call (call site partially missing from this chunk);
 * a non-zero return is treated as a translation failure. */
161 (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
162 ip4_to_ip6_set_inner_icmp_cb, &ctx0))
164 error0 = MAP_ERROR_ICMP;
/* Packet grew past the domain MTU: hand off to the IPv6 fragmenter. */
168 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
170 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
171 vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
172 next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
/* Account TX bytes/packets per MAP domain on success, else drop. */
175 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
177 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
180 map_t.map_domain_index, 1,
185 next0 = IP4_MAPT_ICMP_NEXT_DROP;
187 p0->error = error_node->errors[error0];
188 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
189 to_next, n_left_to_next, pi0,
192 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
194 return frame->n_vectors;
198 * Translate fragmented IPv4 UDP/TCP packet to IPv6.
/* Rewrites a non-first IPv4 fragment in place: overlays an IPv6 header
 * plus an IPv6 fragment extension header where the IPv4 header was,
 * using addresses precomputed in the pseudo-header. The buffer is
 * advanced by the (negative) size difference so current_data points at
 * the new IPv6 header. */
201 map_ip4_to_ip6_fragmented (vlib_buffer_t * p,
202 ip4_mapt_pseudo_header_t * pheader)
206 ip6_frag_hdr_t *frag;
208 ip4 = vlib_buffer_get_current (p);
209 frag = (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
211 (ip6_header_t *) u8_ptr_add (ip4,
212 sizeof (*ip4) - sizeof (*frag) -
214 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
216 //We know that the protocol was one of ICMP, TCP or UDP
217 //because the first fragment was found and cached
219 (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol;
220 frag->identification = frag_id_4to6 (ip4->fragment_id);
/* Carry over the fragment offset and the More-Fragments flag. */
222 frag->fragment_offset_and_more =
223 ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip4),
225 (ip4->flags_and_fragment_offset) &
226 IP4_HEADER_FLAG_MORE_FRAGMENTS);
/* Version 6, traffic class copied from the IPv4 TOS byte. */
228 ip6->ip_version_traffic_class_and_flow_label =
229 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
230 ip6->payload_length =
231 clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
232 sizeof (*ip4) + sizeof (*frag));
233 ip6->hop_limit = ip4->ttl;
234 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
/* Addresses were computed by ip4_map_t() and stashed in the
 * pseudo-header prepended to the buffer. */
236 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
237 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
238 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
239 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
/* Node function for ip4-map-t-fragmented: pops the pseudo-header,
 * translates each non-first fragment via map_ip4_to_ip6_fragmented(),
 * then enqueues to ip6-lookup, ip6-frag (over-MTU) or drop.
 * NOTE(review): some original lines (braces, pi0/p0 declarations) are
 * missing from this chunk. */
245 ip4_map_t_fragmented (vlib_main_t * vm,
246 vlib_node_runtime_t * node, vlib_frame_t * frame)
248 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
249 from = vlib_frame_vector_args (frame);
250 n_left_from = frame->n_vectors;
251 next_index = node->cached_next_index;
252 vlib_node_runtime_t *error_node =
253 vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);
255 while (n_left_from > 0)
257 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
259 while (n_left_from > 0 && n_left_to_next > 0)
263 ip4_mapt_pseudo_header_t *pheader0;
264 ip4_mapt_fragmented_next_t next0;
266 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
267 pi0 = to_next[0] = from[0];
273 p0 = vlib_get_buffer (vm, pi0);
275 //Accessing pseudo header
276 pheader0 = vlib_buffer_get_current (p0);
277 vlib_buffer_advance (p0, sizeof (*pheader0));
/* Non-zero return from the translator means the fragment is dropped. */
279 if (map_ip4_to_ip6_fragmented (p0, pheader0))
281 p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
282 next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
/* Re-fragment if the translated packet exceeds the domain MTU. */
286 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
288 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
289 vnet_buffer (p0)->ip_frag.next_index =
290 IP6_FRAG_NEXT_IP6_LOOKUP;
291 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
295 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
296 to_next, n_left_to_next, pi0,
299 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
301 return frame->n_vectors;
305 * Translate IPv4 UDP/TCP packet to IPv6.
/* In-place IPv4->IPv6 header translation for TCP/UDP, including
 * incremental L4 checksum fixup for the address change. Adds an IPv6
 * fragment header when the IPv4 packet had More-Fragments set (i.e.
 * this is a first fragment). */
308 map_ip4_to_ip6_tcp_udp (vlib_buffer_t * p, ip4_mapt_pseudo_header_t * pheader)
310 map_main_t *mm = &map_main;
315 ip6_frag_hdr_t *frag;
317 ip4_address_t old_src, old_dst;
319 ip4 = vlib_buffer_get_current (p);
321 if (ip4->protocol == IP_PROTOCOL_UDP)
323 udp_header_t *udp = ip4_next_header (ip4);
324 checksum = &udp->checksum;
327 * UDP checksum is optional over IPv4 but mandatory for IPv6 We
328 * do not check udp->length sanity but use our safe computed
/* Zero UDP checksum: compute one from scratch over the UDP payload
 * plus a pseudo-header, since IPv6 requires a real checksum. */
331 if (PREDICT_FALSE (!*checksum))
333 u16 udp_len = clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
334 csum = ip_incremental_checksum (0, udp, udp_len);
335 csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
337 ip_csum_with_carry (csum, clib_host_to_net_u16 (IP_PROTOCOL_UDP));
338 csum = ip_csum_with_carry (csum, *((u64 *) (&ip4->src_address)));
339 *checksum = ~ip_csum_fold (csum);
/* TCP path: apply MSS clamping (updates checksum incrementally). */
344 tcp_header_t *tcp = ip4_next_header (ip4);
347 csum = tcp->checksum;
348 map_mss_clamping (tcp, &csum, mm->tcp_mss);
349 tcp->checksum = ip_csum_fold (csum);
351 checksum = &tcp->checksum;
/* Remember the old addresses for the incremental checksum update. */
354 old_src.as_u32 = ip4->src_address.as_u32;
355 old_dst.as_u32 = ip4->dst_address.as_u32;
357 /* Deal with fragmented packets */
358 if (PREDICT_FALSE (ip4->flags_and_fragment_offset &
359 clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
362 (ip6_header_t *) u8_ptr_add (ip4,
363 sizeof (*ip4) - sizeof (*ip6) -
366 (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
367 frag_id = frag_id_4to6 (ip4->fragment_id);
368 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
/* Unfragmented case: IPv6 header only, no fragment extension. */
372 ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
373 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
377 ip6->ip_version_traffic_class_and_flow_label =
378 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
379 ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
380 ip6->hop_limit = ip4->ttl;
381 ip6->protocol = ip4->protocol;
382 if (PREDICT_FALSE (frag != NULL))
384 frag->next_hdr = ip6->protocol;
385 frag->identification = frag_id;
/* First fragment: offset 0, More-Fragments set. */
387 frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more (0, 1);
388 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
389 ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
392 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
393 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
394 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
395 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
/* Incremental L4 checksum fixup: remove old IPv4 addresses, add the
 * new IPv6 addresses. */
397 csum = ip_csum_sub_even (*checksum, old_src.as_u32);
398 csum = ip_csum_sub_even (csum, old_dst.as_u32);
399 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
400 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
401 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
402 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
403 *checksum = ip_csum_fold (csum);
/* Node function for ip4-map-t-tcp-udp: pops the pseudo-header,
 * translates via map_ip4_to_ip6_tcp_udp(), then enqueues to
 * ip6-lookup, ip6-frag (over-MTU) or drop.
 * NOTE(review): some original lines are missing from this chunk. */
409 ip4_map_t_tcp_udp (vlib_main_t * vm,
410 vlib_node_runtime_t * node, vlib_frame_t * frame)
412 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
413 from = vlib_frame_vector_args (frame);
414 n_left_from = frame->n_vectors;
415 next_index = node->cached_next_index;
416 vlib_node_runtime_t *error_node =
417 vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);
420 while (n_left_from > 0)
422 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
424 while (n_left_from > 0 && n_left_to_next > 0)
428 ip4_mapt_pseudo_header_t *pheader0;
429 ip4_mapt_tcp_udp_next_t next0;
431 pi0 = to_next[0] = from[0];
437 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
438 p0 = vlib_get_buffer (vm, pi0);
440 //Accessing pseudo header
441 pheader0 = vlib_buffer_get_current (p0);
442 vlib_buffer_advance (p0, sizeof (*pheader0));
/* Non-zero return from the translator means the packet is dropped. */
444 if (map_ip4_to_ip6_tcp_udp (p0, pheader0))
446 p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
447 next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
451 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
453 //Send to fragmentation node if necessary
454 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
455 vnet_buffer (p0)->ip_frag.next_index =
456 IP6_FRAG_NEXT_IP6_LOOKUP;
457 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
460 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
461 to_next, n_left_to_next, pi0,
464 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
467 return frame->n_vectors;
/* Classify an IPv4 packet for MAP-T: pick the next node (tcp-udp, icmp
 * or fragmented), the destination port used for MAP address derivation,
 * and an error code for malformed/unsupported packets.
 * NOTE(review): some original lines (branch bodies, 1:1-mapping
 * handling) are missing from this chunk. */
470 static_always_inline void
471 ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
472 ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
473 u8 * error0, ip4_mapt_next_t * next0, u16 l4_dst_port)
/* Non-first fragment: L4 header is unavailable, go to the fragmented
 * path which relies on the cached first fragment. */
475 if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
477 *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
478 if (d0->ea_bits_len == 0 && d0->rules)
484 *dst_port0 = l4_dst_port;
485 *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
488 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
/* Offset 36 = 20 (IPv4 hdr) + 16 (TCP checksum field offset). */
490 vnet_buffer (p0)->map_t.checksum_offset = 36;
491 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
492 *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
493 *dst_port0 = l4_dst_port;
495 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
/* Offset 26 = 20 (IPv4 hdr) + 6 (UDP checksum field offset). */
497 vnet_buffer (p0)->map_t.checksum_offset = 26;
498 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
499 *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
500 *dst_port0 = l4_dst_port;
502 else if (ip40->protocol == IP_PROTOCOL_ICMP)
504 *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
505 if (d0->ea_bits_len == 0 && d0->rules)
507 else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
509 || ((icmp46_header_t *)
511 sizeof (*ip40)))->code == ICMP4_echo_request)
512 *dst_port0 = l4_dst_port;
/* Anything other than TCP/UDP/ICMP is not translatable. */
516 *error0 = MAP_ERROR_BAD_PROTOCOL;
/* Entry node function for ip4-map-t: validates the IPv4 header, looks
 * up the MAP domain, classifies the packet, performs a port security
 * check, prepends the pseudo-header with the precomputed IPv6
 * addresses, and dispatches to the per-protocol translation nodes.
 * NOTE(review): several original lines (braces, declarations of
 * pi0/p0/ip40/d0/error0/dst_port0) are missing from this chunk. */
521 ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
523 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
524 vlib_node_runtime_t *error_node =
525 vlib_node_get_runtime (vm, ip4_map_t_node.index);
526 from = vlib_frame_vector_args (frame);
527 n_left_from = frame->n_vectors;
528 next_index = node->cached_next_index;
529 vlib_combined_counter_main_t *cm = map_main.domain_counters;
530 u32 thread_index = vm->thread_index;
532 while (n_left_from > 0)
534 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
536 while (n_left_from > 0 && n_left_to_next > 0)
542 ip4_mapt_next_t next0 = 0;
546 ip4_mapt_pseudo_header_t *pheader0;
548 pi0 = to_next[0] = from[0];
553 error0 = MAP_ERROR_NONE;
555 p0 = vlib_get_buffer (vm, pi0);
/* L4 destination port recovered by shallow-virtual reassembly. */
557 u16 l4_dst_port = vnet_buffer (p0)->ip.reass.l4_dst_port;
559 ip40 = vlib_buffer_get_current (p0);
560 ip4_len0 = clib_host_to_net_u16 (ip40->length);
/* Reject truncated packets and anything that is not a plain
 * 20-byte IPv4 header (0x45 = version 4, IHL 5). */
561 if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
562 ip40->ip_version_and_header_length != 0x45))
564 error0 = MAP_ERROR_UNKNOWN;
567 d0 = ip4_map_get_domain (&ip40->dst_address,
568 &vnet_buffer (p0)->map_t.map_domain_index,
572 { /* Guess it wasn't for us */
573 vnet_feature_next (&next0, p0);
577 vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
580 ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
581 &next0, l4_dst_port);
583 /* Verify that port is not among the well-known ports */
584 if ((d0->psid_length > 0 && d0->psid_offset > 0)
585 && (clib_net_to_host_u16 (dst_port0) <
586 (0x1 << (16 - d0->psid_offset))))
588 error0 = MAP_ERROR_SEC_CHECK;
591 //Add MAP-T pseudo header in front of the packet
592 vlib_buffer_advance (p0, -sizeof (*pheader0));
593 pheader0 = vlib_buffer_get_current (p0);
595 //Save addresses within the packet
596 ip4_map_t_embedded_address (d0, &pheader0->saddr,
598 pheader0->daddr.as_u64[0] =
599 map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
600 pheader0->daddr.as_u64[1] =
601 map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
/* ICMP TX is counted in ip4_map_t_icmp instead, to avoid
 * double-counting. */
604 (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
606 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
609 map_t.map_domain_index, 1,
614 next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
615 p0->error = error_node->errors[error0];
617 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
619 map_add_trace (vm, node, p0, d0 - map_main.domains, dst_port0);
622 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
623 to_next, n_left_to_next, pi0,
626 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
628 return frame->n_vectors;
/* Error strings for the MAP-T nodes, expanded from the MAP error list
 * macro. NOTE(review): the foreach expansion, #undef and closing of the
 * array are not visible in this chunk. */
631 static char *map_t_error_strings[] = {
632 #define _(sym,string) string,
/* Registers ip4-map-t on the ip4-unicast feature arc, after shallow
 * virtual reassembly (which provides l4_dst_port) and before
 * ip4-flow-classify. Closing brace not visible in this chunk. */
638 VNET_FEATURE_INIT (ip4_map_t_feature, static) = {
639 .arc_name = "ip4-unicast",
640 .node_name = "ip4-map-t",
641 .runs_before = VNET_FEATURES ("ip4-flow-classify"),
642 .runs_after = VNET_FEATURES ("ip4-sv-reassembly-feature"),
/* Graph node registration for ip4-map-t-fragmented (closing brace and
 * next_nodes opening are not visible in this chunk). */
645 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
646 .function = ip4_map_t_fragmented,
647 .name = "ip4-map-t-fragmented",
648 .vector_size = sizeof(u32),
649 .format_trace = format_map_trace,
650 .type = VLIB_NODE_TYPE_INTERNAL,
652 .n_errors = MAP_N_ERROR,
653 .error_strings = map_t_error_strings,
655 .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
657 [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
658 [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
659 [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
/* Graph node registration for ip4-map-t-icmp (closing brace and
 * next_nodes opening are not visible in this chunk). */
665 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
666 .function = ip4_map_t_icmp,
667 .name = "ip4-map-t-icmp",
668 .vector_size = sizeof(u32),
669 .format_trace = format_map_trace,
670 .type = VLIB_NODE_TYPE_INTERNAL,
672 .n_errors = MAP_N_ERROR,
673 .error_strings = map_t_error_strings,
675 .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
677 [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
678 [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
679 [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
/* Graph node registration for ip4-map-t-tcp-udp (closing brace and
 * next_nodes opening are not visible in this chunk). */
685 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
686 .function = ip4_map_t_tcp_udp,
687 .name = "ip4-map-t-tcp-udp",
688 .vector_size = sizeof(u32),
689 .format_trace = format_map_trace,
690 .type = VLIB_NODE_TYPE_INTERNAL,
692 .n_errors = MAP_N_ERROR,
693 .error_strings = map_t_error_strings,
695 .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
697 [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
698 [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
699 [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
/* Graph node registration for the ip4-map-t entry node, which fans out
 * to the three translation nodes above or drops. The .name line,
 * next_nodes opening and closing brace are not visible in this chunk. */
705 VLIB_REGISTER_NODE(ip4_map_t_node) = {
706 .function = ip4_map_t,
708 .vector_size = sizeof(u32),
709 .format_trace = format_map_trace,
710 .type = VLIB_NODE_TYPE_INTERNAL,
712 .n_errors = MAP_N_ERROR,
713 .error_strings = map_t_error_strings,
715 .n_next_nodes = IP4_MAPT_N_NEXT,
717 [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
718 [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
719 [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
720 [IP4_MAPT_NEXT_DROP] = "error-drop",
726 * fd.io coding-style-patch-verification: ON
729 * eval: (c-set-style "gnu")