2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
// Next-node indices for the top-level ip4-map-t classify node.
// NOTE(review): the "typedef enum {" opener and the closing
// "} ip4_mapt_next_t;" members (DROP, N_NEXT) are elided from this view.
22 IP4_MAPT_NEXT_MAPT_TCP_UDP,
23 IP4_MAPT_NEXT_MAPT_ICMP,
24 IP4_MAPT_NEXT_MAPT_FRAGMENTED,
25 IP4_MAPT_NEXT_ICMP_ERROR,
// Next-node indices for the ip4-map-t-icmp node: translated packets go
// to ip6-lookup, oversized ones to ip6-frag, errors to drop.
// NOTE(review): enum opener elided from this view.
32 IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
33 IP4_MAPT_ICMP_NEXT_IP6_FRAG,
34 IP4_MAPT_ICMP_NEXT_DROP,
36 } ip4_mapt_icmp_next_t;
// Next-node indices for the ip4-map-t-tcp-udp node (same三-way pattern).
// NOTE(review): enum opener elided from this view.
40 IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
41 IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
42 IP4_MAPT_TCP_UDP_NEXT_DROP,
43 IP4_MAPT_TCP_UDP_N_NEXT
44 } ip4_mapt_tcp_udp_next_t;
// Next-node indices for the ip4-map-t-fragmented node.
// NOTE(review): enum opener elided from this view.
48 IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
49 IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
50 IP4_MAPT_FRAGMENTED_NEXT_DROP,
51 IP4_MAPT_FRAGMENTED_N_NEXT
52 } ip4_mapt_fragmented_next_t;
54 //This is used to pass information within the buffer data.
55 //Buffer structure being too small to contain big structures like this.
// Prepended in front of the IPv4 packet by the classify node and consumed
// (then advanced past) by the per-protocol translation nodes.  Carries the
// pre-computed IPv6 source/destination addresses (saddr/daddr fields;
// field declarations are elided from this view — see uses of
// pheader->saddr / pheader->daddr below).
57 typedef CLIB_PACKED (struct {
60 //IPv6 header + Fragmentation header will be here
61 //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
63 }) ip4_mapt_pseudo_header_t;
// Context passed to the ICMP->ICMPv6 translation callbacks.
// NOTE(review): the struct body (fields `d` — the MAP domain — and
// `recv_port`) is elided from this view; both are dereferenced below.
70 } icmp_to_icmp6_ctx_t;
// icmp_to_icmp6() callback: rewrite the OUTER header's IPv6 addresses.
// The IPv6 source embeds the IPv4 source per the MAP domain's embedded-
// address rule; the IPv6 destination is built from the domain prefix and
// suffix keyed on the IPv4 destination and the received port.
// NOTE(review): return type, braces and return statement are elided
// from this view of the file.
73 ip4_to_ip6_set_icmp_cb (vlib_buffer_t * b, ip4_header_t * ip4,
74 ip6_header_t * ip6, void *arg)
76 icmp_to_icmp6_ctx_t *ctx = arg;
78 ip4_map_t_embedded_address (ctx->d, &ip6->src_address, &ip4->src_address);
79 ip6->dst_address.as_u64[0] =
80 map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
81 ip6->dst_address.as_u64[1] =
82 map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
// icmp_to_icmp6() callback for the INNER (quoted) header of an ICMP
// error: direction is mirrored relative to the outer callback above,
// so src/dst roles are swapped.
// NOTE(review): return type, braces and return statement are elided
// from this view of the file.
88 ip4_to_ip6_set_inner_icmp_cb (vlib_buffer_t * b, ip4_header_t * ip4,
89 ip6_header_t * ip6, void *arg)
91 icmp_to_icmp6_ctx_t *ctx = arg;
93 //Note that the source address is within the domain
94 //while the destination address is the one outside the domain
95 ip4_map_t_embedded_address (ctx->d, &ip6->dst_address, &ip4->dst_address);
96 ip6->src_address.as_u64[0] =
97 map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
98 ip6->src_address.as_u64[1] =
99 map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
// Graph node function: translate IPv4 ICMP packets to ICMPv6.
// Per packet: skip the MAP-T pseudo-header, look up the MAP domain
// stored in the buffer opaque, derive the port from the ICMP payload,
// run the header rewrite via the two callbacks above, then enqueue to
// ip6-lookup (or ip6-frag when the result exceeds the domain MTU).
// NOTE(review): many lines (declarations, braces, counter arguments,
// continuation lines) are elided from this view of the file.
105 ip4_map_t_icmp (vlib_main_t * vm,
106 vlib_node_runtime_t * node, vlib_frame_t * frame)
108 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
109 vlib_node_runtime_t *error_node =
110 vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
111 from = vlib_frame_vector_args (frame);
112 n_left_from = frame->n_vectors;
113 next_index = node->cached_next_index;
114 vlib_combined_counter_main_t *cm = map_main.domain_counters;
115 u32 thread_index = vm->thread_index;
// Standard single-buffer vlib dispatch loop.
117 while (n_left_from > 0)
119 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
121 while (n_left_from > 0 && n_left_to_next > 0)
125 ip4_mapt_icmp_next_t next0;
129 icmp_to_icmp6_ctx_t ctx0;
132 next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
133 pi0 = to_next[0] = from[0];
138 error0 = MAP_ERROR_NONE;
140 p0 = vlib_get_buffer (vm, pi0);
141 vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
// Byte count (from the IPv4 total length) for the TX counter below.
143 clib_net_to_host_u16 (((ip4_header_t *)
144 vlib_buffer_get_current (p0))->length);
// Recover the MAP domain index cached by the classify node.
146 pool_elt_at_index (map_main.domains,
147 vnet_buffer (p0)->map_t.map_domain_index);
149 ip40 = vlib_buffer_get_current (p0);
150 ctx0.recv_port = ip4_get_port (ip40, 1);
// Port 0 means no port could be extracted from the ICMP message;
// that is only acceptable for a 1:1 (ea_bits_len == 0) mapping.
152 if (ctx0.recv_port == 0)
154 // In case of 1:1 mapping, we don't care about the port
155 if (!(d0->ea_bits_len == 0 && d0->rules))
157 error0 = MAP_ERROR_ICMP;
// Perform the actual ICMP->ICMPv6 translation; a non-zero return
// from the (elided) icmp_to_icmp6 call is reported as MAP_ERROR_ICMP.
163 (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
164 ip4_to_ip6_set_inner_icmp_cb, &ctx0))
166 error0 = MAP_ERROR_ICMP;
// Translated packet larger than the domain MTU: hand off to ip6-frag.
170 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
172 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
173 vnet_buffer (p0)->ip_frag.next_index = IP_FRAG_NEXT_IP6_LOOKUP;
174 next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
// Account successful translations against the per-domain TX counter.
177 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
179 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
182 map_t.map_domain_index, 1,
187 next0 = IP4_MAPT_ICMP_NEXT_DROP;
189 p0->error = error_node->errors[error0];
190 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
191 to_next, n_left_to_next, pi0,
194 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
196 return frame->n_vectors;
200 * Translate fragmented IPv4 UDP/TCP packet to IPv6.
// In-place header rewrite for a non-first fragment: the IPv6 header and
// fragment extension header are written where the IPv4 header was, and
// the buffer is advanced so current_data points at the new IPv6 header.
// Addresses come pre-computed in the MAP-T pseudo-header.
// NOTE(review): the return type/value and some continuation lines are
// elided from this view of the file.
203 map_ip4_to_ip6_fragmented (vlib_buffer_t * p,
204 ip4_mapt_pseudo_header_t * pheader)
208 ip6_frag_hdr_t *frag;
210 ip4 = vlib_buffer_get_current (p);
// Overlay the fragment header and IPv6 header in front of / over the
// old IPv4 header (ip6 + frag together are larger than ip4, hence the
// negative-direction pointer arithmetic and buffer advance).
211 frag = (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
213 (ip6_header_t *) u8_ptr_add (ip4,
214 sizeof (*ip4) - sizeof (*frag) -
216 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
218 //We know that the protocol was one of ICMP, TCP or UDP
219 //because the first fragment was found and cached
// Map IPv4 protocol to IPv6 next-header; ICMP becomes ICMPv6.
221 (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol;
// Carry over fragment id / offset / more-fragments into the v6 header.
222 frag->identification = frag_id_4to6 (ip4->fragment_id);
224 frag->fragment_offset_and_more =
225 ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip4),
227 (ip4->flags_and_fragment_offset) &
228 IP4_HEADER_FLAG_MORE_FRAGMENTS);
// Build the IPv6 header: version 6, TOS copied into traffic class,
// payload length adjusted for the removed v4 header + added frag header.
230 ip6->ip_version_traffic_class_and_flow_label =
231 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
232 ip6->payload_length =
233 clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
234 sizeof (*ip4) + sizeof (*frag));
235 ip6->hop_limit = ip4->ttl;
236 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
// Addresses were pre-computed by the classify node.
238 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
239 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
240 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
241 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
// Graph node function: translate non-first IPv4 fragments to IPv6 using
// the pseudo-header left in front of the packet by the classify node.
// NOTE(review): declarations, braces and some continuation lines are
// elided from this view of the file.
247 ip4_map_t_fragmented (vlib_main_t * vm,
248 vlib_node_runtime_t * node, vlib_frame_t * frame)
250 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
251 from = vlib_frame_vector_args (frame);
252 n_left_from = frame->n_vectors;
253 next_index = node->cached_next_index;
254 vlib_node_runtime_t *error_node =
255 vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);
257 while (n_left_from > 0)
259 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
261 while (n_left_from > 0 && n_left_to_next > 0)
265 ip4_mapt_pseudo_header_t *pheader0;
266 ip4_mapt_fragmented_next_t next0;
268 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
269 pi0 = to_next[0] = from[0];
275 p0 = vlib_get_buffer (vm, pi0);
277 //Accessing pseudo header
278 pheader0 = vlib_buffer_get_current (p0);
279 vlib_buffer_advance (p0, sizeof (*pheader0));
// Non-zero return from the translator means the fragment is dropped.
281 if (map_ip4_to_ip6_fragmented (p0, pheader0))
283 p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
284 next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
// Result larger than the domain MTU: send to the ip6-frag node.
288 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
290 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
291 vnet_buffer (p0)->ip_frag.next_index =
292 IP_FRAG_NEXT_IP6_LOOKUP;
293 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
297 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
298 to_next, n_left_to_next, pi0,
301 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
303 return frame->n_vectors;
307 * Translate IPv4 UDP/TCP packet to IPv6.
// In-place header rewrite for an unfragmented (or first-fragment)
// TCP/UDP packet: builds the IPv6 header (plus a fragment header when
// the more-fragments flag is set), and incrementally updates the L4
// checksum for the pseudo-header address change (RFC 3022-style
// incremental update).  NOTE(review): return type/value and several
// continuation lines are elided from this view of the file.
310 map_ip4_to_ip6_tcp_udp (vlib_buffer_t * p, ip4_mapt_pseudo_header_t * pheader)
312 map_main_t *mm = &map_main;
317 ip6_frag_hdr_t *frag;
319 ip4_address_t old_src, old_dst;
321 ip4 = vlib_buffer_get_current (p);
323 if (ip4->protocol == IP_PROTOCOL_UDP)
325 udp_header_t *udp = ip4_next_header (ip4);
326 checksum = &udp->checksum;
329 * UDP checksum is optional over IPv4 but mandatory for IPv6 We
330 * do not check udp->length sanity but use our safe computed
// A zero UDP checksum is legal on IPv4 but not IPv6: compute one from
// scratch over the payload plus the IPv6-style pseudo-header fields.
333 if (PREDICT_FALSE (!*checksum))
335 u16 udp_len = clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
336 csum = ip_incremental_checksum (0, udp, udp_len);
337 csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
339 ip_csum_with_carry (csum, clib_host_to_net_u16 (IP_PROTOCOL_UDP));
340 csum = ip_csum_with_carry (csum, *((u64 *) (&ip4->src_address)));
341 *checksum = ~ip_csum_fold (csum);
// TCP path: clamp MSS per configuration, fixing the checksum as we go.
346 tcp_header_t *tcp = ip4_next_header (ip4);
349 csum = tcp->checksum;
350 map_mss_clamping (tcp, &csum, mm->tcp_mss);
351 tcp->checksum = ip_csum_fold (csum);
353 checksum = &tcp->checksum;
// Remember the IPv4 addresses for the incremental checksum fix-up below.
356 old_src.as_u32 = ip4->src_address.as_u32;
357 old_dst.as_u32 = ip4->dst_address.as_u32;
359 /* Deal with fragmented packets */
// First fragment (MF set, offset 0 — offset case goes to the
// fragmented node instead): emit an IPv6 fragment header too.
360 if (PREDICT_FALSE (ip4->flags_and_fragment_offset &
361 clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
364 (ip6_header_t *) u8_ptr_add (ip4,
365 sizeof (*ip4) - sizeof (*ip6) -
368 (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
369 frag_id = frag_id_4to6 (ip4->fragment_id);
370 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
// Unfragmented case: just overlay the IPv6 header.
374 ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
375 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
// Build the IPv6 header from the IPv4 one.
379 ip6->ip_version_traffic_class_and_flow_label =
380 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
381 ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
382 ip6->hop_limit = ip4->ttl;
383 ip6->protocol = ip4->protocol;
// Fragment-header fix-ups: offset 0, more-fragments set.
384 if (PREDICT_FALSE (frag != NULL))
386 frag->next_hdr = ip6->protocol;
387 frag->identification = frag_id;
389 frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more (0, 1);
390 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
391 ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
// Pre-computed MAP addresses from the pseudo-header.
394 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
395 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
396 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
397 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
// Incremental L4 checksum update: subtract old v4 addresses, add new
// v6 addresses (the rest of the pseudo-header is unchanged).
399 csum = ip_csum_sub_even (*checksum, old_src.as_u32);
400 csum = ip_csum_sub_even (csum, old_dst.as_u32);
401 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
402 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
403 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
404 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
405 *checksum = ip_csum_fold (csum);
// Graph node function: translate IPv4 TCP/UDP packets to IPv6 using the
// pseudo-header prepended by the classify node, mirroring the structure
// of ip4_map_t_fragmented above.
// NOTE(review): declarations, braces and some continuation lines are
// elided from this view of the file.
411 ip4_map_t_tcp_udp (vlib_main_t * vm,
412 vlib_node_runtime_t * node, vlib_frame_t * frame)
414 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
415 from = vlib_frame_vector_args (frame);
416 n_left_from = frame->n_vectors;
417 next_index = node->cached_next_index;
418 vlib_node_runtime_t *error_node =
419 vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);
422 while (n_left_from > 0)
424 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
426 while (n_left_from > 0 && n_left_to_next > 0)
430 ip4_mapt_pseudo_header_t *pheader0;
431 ip4_mapt_tcp_udp_next_t next0;
433 pi0 = to_next[0] = from[0];
439 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
440 p0 = vlib_get_buffer (vm, pi0);
442 //Accessing pseudo header
443 pheader0 = vlib_buffer_get_current (p0);
444 vlib_buffer_advance (p0, sizeof (*pheader0));
// Non-zero return from the translator: drop with a generic error.
446 if (map_ip4_to_ip6_tcp_udp (p0, pheader0))
448 p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
449 next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
453 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
455 //Send to fragmentation node if necessary
456 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
457 vnet_buffer (p0)->ip_frag.next_index =
458 IP_FRAG_NEXT_IP6_LOOKUP;
459 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
462 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
463 to_next, n_left_to_next, pi0,
466 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
469 return frame->n_vectors;
472 static_always_inline void
// Classify one IPv4 packet: choose the per-protocol next node, extract
// the destination port used for MAP address derivation, and validate
// minimal lengths.  Outputs via *dst_port0, *error0 and *next0.
// NOTE(review): several branch bodies/braces are elided from this view.
473 ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
474 ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
475 u8 * error0, ip4_mapt_next_t * next0, u16 l4_dst_port)
// Non-first fragment: port must come from the reassembly cache
// (l4_dst_port); -1 means the first fragment was never seen.
477 if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
479 *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
480 if (d0->ea_bits_len == 0 && d0->rules)
486 *dst_port0 = l4_dst_port;
487 *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
// TCP: checksum at fixed offset 36 (20 v4 header + 16 into TCP header);
// minimum length 40 = IPv4 header + minimal TCP header.
490 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
492 vnet_buffer (p0)->map_t.checksum_offset = 36;
493 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
494 *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
495 *dst_port0 = l4_dst_port;
// UDP: checksum at offset 26; minimum length 28 = IPv4 + UDP headers.
497 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
499 vnet_buffer (p0)->map_t.checksum_offset = 26;
500 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
501 *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
502 *dst_port0 = l4_dst_port;
// ICMP: port only needed for shared-address domains; echo messages can
// carry it.  NOTE(review): the elided branch between lines 507 and 509
// presumably handles the 1:1-mapping case — confirm against full source.
504 else if (ip40->protocol == IP_PROTOCOL_ICMP)
506 *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
507 if (d0->ea_bits_len == 0 && d0->rules)
509 else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
511 || ((icmp46_header_t *)
513 sizeof (*ip40)))->code == ICMP4_echo_request)
514 *dst_port0 = l4_dst_port;
// Anything else cannot be translated.
518 *error0 = MAP_ERROR_BAD_PROTOCOL;
// Top-level ip4-map-t feature node: validates the IPv4 header, finds the
// MAP domain, performs TTL and DF/MTU checks (generating ICMP errors),
// classifies the packet, enforces the PSID well-known-port check, and
// prepends the pseudo-header with the computed IPv6 src/dst before
// dispatching to the per-protocol translation nodes.
// NOTE(review): many lines (declarations, braces, argument lists,
// continuation lines) are elided from this view of the file.
523 ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
525 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
526 vlib_node_runtime_t *error_node =
527 vlib_node_get_runtime (vm, ip4_map_t_node.index);
528 from = vlib_frame_vector_args (frame);
529 n_left_from = frame->n_vectors;
530 next_index = node->cached_next_index;
531 vlib_combined_counter_main_t *cm = map_main.domain_counters;
532 u32 thread_index = vm->thread_index;
534 while (n_left_from > 0)
536 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
538 while (n_left_from > 0 && n_left_to_next > 0)
544 ip4_mapt_next_t next0 = 0;
548 ip4_mapt_pseudo_header_t *pheader0;
550 pi0 = to_next[0] = from[0];
555 error0 = MAP_ERROR_NONE;
557 p0 = vlib_get_buffer (vm, pi0);
// Destination port cached by the shallow-virtual reassembly feature.
559 u16 l4_dst_port = vnet_buffer (p0)->ip.reass.l4_dst_port;
// Basic sanity: truncated packet or any IHL other than 5 / version 4.
561 ip40 = vlib_buffer_get_current (p0);
562 ip4_len0 = clib_host_to_net_u16 (ip40->length);
563 if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
564 ip40->ip_version_and_header_length != 0x45))
566 error0 = MAP_ERROR_UNKNOWN;
569 d0 = ip4_map_get_domain (&ip40->dst_address,
570 &vnet_buffer (p0)->map_t.map_domain_index,
574 { /* Guess it wasn't for us */
575 vnet_feature_next (&next0, p0);
// TTL about to expire: emit ICMP time-exceeded instead of translating.
581 if (PREDICT_FALSE (ip40->ttl == 1))
583 icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
584 ICMP4_time_exceeded_ttl_exceeded_in_transit,
586 p0->error = error_node->errors[MAP_ERROR_TIME_EXCEEDED];
587 next0 = IP4_MAPT_NEXT_ICMP_ERROR;
// DF handling: translation grows the packet by the v6/v4 header size
// delta; if that exceeds the domain MTU and DF is set (and not
// configured to be ignored), send ICMP fragmentation-needed.
592 ip40->flags_and_fragment_offset &
593 clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
595 vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
598 (df0 && !map_main.frag_ignore_df
601 (sizeof (ip6_header_t) - sizeof (ip4_header_t))) >
602 vnet_buffer (p0)->map_t.mtu)))
604 icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
605 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
606 vnet_buffer (p0)->map_t.mtu -
607 (sizeof (ip6_header_t) -
608 sizeof (ip4_header_t)));
609 p0->error = error_node->errors[MAP_ERROR_DF_SET];
610 next0 = IP4_MAPT_NEXT_ICMP_ERROR;
614 ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
615 &next0, l4_dst_port);
617 /* Verify that port is not among the well-known ports */
// PSID security check: ports below 2^(16 - psid_offset) are reserved
// and must not be used by a shared-address subscriber.
618 if ((d0->psid_length > 0 && d0->psid_offset > 0)
619 && (clib_net_to_host_u16 (dst_port0) <
620 (0x1 << (16 - d0->psid_offset))))
622 error0 = MAP_ERROR_SEC_CHECK;
625 //Add MAP-T pseudo header in front of the packet
626 vlib_buffer_advance (p0, -sizeof (*pheader0));
627 pheader0 = vlib_buffer_get_current (p0);
629 //Save addresses within the packet
630 ip4_map_t_embedded_address (d0, &pheader0->saddr,
632 pheader0->daddr.as_u64[0] =
633 map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
634 pheader0->daddr.as_u64[1] =
635 map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
// TX counter here for non-ICMP paths; the ICMP node counts its own
// packets after translation succeeds.
638 (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
640 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
643 map_t.map_domain_index, 1,
648 next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
649 p0->error = error_node->errors[error0];
651 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
653 map_add_trace (vm, node, p0, d0 - map_main.domains, dst_port0);
656 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
657 to_next, n_left_to_next, pi0,
660 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
662 return frame->n_vectors;
// Error-string table shared by all four MAP-T nodes, generated from the
// (elided) MAP error macro list via the X-macro below.
665 static char *map_t_error_strings[] = {
666 #define _(sym,string) string,
// Register ip4-map-t on the ip4-unicast feature arc; it must run after
// shallow-virtual reassembly (which caches l4_dst_port) and before
// flow classification.
672 VNET_FEATURE_INIT (ip4_map_t_feature, static) = {
673 .arc_name = "ip4-unicast",
674 .node_name = "ip4-map-t",
675 .runs_before = VNET_FEATURES ("ip4-flow-classify"),
676 .runs_after = VNET_FEATURES ("ip4-sv-reassembly-feature"),
// Node registration: ip4-map-t-fragmented (non-first-fragment path).
679 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
680 .function = ip4_map_t_fragmented,
681 .name = "ip4-map-t-fragmented",
682 .vector_size = sizeof(u32),
683 .format_trace = format_map_trace,
684 .type = VLIB_NODE_TYPE_INTERNAL,
686 .n_errors = MAP_N_ERROR,
687 .error_strings = map_t_error_strings,
689 .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
691 [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
692 [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
693 [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
// Node registration: ip4-map-t-icmp (ICMP->ICMPv6 translation path).
699 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
700 .function = ip4_map_t_icmp,
701 .name = "ip4-map-t-icmp",
702 .vector_size = sizeof(u32),
703 .format_trace = format_map_trace,
704 .type = VLIB_NODE_TYPE_INTERNAL,
706 .n_errors = MAP_N_ERROR,
707 .error_strings = map_t_error_strings,
709 .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
711 [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
712 [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
713 [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
// Node registration: ip4-map-t-tcp-udp (main TCP/UDP translation path).
719 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
720 .function = ip4_map_t_tcp_udp,
721 .name = "ip4-map-t-tcp-udp",
722 .vector_size = sizeof(u32),
723 .format_trace = format_map_trace,
724 .type = VLIB_NODE_TYPE_INTERNAL,
726 .n_errors = MAP_N_ERROR,
727 .error_strings = map_t_error_strings,
729 .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
731 [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
732 [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
733 [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
// Node registration: the top-level ip4-map-t classify node, feeding the
// three per-protocol translation nodes plus ICMP error and drop.
// NOTE(review): the .name initializer line is elided from this view.
739 VLIB_REGISTER_NODE(ip4_map_t_node) = {
740 .function = ip4_map_t,
742 .vector_size = sizeof(u32),
743 .format_trace = format_map_trace,
744 .type = VLIB_NODE_TYPE_INTERNAL,
746 .n_errors = MAP_N_ERROR,
747 .error_strings = map_t_error_strings,
749 .n_next_nodes = IP4_MAPT_N_NEXT,
751 [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
752 [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
753 [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
754 [IP4_MAPT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
755 [IP4_MAPT_NEXT_DROP] = "error-drop",
761 * fd.io coding-style-patch-verification: ON
764 * eval: (c-set-style "gnu")