2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
/* Next-node indices for the ip4-map-t classifier node: packets are
 * dispatched to a per-protocol translation node (TCP/UDP, ICMP,
 * fragmented) or to ICMP error generation.
 * NOTE(review): the enum's typedef header/trailer lines are not visible
 * in this chunk. */
22 IP4_MAPT_NEXT_MAPT_TCP_UDP,
23 IP4_MAPT_NEXT_MAPT_ICMP,
24 IP4_MAPT_NEXT_MAPT_FRAGMENTED,
25 IP4_MAPT_NEXT_ICMP_ERROR,
/* Next-node indices for the ip4-map-t-icmp node: normal IPv6 lookup,
 * lookup-bypass rewrite, IPv6 fragmentation, or drop.
 * NOTE(review): the typedef's opening line is not visible in this chunk. */
32 IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
33 IP4_MAPT_ICMP_NEXT_IP6_REWRITE,
34 IP4_MAPT_ICMP_NEXT_IP6_FRAG,
35 IP4_MAPT_ICMP_NEXT_DROP,
37 } ip4_mapt_icmp_next_t;
/* Next-node indices for the ip4-map-t-tcp-udp node; mirrors the ICMP
 * next set above.  NOTE(review): typedef opening line not visible. */
41 IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
42 IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE,
43 IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
44 IP4_MAPT_TCP_UDP_NEXT_DROP,
45 IP4_MAPT_TCP_UDP_N_NEXT
46 } ip4_mapt_tcp_udp_next_t;
/* Next-node indices for the ip4-map-t-fragmented node; mirrors the
 * TCP/UDP next set above.  NOTE(review): typedef opening line not
 * visible in this chunk. */
50 IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
51 IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE,
52 IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
53 IP4_MAPT_FRAGMENTED_NEXT_DROP,
54 IP4_MAPT_FRAGMENTED_N_NEXT
55 } ip4_mapt_fragmented_next_t;
57 //This is used to pass information within the buffer data.
58 //Buffer structure being too small to contain big structures like this.
/* Packed pseudo-header prepended to the buffer by ip4_map_t() and
 * consumed by the per-protocol nodes; carries the computed IPv6
 * source/destination addresses (see pheader->saddr / pheader->daddr
 * uses below).  NOTE(review): member declarations are not visible in
 * this chunk. */
60 typedef CLIB_PACKED (struct {
63 //IPv6 header + Fragmentation header will be here
64 //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
66 }) ip4_mapt_pseudo_header_t;
73 } icmp_to_icmp6_ctx_t;
/* Outer-header callback for ICMP translation: derives the IPv6 source
 * from the IPv4 source via the domain's embedded-address rule, and the
 * IPv6 destination from the MAP prefix/suffix of the IPv4 destination
 * plus the receive port.
 * NOTE(review): return statement / closing brace not visible here. */
76 ip4_to_ip6_set_icmp_cb (vlib_buffer_t * b, ip4_header_t * ip4,
77 ip6_header_t * ip6, void *arg)
79 icmp_to_icmp6_ctx_t *ctx = arg;
81 ip4_map_t_embedded_address (ctx->d, &ip6->src_address, &ip4->src_address);
82 ip6->dst_address.as_u64[0] =
83 map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
84 ip6->dst_address.as_u64[1] =
85 map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
/* Inner-header callback for ICMP error translation: address roles are
 * swapped relative to the outer callback because the inner packet
 * travels in the opposite direction.
 * NOTE(review): return statement / closing brace not visible here. */
91 ip4_to_ip6_set_inner_icmp_cb (vlib_buffer_t * b, ip4_header_t * ip4,
92 ip6_header_t * ip6, void *arg)
94 icmp_to_icmp6_ctx_t *ctx = arg;
95 ip4_address_t old_src, old_dst;
/* Capture the original IPv4 addresses before they are overwritten. */
97 old_src.as_u32 = ip4->src_address.as_u32;
98 old_dst.as_u32 = ip4->dst_address.as_u32;
100 //Note that the source address is within the domain
101 //while the destination address is the one outside the domain
102 ip4_map_t_embedded_address (ctx->d, &ip6->dst_address, &old_dst);
103 ip6->src_address.as_u64[0] =
104 map_get_pfx_net (ctx->d, old_src.as_u32, ctx->recv_port);
105 ip6->src_address.as_u64[1] =
106 map_get_sfx_net (ctx->d, old_src.as_u32, ctx->recv_port);
/* Node function for ip4-map-t-icmp: translates IPv4 ICMP packets to
 * IPv6 ICMP6 using the callbacks above, updates per-domain TX counters,
 * and selects fragmentation / lookup-bypass next nodes as needed.
 * NOTE(review): this chunk is missing many interior lines (declarations,
 * braces); comments below describe only what is visible. */
112 ip4_map_t_icmp (vlib_main_t * vm,
113 vlib_node_runtime_t * node, vlib_frame_t * frame)
115 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
116 vlib_node_runtime_t *error_node =
117 vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
118 from = vlib_frame_vector_args (frame);
119 n_left_from = frame->n_vectors;
120 next_index = node->cached_next_index;
121 vlib_combined_counter_main_t *cm = map_main.domain_counters;
122 u32 thread_index = vm->thread_index;
124 while (n_left_from > 0)
126 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
128 while (n_left_from > 0 && n_left_to_next > 0)
132 ip4_mapt_icmp_next_t next0;
136 icmp_to_icmp6_ctx_t ctx0;
139 next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
140 pi0 = to_next[0] = from[0];
145 error0 = MAP_ERROR_NONE;
147 p0 = vlib_get_buffer (vm, pi0);
/* Skip past the pseudo-header prepended by ip4_map_t(). */
148 vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
150 clib_net_to_host_u16 (((ip4_header_t *)
151 vlib_buffer_get_current (p0))->length);
153 pool_elt_at_index (map_main.domains,
154 vnet_buffer (p0)->map_t.map_domain_index);
156 ip40 = vlib_buffer_get_current (p0);
157 ctx0.recv_port = ip4_get_port (ip40, 0);
159 if (ctx0.recv_port == 0)
161 // In case of 1:1 mapping, we don't care about the port
162 if (!(d0->ea_bits_len == 0 && d0->rules))
164 error0 = MAP_ERROR_ICMP;
170 (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
171 ip4_to_ip6_set_inner_icmp_cb, &ctx0))
173 error0 = MAP_ERROR_ICMP;
/* Translated packet may exceed the domain MTU: hand off to ip6-frag. */
177 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
179 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
180 vnet_buffer (p0)->ip_frag.next_index = IP_FRAG_NEXT_IP6_LOOKUP;
181 next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
/* Optionally bypass ip6-lookup when a pre-resolved adjacency exists. */
185 next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
186 IP4_MAPT_ICMP_NEXT_IP6_REWRITE : next0;
189 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
191 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
194 map_t.map_domain_index, 1,
199 next0 = IP4_MAPT_ICMP_NEXT_DROP;
201 p0->error = error_node->errors[error0];
202 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
203 to_next, n_left_to_next, pi0,
206 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
208 return frame->n_vectors;
212 * Translate fragmented IPv4 UDP/TCP packet to IPv6.
/* Rewrites a non-first IPv4 fragment in place as IPv6 + fragmentation
 * header, taking the IPv6 addresses from the pseudo-header computed by
 * the classifier.  The IPv6 header is laid down in front of (partially
 * overlapping) the old IPv4 header, then the buffer is advanced to it.
 * NOTE(review): function signature/return lines are only partially
 * visible in this chunk. */
215 map_ip4_to_ip6_fragmented (vlib_buffer_t * p,
216 ip4_mapt_pseudo_header_t * pheader)
220 ip6_frag_hdr_t *frag;
222 ip4 = vlib_buffer_get_current (p);
/* Place the frag header so it immediately follows the new IPv6 header. */
223 frag = (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
225 (ip6_header_t *) u8_ptr_add (ip4,
226 sizeof (*ip4) - sizeof (*frag) -
228 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
230 //We know that the protocol was one of ICMP, TCP or UDP
231 //because the first fragment was found and cached
233 (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol;
234 frag->identification = frag_id_4to6 (ip4->fragment_id);
/* Carry over the IPv4 fragment offset and more-fragments flag. */
236 frag->fragment_offset_and_more =
237 ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip4),
239 (ip4->flags_and_fragment_offset) &
240 IP4_HEADER_FLAG_MORE_FRAGMENTS);
242 ip6->ip_version_traffic_class_and_flow_label =
243 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
/* IPv6 payload = IPv4 payload plus the new fragmentation header. */
244 ip6->payload_length =
245 clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
246 sizeof (*ip4) + sizeof (*frag));
247 ip6->hop_limit = ip4->ttl;
248 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
250 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
251 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
252 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
253 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
/* Node function for ip4-map-t-fragmented: pops the pseudo-header,
 * translates the fragment via map_ip4_to_ip6_fragmented(), and chooses
 * lookup / rewrite / frag / drop as the next node.
 * NOTE(review): interior lines (declarations, braces) are missing from
 * this chunk. */
259 ip4_map_t_fragmented (vlib_main_t * vm,
260 vlib_node_runtime_t * node, vlib_frame_t * frame)
262 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
263 from = vlib_frame_vector_args (frame);
264 n_left_from = frame->n_vectors;
265 next_index = node->cached_next_index;
266 vlib_node_runtime_t *error_node =
267 vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);
269 while (n_left_from > 0)
271 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
273 while (n_left_from > 0 && n_left_to_next > 0)
277 ip4_mapt_pseudo_header_t *pheader0;
278 ip4_mapt_fragmented_next_t next0;
280 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
281 pi0 = to_next[0] = from[0];
287 p0 = vlib_get_buffer (vm, pi0);
289 //Accessing pseudo header
290 pheader0 = vlib_buffer_get_current (p0);
291 vlib_buffer_advance (p0, sizeof (*pheader0));
/* Non-zero return means the translation failed; drop the fragment. */
293 if (map_ip4_to_ip6_fragmented (p0, pheader0))
295 p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
296 next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
/* Re-fragment if the translated packet exceeds the domain MTU. */
300 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
302 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
303 vnet_buffer (p0)->ip_frag.next_index =
304 IP_FRAG_NEXT_IP6_LOOKUP;
305 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
309 next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
310 IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE : next0;
314 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
315 to_next, n_left_to_next, pi0,
318 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
320 return frame->n_vectors;
324 * Translate IPv4 UDP/TCP packet to IPv6.
/* In-place IPv4 -> IPv6 header translation for TCP/UDP.  Fills in a
 * mandatory UDP checksum when the IPv4 one was zero, applies TCP MSS
 * clamping, emits a fragmentation header for first fragments, and
 * incrementally fixes the L4 checksum for the new addresses.
 * NOTE(review): several declaration/brace lines are missing from this
 * chunk. */
327 map_ip4_to_ip6_tcp_udp (vlib_buffer_t * p, ip4_mapt_pseudo_header_t * pheader)
329 map_main_t *mm = &map_main;
334 ip6_frag_hdr_t *frag;
336 ip4_address_t old_src, old_dst;
338 ip4 = vlib_buffer_get_current (p);
340 if (ip4->protocol == IP_PROTOCOL_UDP)
342 udp_header_t *udp = ip4_next_header (ip4);
343 checksum = &udp->checksum;
346 * UDP checksum is optional over IPv4 but mandatory for IPv6 We
347 * do not check udp->length sanity but use our safe computed
/* Zero UDP checksum: compute one over the IPv4 pseudo-header so the
 * incremental address fix-up below yields a valid IPv6 checksum. */
350 if (PREDICT_FALSE (!*checksum))
352 u16 udp_len = clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
353 csum = ip_incremental_checksum (0, udp, udp_len);
354 csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
356 ip_csum_with_carry (csum, clib_host_to_net_u16 (IP_PROTOCOL_UDP));
357 csum = ip_csum_with_carry (csum, *((u64 *) (&ip4->src_address)));
358 *checksum = ~ip_csum_fold (csum);
363 tcp_header_t *tcp = ip4_next_header (ip4);
/* Clamp TCP MSS option (mm->tcp_mss) and fold the checksum delta. */
366 csum = tcp->checksum;
367 map_mss_clamping (tcp, &csum, mm->tcp_mss);
368 tcp->checksum = ip_csum_fold (csum);
370 checksum = &tcp->checksum;
/* Remember original addresses for the incremental checksum update. */
373 old_src.as_u32 = ip4->src_address.as_u32;
374 old_dst.as_u32 = ip4->dst_address.as_u32;
376 /* Deal with fragmented packets */
377 if (PREDICT_FALSE (ip4->flags_and_fragment_offset &
378 clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
381 (ip6_header_t *) u8_ptr_add (ip4,
382 sizeof (*ip4) - sizeof (*ip6) -
385 (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
386 frag_id = frag_id_4to6 (ip4->fragment_id);
387 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
/* Unfragmented case: IPv6 header only, no fragmentation header. */
391 ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
392 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
396 ip6->ip_version_traffic_class_and_flow_label =
397 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
398 ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
399 ip6->hop_limit = ip4->ttl;
400 ip6->protocol = ip4->protocol;
401 if (PREDICT_FALSE (frag != NULL))
403 frag->next_hdr = ip6->protocol;
404 frag->identification = frag_id;
/* First fragment: offset 0, more-fragments set. */
406 frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more (0, 1);
407 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
408 ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
411 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
412 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
413 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
414 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
/* Incrementally update the L4 checksum: old IPv4 addrs out, new IPv6
 * addrs in. */
416 csum = ip_csum_sub_even (*checksum, old_src.as_u32);
417 csum = ip_csum_sub_even (csum, old_dst.as_u32);
418 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
419 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
420 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
421 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
422 *checksum = ip_csum_fold (csum);
/* Node function for ip4-map-t-tcp-udp: pops the pseudo-header,
 * translates via map_ip4_to_ip6_tcp_udp(), and selects lookup /
 * rewrite / frag / drop as the next node.
 * NOTE(review): interior lines (declarations, braces) are missing from
 * this chunk. */
428 ip4_map_t_tcp_udp (vlib_main_t * vm,
429 vlib_node_runtime_t * node, vlib_frame_t * frame)
431 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
432 from = vlib_frame_vector_args (frame);
433 n_left_from = frame->n_vectors;
434 next_index = node->cached_next_index;
435 vlib_node_runtime_t *error_node =
436 vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);
439 while (n_left_from > 0)
441 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
443 while (n_left_from > 0 && n_left_to_next > 0)
447 ip4_mapt_pseudo_header_t *pheader0;
448 ip4_mapt_tcp_udp_next_t next0;
450 pi0 = to_next[0] = from[0];
456 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
457 p0 = vlib_get_buffer (vm, pi0);
459 //Accessing pseudo header
460 pheader0 = vlib_buffer_get_current (p0);
461 vlib_buffer_advance (p0, sizeof (*pheader0));
/* Non-zero return means the translation failed; drop the packet. */
463 if (map_ip4_to_ip6_tcp_udp (p0, pheader0))
465 p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
466 next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
470 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
472 //Send to fragmentation node if necessary
473 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
474 vnet_buffer (p0)->ip_frag.next_index =
475 IP_FRAG_NEXT_IP6_LOOKUP;
476 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
480 next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
481 IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE : next0;
484 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
485 to_next, n_left_to_next, pi0,
488 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
491 return frame->n_vectors;
/* Classifies an IPv4 packet for MAP-T: picks the per-protocol next
 * node, records the L4 checksum offset within the future IPv6 packet,
 * extracts the destination port, and flags malformed / unsupported
 * packets via *error0.
 * NOTE(review): several brace/assignment lines are missing from this
 * chunk; comments describe only visible logic. */
494 static_always_inline void
495 ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
496 ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
497 u8 * error0, ip4_mapt_next_t * next0, u16 l4_dst_port)
/* Non-first fragments go to the fragmented node; the port comes from
 * shallow-virtual-reassembly state (l4_dst_port). */
499 if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
501 *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
502 if (d0->ea_bits_len == 0 && d0->rules)
508 *dst_port0 = l4_dst_port;
509 *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
/* TCP: checksum lives 36 bytes into the translated packet
 * (presumably 40-byte IPv6 header offset minus the 4 bytes consumed
 * before the checksum field — TODO confirm against ip6 layout). */
512 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
514 vnet_buffer (p0)->map_t.checksum_offset = 36;
515 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
516 *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
517 *dst_port0 = l4_dst_port;
519 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
521 vnet_buffer (p0)->map_t.checksum_offset = 26;
522 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
523 *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
524 *dst_port0 = l4_dst_port;
526 else if (ip40->protocol == IP_PROTOCOL_ICMP)
528 *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
/* 1:1 domains (ea_bits_len == 0 with rules) don't need a port; echo
 * request/reply use the ICMP id as the port. */
529 if (d0->ea_bits_len == 0 && d0->rules)
531 else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->type
533 || ((icmp46_header_t *)
535 sizeof (*ip40)))->type == ICMP4_echo_request)
536 *dst_port0 = l4_dst_port;
540 *error0 = MAP_ERROR_BAD_PROTOCOL;
/* Entry node for MAP-T IPv4->IPv6: validates the header, looks up the
 * MAP domain, handles TTL expiry and DF/MTU ICMP errors, classifies the
 * packet, enforces the PSID well-known-port check, prepends the
 * pseudo-header with the computed IPv6 addresses, and dispatches to the
 * per-protocol translation nodes.
 * NOTE(review): many interior lines (declarations, braces, goto labels)
 * are missing from this chunk. */
545 ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
547 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
548 vlib_node_runtime_t *error_node =
549 vlib_node_get_runtime (vm, ip4_map_t_node.index);
550 from = vlib_frame_vector_args (frame);
551 n_left_from = frame->n_vectors;
552 next_index = node->cached_next_index;
553 vlib_combined_counter_main_t *cm = map_main.domain_counters;
554 u32 thread_index = vm->thread_index;
556 while (n_left_from > 0)
558 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
560 while (n_left_from > 0 && n_left_to_next > 0)
566 ip4_mapt_next_t next0 = 0;
570 ip4_mapt_pseudo_header_t *pheader0;
572 pi0 = to_next[0] = from[0];
577 error0 = MAP_ERROR_NONE;
579 p0 = vlib_get_buffer (vm, pi0);
/* Destination port recovered by shallow virtual reassembly. */
581 u16 l4_dst_port = vnet_buffer (p0)->ip.reass.l4_dst_port;
583 ip40 = vlib_buffer_get_current (p0);
584 ip4_len0 = clib_host_to_net_u16 (ip40->length);
/* Reject truncated packets and packets with IP options (!= 0x45). */
585 if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
586 ip40->ip_version_and_header_length != 0x45))
588 error0 = MAP_ERROR_UNKNOWN;
591 d0 = ip4_map_get_domain (&ip40->dst_address,
592 &vnet_buffer (p0)->map_t.map_domain_index,
596 { /* Guess it wasn't for us */
597 vnet_feature_next (&next0, p0);
/* TTL would hit zero after translation: emit time-exceeded. */
603 if (PREDICT_FALSE (ip40->ttl == 1))
605 icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
606 ICMP4_time_exceeded_ttl_exceeded_in_transit,
608 p0->error = error_node->errors[MAP_ERROR_TIME_EXCEEDED];
609 next0 = IP4_MAPT_NEXT_ICMP_ERROR;
614 ip40->flags_and_fragment_offset &
615 clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
617 vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
/* DF set and the translated (larger) packet would exceed the domain
 * MTU: emit fragmentation-needed, unless frag_ignore_df overrides. */
620 (df0 && !map_main.frag_ignore_df
623 (sizeof (ip6_header_t) - sizeof (ip4_header_t))) >
624 vnet_buffer (p0)->map_t.mtu)))
626 icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
627 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
628 vnet_buffer (p0)->map_t.mtu -
629 (sizeof (ip6_header_t) -
630 sizeof (ip4_header_t)));
631 p0->error = error_node->errors[MAP_ERROR_DF_SET];
632 next0 = IP4_MAPT_NEXT_ICMP_ERROR;
636 ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
637 &next0, l4_dst_port);
639 /* Verify that port is not among the well-known ports */
640 if ((d0->psid_length > 0 && d0->psid_offset > 0)
641 && (clib_net_to_host_u16 (dst_port0) <
642 (0x1 << (16 - d0->psid_offset))))
644 error0 = MAP_ERROR_SEC_CHECK;
647 //Add MAP-T pseudo header in front of the packet
648 vlib_buffer_advance (p0, -sizeof (*pheader0));
649 pheader0 = vlib_buffer_get_current (p0);
651 //Save addresses within the packet
652 ip4_map_t_embedded_address (d0, &pheader0->saddr,
654 pheader0->daddr.as_u64[0] =
655 map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
656 pheader0->daddr.as_u64[1] =
657 map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
/* ICMP TX is counted in the ICMP node, so skip it here. */
660 (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
662 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
665 map_t.map_domain_index, 1,
670 next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
671 p0->error = error_node->errors[error0];
673 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
675 map_add_trace (vm, node, p0, d0 - map_main.domains, dst_port0);
678 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
679 to_next, n_left_to_next, pi0,
682 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
684 return frame->n_vectors;
/* Register ip4-map-t on the ip4-unicast feature arc, ordered after
 * shallow-virtual-reassembly (which supplies l4_dst_port) and before
 * flow classification. */
688 VNET_FEATURE_INIT (ip4_map_t_feature, static) = {
689 .arc_name = "ip4-unicast",
690 .node_name = "ip4-map-t",
691 .runs_before = VNET_FEATURES ("ip4-flow-classify"),
692 .runs_after = VNET_FEATURES ("ip4-sv-reassembly-feature"),
/* Graph-node registration for ip4-map-t-fragmented. */
695 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
696 .function = ip4_map_t_fragmented,
697 .name = "ip4-map-t-fragmented",
698 .vector_size = sizeof(u32),
699 .format_trace = format_map_trace,
700 .type = VLIB_NODE_TYPE_INTERNAL,
702 .n_errors = MAP_N_ERROR,
703 .error_counters = map_error_counters,
705 .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
707 [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
708 [IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE] = "ip6-load-balance",
709 [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
710 [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
/* Graph-node registration for ip4-map-t-icmp. */
716 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
717 .function = ip4_map_t_icmp,
718 .name = "ip4-map-t-icmp",
719 .vector_size = sizeof(u32),
720 .format_trace = format_map_trace,
721 .type = VLIB_NODE_TYPE_INTERNAL,
723 .n_errors = MAP_N_ERROR,
724 .error_counters = map_error_counters,
726 .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
728 [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
729 [IP4_MAPT_ICMP_NEXT_IP6_REWRITE] = "ip6-load-balance",
730 [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
731 [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
/* Graph-node registration for ip4-map-t-tcp-udp. */
737 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
738 .function = ip4_map_t_tcp_udp,
739 .name = "ip4-map-t-tcp-udp",
740 .vector_size = sizeof(u32),
741 .format_trace = format_map_trace,
742 .type = VLIB_NODE_TYPE_INTERNAL,
744 .n_errors = MAP_N_ERROR,
745 .error_counters = map_error_counters,
747 .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
749 [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
750 [IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE] = "ip6-load-balance",
751 [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
752 [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
/* Graph-node registration for the ip4-map-t classifier node; fans out
 * to the per-protocol translation nodes registered above.
 * NOTE(review): the .name line is not visible in this chunk. */
758 VLIB_REGISTER_NODE(ip4_map_t_node) = {
759 .function = ip4_map_t,
761 .vector_size = sizeof(u32),
762 .format_trace = format_map_trace,
763 .type = VLIB_NODE_TYPE_INTERNAL,
765 .n_errors = MAP_N_ERROR,
766 .error_counters = map_error_counters,
768 .n_next_nodes = IP4_MAPT_N_NEXT,
770 [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
771 [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
772 [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
773 [IP4_MAPT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
774 [IP4_MAPT_NEXT_DROP] = "error-drop",
780 * fd.io coding-style-patch-verification: ON
783 * eval: (c-set-style "gnu")