2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
/* Next-node indices for the "ip4-map-t" classifier node: packets are
 * dispatched to a per-protocol translation node (see the node registration
 * at the bottom of this file). */
22 IP4_MAPT_NEXT_MAPT_TCP_UDP,
23 IP4_MAPT_NEXT_MAPT_ICMP,
24 IP4_MAPT_NEXT_MAPT_FRAGMENTED,
25 IP4_MAPT_NEXT_ICMP_ERROR,
/* Next-node indices for the "ip4-map-t-icmp" node. */
32 IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
33 IP4_MAPT_ICMP_NEXT_IP6_REWRITE,
34 IP4_MAPT_ICMP_NEXT_IP6_FRAG,
35 IP4_MAPT_ICMP_NEXT_DROP,
37 } ip4_mapt_icmp_next_t;
/* Next-node indices for the "ip4-map-t-tcp-udp" node. */
41 IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
42 IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE,
43 IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
44 IP4_MAPT_TCP_UDP_NEXT_DROP,
45 IP4_MAPT_TCP_UDP_N_NEXT
46 } ip4_mapt_tcp_udp_next_t;
/* Next-node indices for the "ip4-map-t-fragmented" node. */
50 IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
51 IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE,
52 IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
53 IP4_MAPT_FRAGMENTED_NEXT_DROP,
54 IP4_MAPT_FRAGMENTED_N_NEXT
55 } ip4_mapt_fragmented_next_t;
57 //This is used to pass information within the buffer data.
58 //Buffer structure being too small to contain big structures like this.
/* Prepended in front of the packet by the ip4-map-t classifier node and
 * consumed by the per-protocol translation nodes; carries the precomputed
 * IPv6 source/destination addresses (saddr/daddr) for the packet. */
59 typedef CLIB_PACKED (struct {
62   //IPv6 header + Fragmentation header will be here
63   //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
65 }) ip4_mapt_pseudo_header_t;
71 } icmp_to_icmp6_ctx_t;
/*
 * Outer-header address callback for ICMP translation (passed to
 * icmp_to_icmp6 via ip4_to_ip6_set_icmp_cb below):
 *  - IPv6 source  = MAP embedded address of the IPv4 source (inside domain)
 *  - IPv6 dest    = MAP prefix+suffix derived from the IPv4 destination
 *                   and the receiver port carried in the context.
 */
74 ip4_to_ip6_set_icmp_cb (vlib_buffer_t * b, ip4_header_t * ip4,
75 ip6_header_t * ip6, void *arg)
77 icmp_to_icmp6_ctx_t *ctx = arg;
79 ip4_map_t_embedded_address (ctx->d, &ip6->src_address, &ip4->src_address);
80 ip6->dst_address.as_u64[0] =
81 map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
82 ip6->dst_address.as_u64[1] =
83 map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
/*
 * Inner-header address callback for ICMP error translation: the embedded
 * (offending) packet's direction is reversed relative to the outer header,
 * so here the IPv4 *source* is the in-domain address.
 */
89 ip4_to_ip6_set_inner_icmp_cb (vlib_buffer_t * b, ip4_header_t * ip4,
90 ip6_header_t * ip6, void *arg)
92 icmp_to_icmp6_ctx_t *ctx = arg;
93 ip4_address_t old_src, old_dst;
/* Copy the addresses before the header is overwritten in place. */
95 old_src.as_u32 = ip4->src_address.as_u32;
96 old_dst.as_u32 = ip4->dst_address.as_u32;
98 //Note that the source address is within the domain
99 //while the destination address is the one outside the domain
100 ip4_map_t_embedded_address (ctx->d, &ip6->dst_address, &old_dst);
101 ip6->src_address.as_u64[0] =
102 map_get_pfx_net (ctx->d, old_src.as_u32, ctx->recv_port);
103 ip6->src_address.as_u64[1] =
104 map_get_sfx_net (ctx->d, old_src.as_u32, ctx->recv_port);
/*
 * "ip4-map-t-icmp" node function: translates IPv4 ICMP packets of a MAP-T
 * domain to ICMPv6 using icmp_to_icmp6() with the two address callbacks
 * above.  Oversized results are diverted to IPv6 fragmentation; per-domain
 * TX counters are incremented on success.
 */
110 ip4_map_t_icmp (vlib_main_t * vm,
111 vlib_node_runtime_t * node, vlib_frame_t * frame)
113 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
114 vlib_node_runtime_t *error_node =
115 vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
116 from = vlib_frame_vector_args (frame);
117 n_left_from = frame->n_vectors;
118 next_index = node->cached_next_index;
119 vlib_combined_counter_main_t *cm = map_main.domain_counters;
120 u32 thread_index = vm->thread_index;
/* Standard single-buffer VPP dispatch loop. */
122 while (n_left_from > 0)
124 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
126 while (n_left_from > 0 && n_left_to_next > 0)
130 ip4_mapt_icmp_next_t next0;
134 icmp_to_icmp6_ctx_t ctx0;
137 next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
138 pi0 = to_next[0] = from[0];
143 error0 = MAP_ERROR_NONE;
145 p0 = vlib_get_buffer (vm, pi0);
146 vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
/* Byte count for the TX counter, taken before translation. */
148 clib_net_to_host_u16 (((ip4_header_t *)
149 vlib_buffer_get_current (p0))->length);
151 pool_elt_at_index (map_main.domains,
152 vnet_buffer (p0)->map_t.map_domain_index);
154 ip40 = vlib_buffer_get_current (p0);
/* For ICMP the "port" is the ICMP identifier (used for MAP PSID). */
155 ctx0.recv_port = ip4_get_port (ip40, 0);
157 if (ctx0.recv_port == 0)
159 // In case of 1:1 mapping, we don't care about the port
160 if (!(d0->ea_bits_len == 0 && d0->rules))
162 error0 = MAP_ERROR_ICMP;
/* Perform the actual ICMP->ICMP6 translation; non-zero means failure. */
168 (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
169 ip4_to_ip6_set_inner_icmp_cb, &ctx0))
171 error0 = MAP_ERROR_ICMP;
/* Translated packet exceeds the domain MTU: send to ip6-frag. */
175 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
177 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
178 vnet_buffer (p0)->ip_frag.next_index = IP_FRAG_NEXT_IP6_LOOKUP;
179 next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
/* Optional pre-resolved next-hop bypasses the ip6-lookup node. */
183 next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
184 IP4_MAPT_ICMP_NEXT_IP6_REWRITE : next0;
187 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
189 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
192 map_t.map_domain_index, 1,
197 next0 = IP4_MAPT_ICMP_NEXT_DROP;
199 p0->error = error_node->errors[error0];
200 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
201 to_next, n_left_to_next, pi0,
204 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
206 return frame->n_vectors;
210 * Translate fragmented IPv4 UDP/TCP packet to IPv6.
/*
 * Builds the IPv6 header + IPv6 fragment header in place, overlapping the
 * start of the IPv4 header, then rewinds the buffer by the size difference
 * (sizeof(ip6) + sizeof(frag) - sizeof(ip4)).  Addresses come from the
 * pseudo-header computed by the classifier node.
 */
213 map_ip4_to_ip6_fragmented (vlib_buffer_t * p,
214 ip4_mapt_pseudo_header_t * pheader)
218 ip6_frag_hdr_t *frag;
220 ip4 = vlib_buffer_get_current (p);
/* Fragment header sits immediately before the IPv4 L4 payload; the IPv6
 * header sits before that. */
221 frag = (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
223 (ip6_header_t *) u8_ptr_add (ip4,
224 sizeof (*ip4) - sizeof (*frag) -
226 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
228 //We know that the protocol was one of ICMP, TCP or UDP
229 //because the first fragment was found and cached
231 (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol;
/* Carry the IPv4 fragment identity/offset/MF into the IPv6 frag header. */
232 frag->identification = frag_id_4to6 (ip4->fragment_id);
234 frag->fragment_offset_and_more =
235 ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip4),
237 (ip4->flags_and_fragment_offset) &
238 IP4_HEADER_FLAG_MORE_FRAGMENTS);
/* Version 6, TOS copied into the traffic-class bits. */
240 ip6->ip_version_traffic_class_and_flow_label =
241 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
/* IPv6 payload = IPv4 total length minus IPv4 header, plus frag header. */
242 ip6->payload_length =
243 clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
244 sizeof (*ip4) + sizeof (*frag));
245 ip6->hop_limit = ip4->ttl;
246 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
248 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
249 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
250 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
251 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
/*
 * "ip4-map-t-fragmented" node function: pops the MAP-T pseudo-header and
 * translates non-first IPv4 fragments with map_ip4_to_ip6_fragmented(),
 * diverting to ip6-frag when the result exceeds the domain MTU.
 */
257 ip4_map_t_fragmented (vlib_main_t * vm,
258 vlib_node_runtime_t * node, vlib_frame_t * frame)
260 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
261 from = vlib_frame_vector_args (frame);
262 n_left_from = frame->n_vectors;
263 next_index = node->cached_next_index;
264 vlib_node_runtime_t *error_node =
265 vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);
267 while (n_left_from > 0)
269 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
271 while (n_left_from > 0 && n_left_to_next > 0)
275 ip4_mapt_pseudo_header_t *pheader0;
276 ip4_mapt_fragmented_next_t next0;
278 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
279 pi0 = to_next[0] = from[0];
285 p0 = vlib_get_buffer (vm, pi0);
287 //Accessing pseudo header
288 pheader0 = vlib_buffer_get_current (p0);
289 vlib_buffer_advance (p0, sizeof (*pheader0));
/* Non-zero return means the fragment could not be translated. */
291 if (map_ip4_to_ip6_fragmented (p0, pheader0))
293 p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
294 next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
/* Re-fragment if the translated packet exceeds the domain MTU. */
298 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
300 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
301 vnet_buffer (p0)->ip_frag.next_index =
302 IP_FRAG_NEXT_IP6_LOOKUP;
303 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
/* Optional pre-resolved next-hop bypasses the ip6-lookup node. */
307 next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
308 IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE : next0;
312 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
313 to_next, n_left_to_next, pi0,
316 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
318 return frame->n_vectors;
322 * Translate IPv4 UDP/TCP packet to IPv6.
/*
 * In-place header translation for (possibly first-fragment) TCP/UDP
 * packets.  Also:
 *  - computes a UDP checksum when the IPv4 one was zero (optional over
 *    IPv4, mandatory over IPv6),
 *  - applies TCP MSS clamping per domain configuration,
 *  - incrementally patches the L4 checksum for the address change.
 */
325 map_ip4_to_ip6_tcp_udp (vlib_buffer_t * p, ip4_mapt_pseudo_header_t * pheader)
327 map_main_t *mm = &map_main;
332 ip6_frag_hdr_t *frag;
334 ip4_address_t old_src, old_dst;
336 ip4 = vlib_buffer_get_current (p);
338 if (ip4->protocol == IP_PROTOCOL_UDP)
340 udp_header_t *udp = ip4_next_header (ip4);
341 checksum = &udp->checksum;
344 * UDP checksum is optional over IPv4 but mandatory for IPv6 We
345 * do not check udp->length sanity but use our safe computed
348 if (PREDICT_FALSE (!*checksum))
350 u16 udp_len = clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
/* Checksum over the UDP datagram plus a pseudo-header built from
 * length, protocol and the IPv4 addresses (as one u64 load). */
351 csum = ip_incremental_checksum (0, udp, udp_len);
352 csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
354 ip_csum_with_carry (csum, clib_host_to_net_u16 (IP_PROTOCOL_UDP));
355 csum = ip_csum_with_carry (csum, *((u64 *) (&ip4->src_address)));
356 *checksum = ~ip_csum_fold (csum);
361 tcp_header_t *tcp = ip4_next_header (ip4);
/* Clamp TCP MSS option if configured; checksum is patched in step. */
364 csum = tcp->checksum;
365 map_mss_clamping (tcp, &csum, mm->tcp_mss);
366 tcp->checksum = ip_csum_fold (csum);
368 checksum = &tcp->checksum;
/* Save the IPv4 addresses before the header is overwritten in place. */
371 old_src.as_u32 = ip4->src_address.as_u32;
372 old_dst.as_u32 = ip4->dst_address.as_u32;
374 /* Deal with fragmented packets */
375 if (PREDICT_FALSE (ip4->flags_and_fragment_offset &
376 clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
/* First fragment: reserve room for an IPv6 fragment header as well. */
379 (ip6_header_t *) u8_ptr_add (ip4,
380 sizeof (*ip4) - sizeof (*ip6) -
383 (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
384 frag_id = frag_id_4to6 (ip4->fragment_id);
385 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
/* Unfragmented: only the IPv6 header overlaps the old IPv4 header. */
389 ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
390 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
394 ip6->ip_version_traffic_class_and_flow_label =
395 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
396 ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
397 ip6->hop_limit = ip4->ttl;
398 ip6->protocol = ip4->protocol;
399 if (PREDICT_FALSE (frag != NULL))
401 frag->next_hdr = ip6->protocol;
402 frag->identification = frag_id;
/* First fragment: offset 0, more-fragments set. */
404 frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more (0, 1);
405 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
406 ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
409 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
410 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
411 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
412 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
/* Incremental L4-checksum fixup: remove old IPv4 addresses, add the new
 * IPv6 addresses (other pseudo-header fields are unchanged). */
414 csum = ip_csum_sub_even (*checksum, old_src.as_u32);
415 csum = ip_csum_sub_even (csum, old_dst.as_u32);
416 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
417 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
418 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
419 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
420 *checksum = ip_csum_fold (csum);
/*
 * "ip4-map-t-tcp-udp" node function: pops the MAP-T pseudo-header and
 * translates TCP/UDP packets with map_ip4_to_ip6_tcp_udp(), diverting to
 * ip6-frag when the result exceeds the domain MTU.
 */
426 ip4_map_t_tcp_udp (vlib_main_t * vm,
427 vlib_node_runtime_t * node, vlib_frame_t * frame)
429 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
430 from = vlib_frame_vector_args (frame);
431 n_left_from = frame->n_vectors;
432 next_index = node->cached_next_index;
433 vlib_node_runtime_t *error_node =
434 vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);
437 while (n_left_from > 0)
439 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
441 while (n_left_from > 0 && n_left_to_next > 0)
445 ip4_mapt_pseudo_header_t *pheader0;
446 ip4_mapt_tcp_udp_next_t next0;
448 pi0 = to_next[0] = from[0];
454 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
455 p0 = vlib_get_buffer (vm, pi0);
457 //Accessing pseudo header
458 pheader0 = vlib_buffer_get_current (p0);
459 vlib_buffer_advance (p0, sizeof (*pheader0));
/* Non-zero return means the packet could not be translated. */
461 if (map_ip4_to_ip6_tcp_udp (p0, pheader0))
463 p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
464 next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
468 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
470 //Send to fragmentation node if necessary
471 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
472 vnet_buffer (p0)->ip_frag.next_index =
473 IP_FRAG_NEXT_IP6_LOOKUP;
474 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
/* Optional pre-resolved next-hop bypasses the ip6-lookup node. */
478 next0 = ip4_map_ip6_lookup_bypass (p0, NULL) ?
479 IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE : next0;
482 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
483 to_next, n_left_to_next, pi0,
486 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
489 return frame->n_vectors;
/*
 * Classify an IPv4 packet for MAP-T: picks the next (per-protocol) node,
 * extracts the destination port used for MAP PSID derivation, and flags
 * malformed / unsupported packets via *error0.
 */
492 static_always_inline void
493 ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
494 ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
495 u8 * error0, ip4_mapt_next_t * next0, u16 l4_dst_port)
/* Non-first fragments cannot carry L4 ports; handled by the
 * fragmented node using state cached from the first fragment. */
497 if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
499 *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
500 if (d0->ea_bits_len == 0 && d0->rules)
506 *dst_port0 = l4_dst_port;
507 *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
510 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
/* 36 = offset of the TCP checksum from the IP header start
 * (20 B IPv4 header + 16 B into the TCP header). */
512 vnet_buffer (p0)->map_t.checksum_offset = 36;
513 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
514 *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
515 *dst_port0 = l4_dst_port;
517 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
/* 26 = 20 B IPv4 header + 6 B into the UDP header (checksum field). */
519 vnet_buffer (p0)->map_t.checksum_offset = 26;
520 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
521 *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
522 *dst_port0 = l4_dst_port;
524 else if (ip40->protocol == IP_PROTOCOL_ICMP)
526 *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
/* 1:1 mapped domains need no port; otherwise only echo request/reply
 * (which carry an identifier usable as a port) are accepted. */
527 if (d0->ea_bits_len == 0 && d0->rules)
529 else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->type
531 || ((icmp46_header_t *)
533 sizeof (*ip40)))->type == ICMP4_echo_request)
534 *dst_port0 = l4_dst_port;
538 *error0 = MAP_ERROR_BAD_PROTOCOL;
/*
 * "ip4-map-t" feature node: entry point of the MAP-T IPv4->IPv6 path.
 * Validates the packet, looks up the MAP domain, checks TTL and DF/MTU,
 * classifies by protocol, performs the MAP port-security check, prepends
 * the pseudo-header with precomputed IPv6 addresses, and dispatches to the
 * per-protocol translation node.
 */
543 ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
545 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
546 vlib_node_runtime_t *error_node =
547 vlib_node_get_runtime (vm, ip4_map_t_node.index);
548 from = vlib_frame_vector_args (frame);
549 n_left_from = frame->n_vectors;
550 next_index = node->cached_next_index;
551 vlib_combined_counter_main_t *cm = map_main.domain_counters;
552 u32 thread_index = vm->thread_index;
554 while (n_left_from > 0)
556 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
558 while (n_left_from > 0 && n_left_to_next > 0)
564 ip4_mapt_next_t next0 = 0;
568 ip4_mapt_pseudo_header_t *pheader0;
570 pi0 = to_next[0] = from[0];
575 error0 = MAP_ERROR_NONE;
577 p0 = vlib_get_buffer (vm, pi0);
/* L4 destination port recovered by the shallow-virtual reassembly
 * feature that runs before this node (see .runs_after below). */
579 u16 l4_dst_port = vnet_buffer (p0)->ip.reass.l4_dst_port;
581 ip40 = vlib_buffer_get_current (p0);
582 ip4_len0 = clib_host_to_net_u16 (ip40->length);
/* Reject truncated packets and anything with IP options (only a
 * plain 20-byte header, version/IHL == 0x45, is handled). */
583 if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
584 ip40->ip_version_and_header_length != 0x45))
586 error0 = MAP_ERROR_UNKNOWN;
589 d0 = ip4_map_get_domain (&ip40->dst_address,
590 &vnet_buffer (p0)->map_t.map_domain_index,
594 { /* Guess it wasn't for us */
595 vnet_feature_next (&next0, p0);
/* TTL would expire: generate ICMP time-exceeded. */
601 if (PREDICT_FALSE (ip40->ttl == 1))
603 icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
604 ICMP4_time_exceeded_ttl_exceeded_in_transit,
606 p0->error = error_node->errors[MAP_ERROR_TIME_EXCEEDED];
607 next0 = IP4_MAPT_NEXT_ICMP_ERROR;
612 ip40->flags_and_fragment_offset &
613 clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
615 vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
/* DF set and the translated packet (larger by the IPv6/IPv4 header
 * size difference) would exceed the MTU: ICMP frag-needed. */
618 (df0 && !map_main.frag_ignore_df
621 (sizeof (ip6_header_t) - sizeof (ip4_header_t))) >
622 vnet_buffer (p0)->map_t.mtu)))
624 icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
625 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
626 vnet_buffer (p0)->map_t.mtu -
627 (sizeof (ip6_header_t) -
628 sizeof (ip4_header_t)));
629 p0->error = error_node->errors[MAP_ERROR_DF_SET];
630 next0 = IP4_MAPT_NEXT_ICMP_ERROR;
634 ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
635 &next0, l4_dst_port);
637 /* Verify that port is not among the well-known ports */
638 if ((d0->psid_length > 0 && d0->psid_offset > 0)
639 && (clib_net_to_host_u16 (dst_port0) <
640 (0x1 << (16 - d0->psid_offset))))
642 error0 = MAP_ERROR_SEC_CHECK;
645 //Add MAP-T pseudo header in front of the packet
646 vlib_buffer_advance (p0, -sizeof (*pheader0));
647 pheader0 = vlib_buffer_get_current (p0);
649 //Save addresses within the packet
650 ip4_map_t_embedded_address (d0, &pheader0->saddr,
652 pheader0->daddr.as_u64[0] =
653 map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
654 pheader0->daddr.as_u64[1] =
655 map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
/* ICMP counters are accounted in the ICMP node instead. */
658 (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
660 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
663 map_t.map_domain_index, 1,
668 next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
669 p0->error = error_node->errors[error0];
671 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
673 map_add_trace (vm, node, p0, d0 - map_main.domains, dst_port0);
676 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
677 to_next, n_left_to_next, pi0,
680 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
682 return frame->n_vectors;
/* Register "ip4-map-t" on the ip4-unicast feature arc; it must run after
 * shallow-virtual reassembly, which supplies the cached L4 dest port. */
685 VNET_FEATURE_INIT (ip4_map_t_feature, static) = {
686 .arc_name = "ip4-unicast",
687 .node_name = "ip4-map-t",
688 .runs_before = VNET_FEATURES ("ip4-flow-classify"),
689 .runs_after = VNET_FEATURES ("ip4-sv-reassembly-feature"),
/* Graph-node registration for the fragmented-packet translation node. */
692 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
693 .function = ip4_map_t_fragmented,
694 .name = "ip4-map-t-fragmented",
695 .vector_size = sizeof(u32),
696 .format_trace = format_map_trace,
697 .type = VLIB_NODE_TYPE_INTERNAL,
699 .n_errors = MAP_N_ERROR,
700 .error_counters = map_error_counters,
702 .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
704 [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
705 [IP4_MAPT_FRAGMENTED_NEXT_IP6_REWRITE] = "ip6-load-balance",
706 [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
707 [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
/* Graph-node registration for the ICMP translation node. */
711 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
712 .function = ip4_map_t_icmp,
713 .name = "ip4-map-t-icmp",
714 .vector_size = sizeof(u32),
715 .format_trace = format_map_trace,
716 .type = VLIB_NODE_TYPE_INTERNAL,
718 .n_errors = MAP_N_ERROR,
719 .error_counters = map_error_counters,
721 .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
723 [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
724 [IP4_MAPT_ICMP_NEXT_IP6_REWRITE] = "ip6-load-balance",
725 [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
726 [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
/* Graph-node registration for the TCP/UDP translation node. */
730 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
731 .function = ip4_map_t_tcp_udp,
732 .name = "ip4-map-t-tcp-udp",
733 .vector_size = sizeof(u32),
734 .format_trace = format_map_trace,
735 .type = VLIB_NODE_TYPE_INTERNAL,
737 .n_errors = MAP_N_ERROR,
738 .error_counters = map_error_counters,
740 .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
742 [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
743 [IP4_MAPT_TCP_UDP_NEXT_IP6_REWRITE] = "ip6-load-balance",
744 [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
745 [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
/* Graph-node registration for the MAP-T classifier/entry node. */
749 VLIB_REGISTER_NODE(ip4_map_t_node) = {
750 .function = ip4_map_t,
752 .vector_size = sizeof(u32),
753 .format_trace = format_map_trace,
754 .type = VLIB_NODE_TYPE_INTERNAL,
756 .n_errors = MAP_N_ERROR,
757 .error_counters = map_error_counters,
759 .n_next_nodes = IP4_MAPT_N_NEXT,
761 [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
762 [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
763 [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
764 [IP4_MAPT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
765 [IP4_MAPT_NEXT_DROP] = "error-drop",
770 * fd.io coding-style-patch-verification: ON
773 * eval: (c-set-style "gnu")