2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
// Next-node index enums for the MAP-T IPv4 graph nodes defined below.
// NOTE(review): the "typedef enum {" openers for the first two enums are
// not visible in this chunk; member lists may be partial.
//
// Dispatch targets of the top-level ip4-map-t classifier node:
22 IP4_MAPT_NEXT_MAPT_TCP_UDP,
23 IP4_MAPT_NEXT_MAPT_ICMP,
24 IP4_MAPT_NEXT_MAPT_FRAGMENTED,
// Next nodes of the ip4-map-t-icmp node: forward translated packet,
// fragment it first, or drop on error.
31 IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
32 IP4_MAPT_ICMP_NEXT_IP6_FRAG,
33 IP4_MAPT_ICMP_NEXT_DROP,
35 } ip4_mapt_icmp_next_t;
// Next nodes of the ip4-map-t-tcp-udp node (same lookup/frag/drop pattern).
39 IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
40 IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
41 IP4_MAPT_TCP_UDP_NEXT_DROP,
42 IP4_MAPT_TCP_UDP_N_NEXT
43 } ip4_mapt_tcp_udp_next_t;
// Next nodes of the ip4-map-t-fragmented node (non-first fragments).
47 IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
48 IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
49 IP4_MAPT_FRAGMENTED_NEXT_DROP,
50 IP4_MAPT_FRAGMENTED_N_NEXT
51 } ip4_mapt_fragmented_next_t;
53 //This is used to pass information within the buffer data.
54 //Buffer structure being too small to contain big structures like this.
// Pseudo-header prepended to the packet by the classifier node and consumed
// by the per-protocol translation nodes.  The downstream code reads
// pheader->saddr / pheader->daddr (IPv6 source/destination computed during
// classification) — the corresponding field declarations are not visible in
// this chunk.  TODO(review): confirm field list against the full file.
56 typedef CLIB_PACKED (struct {
59 //IPv6 header + Fragmentation header will be here
60 //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
62 }) ip4_mapt_pseudo_header_t;
// Cache the L4 destination port of an IPv4 first-fragment in the MAP
// reassembly table, so that subsequent fragments (which carry no L4 header)
// can still be translated.  The protocol in the lookup key is rewritten
// ICMP -> ICMP6 to match the translated IPv6 side.
// NOTE(review): several interior lines of this function (locals, the tail of
// the map_ip4_reass_get() call, the return) are missing from this chunk.
66 static_always_inline int
67 ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
// Reassembly table access is guarded by the global MAP reass lock.
70 map_ip4_reass_lock ();
72 map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
75 IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
80 map_ip4_reass_unlock ();
// Look up the destination port previously cached by ip4_map_fragment_cache()
// for a non-first fragment.  Returns the port, or -1 when no reassembly
// state exists (first fragment not yet seen / state expired).
// NOTE(review): interior lines of this function are missing from this chunk.
84 static_always_inline i32
85 ip4_map_fragment_get_port (ip4_header_t * ip4)
88 map_ip4_reass_lock ();
90 map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
93 IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
// r is the reassembly entry returned above; -1 signals "no state".
95 i32 ret = r ? r->port : -1;
96 map_ip4_reass_unlock ();
104 } icmp_to_icmp6_ctx_t;
// Address-rewrite callback for the outer header of a translated ICMP packet
// (invoked by icmp_to_icmp6 via ip4_to_ip6_icmp_cb plumbing):
// - IPv6 source is the IPv4-embedded address of the original IPv4 source;
// - IPv6 destination is the MAP address built from the domain's prefix and
//   suffix rules for the IPv4 destination plus the receive-side port.
107 ip4_to_ip6_set_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *arg)
109 icmp_to_icmp6_ctx_t *ctx = arg;
111 ip4_map_t_embedded_address (ctx->d, &ip6->src_address, &ip4->src_address);
112 ip6->dst_address.as_u64[0] =
113 map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
114 ip6->dst_address.as_u64[1] =
115 map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
// Address-rewrite callback for the INNER header embedded in an ICMP error
// payload.  Direction is mirrored versus the outer header: here the inner
// source belongs to the MAP domain (gets prefix/suffix MAP address) and the
// inner destination is outside the domain (gets the IPv4-embedded address).
121 ip4_to_ip6_set_inner_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6,
124 icmp_to_icmp6_ctx_t *ctx = arg;
126 //Note that the source address is within the domain
127 //while the destination address is the one outside the domain
128 ip4_map_t_embedded_address (ctx->d, &ip6->dst_address, &ip4->dst_address);
129 ip6->src_address.as_u64[0] =
130 map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
131 ip6->src_address.as_u64[1] =
132 map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
// Node function for ip4-map-t-icmp: translates IPv4 ICMP packets to IPv6
// ICMPv6 (including the embedded header of ICMP errors) using the two
// callbacks above, then forwards to ip6-lookup or to the IPv6 fragmentation
// node when the result exceeds the domain MTU.
// NOTE(review): many interior lines (loop braces, locals, the icmp_to_icmp6
// call line, counter arguments, enqueue tails) are missing from this chunk.
138 ip4_map_t_icmp (vlib_main_t * vm,
139 vlib_node_runtime_t * node, vlib_frame_t * frame)
141 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
142 vlib_node_runtime_t *error_node =
143 vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
144 from = vlib_frame_vector_args (frame);
145 n_left_from = frame->n_vectors;
146 next_index = node->cached_next_index;
147 vlib_combined_counter_main_t *cm = map_main.domain_counters;
148 u32 thread_index = vm->thread_index;
// Standard single-buffer vlib dispatch loop.
150 while (n_left_from > 0)
152 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
154 while (n_left_from > 0 && n_left_to_next > 0)
158 ip4_mapt_icmp_next_t next0;
162 icmp_to_icmp6_ctx_t ctx0;
165 next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
166 pi0 = to_next[0] = from[0];
171 error0 = MAP_ERROR_NONE;
173 p0 = vlib_get_buffer (vm, pi0);
// Skip past the pseudo-header written by the classifier; ICMP translation
// recomputes addresses itself via the callbacks and does not use it.
174 vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
176 clib_net_to_host_u16 (((ip4_header_t *)
177 vlib_buffer_get_current (p0))->length);
179 pool_elt_at_index (map_main.domains,
180 vnet_buffer (p0)->map_t.map_domain_index);
182 ip40 = vlib_buffer_get_current (p0);
// ip4_get_port() with icmp=1 extracts the ICMP identifier as "port".
183 ctx0.recv_port = ip4_get_port (ip40, 1);
185 if (ctx0.recv_port == 0)
187 // In case of 1:1 mapping, we don't care about the port
188 if (!(d0->ea_bits_len == 0 && d0->rules))
190 error0 = MAP_ERROR_ICMP;
// icmp_to_icmp6() (call line not visible here) performs the actual
// RFC 7915 ICMP->ICMPv6 translation; non-zero means failure.
196 (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
197 ip4_to_ip6_set_inner_icmp_cb, &ctx0))
199 error0 = MAP_ERROR_ICMP;
// Divert to the IPv6 fragmentation node when the translated packet no
// longer fits the domain MTU.
203 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
205 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
206 vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
207 next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
// Account TX bytes/packets per domain only on success.
210 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
212 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
215 map_t.map_domain_index, 1,
220 next0 = IP4_MAPT_ICMP_NEXT_DROP;
222 p0->error = error_node->errors[error0];
223 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
224 to_next, n_left_to_next, pi0,
227 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
229 return frame->n_vectors;
233 * Translate fragmented IPv4 UDP/TCP packet to IPv6.
// Rewrites a non-first IPv4 fragment in place: the IPv4 header is replaced
// by an IPv6 header plus an IPv6 fragment extension header, reusing the
// port/address data carried in the classifier's pseudo-header.
// NOTE(review): the function signature line, some locals and the return are
// missing from this chunk.
236 map_ip4_to_ip6_fragmented (vlib_buffer_t * p,
237 ip4_mapt_pseudo_header_t * pheader)
241 ip6_frag_hdr_t *frag;
243 ip4 = vlib_buffer_get_current (p);
// Place the frag header and IPv6 header immediately before the IPv4
// payload: ip6 + frag replace the 20-byte IPv4 header in situ.
244 frag = (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
246 (ip6_header_t *) u8_ptr_add (ip4,
247 sizeof (*ip4) - sizeof (*frag) -
249 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
251 //We know that the protocol was one of ICMP, TCP or UDP
252 //because the first fragment was found and cached
254 (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol;
// frag_id_4to6() widens the 16-bit IPv4 ID into the 32-bit IPv6 ident.
255 frag->identification = frag_id_4to6 (ip4->fragment_id);
257 frag->fragment_offset_and_more =
258 ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip4),
260 (ip4->flags_and_fragment_offset) &
261 IP4_HEADER_FLAG_MORE_FRAGMENTS);
// Build the IPv6 header: version 6, TOS copied into traffic class.
263 ip6->ip_version_traffic_class_and_flow_label =
264 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
// Payload shrinks by the IPv4 header but grows by the frag header.
265 ip6->payload_length =
266 clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
267 sizeof (*ip4) + sizeof (*frag));
268 ip6->hop_limit = ip4->ttl;
269 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
// Addresses were precomputed by the classifier and carried in pheader.
271 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
272 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
273 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
274 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
// Node function for ip4-map-t-fragmented: consumes the pseudo-header, runs
// map_ip4_to_ip6_fragmented() on each non-first fragment, and forwards to
// ip6-lookup, ip6-frag (when above domain MTU) or drop on failure.
// NOTE(review): loop braces and enqueue tails are missing from this chunk.
280 ip4_map_t_fragmented (vlib_main_t * vm,
281 vlib_node_runtime_t * node, vlib_frame_t * frame)
283 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
284 from = vlib_frame_vector_args (frame);
285 n_left_from = frame->n_vectors;
286 next_index = node->cached_next_index;
287 vlib_node_runtime_t *error_node =
288 vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);
290 while (n_left_from > 0)
292 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
294 while (n_left_from > 0 && n_left_to_next > 0)
298 ip4_mapt_pseudo_header_t *pheader0;
299 ip4_mapt_fragmented_next_t next0;
301 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
302 pi0 = to_next[0] = from[0];
308 p0 = vlib_get_buffer (vm, pi0);
310 //Accessing pseudo header
311 pheader0 = vlib_buffer_get_current (p0);
312 vlib_buffer_advance (p0, sizeof (*pheader0));
// Non-zero return means the fragment could not be translated.
314 if (map_ip4_to_ip6_fragmented (p0, pheader0))
316 p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
317 next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
// Re-fragment if the translated packet exceeds the domain MTU.
321 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
323 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
324 vnet_buffer (p0)->ip_frag.next_index =
325 IP6_FRAG_NEXT_IP6_LOOKUP;
326 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
330 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
331 to_next, n_left_to_next, pi0,
334 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
336 return frame->n_vectors;
340 * Translate IPv4 UDP/TCP packet to IPv6.
// In-place header translation for unfragmented (or first-fragment) TCP/UDP:
// builds the IPv6 header (plus a fragment extension header when the IPv4
// packet has MF set), and incrementally patches the L4 checksum for the
// address change instead of recomputing it.
// NOTE(review): interior lines (locals, some statements, the return) are
// missing from this chunk.
343 map_ip4_to_ip6_tcp_udp (vlib_buffer_t * p, ip4_mapt_pseudo_header_t * pheader)
345 map_main_t *mm = &map_main;
350 ip6_frag_hdr_t *frag;
352 ip4_address_t old_src, old_dst;
354 ip4 = vlib_buffer_get_current (p);
356 if (ip4->protocol == IP_PROTOCOL_UDP)
358 udp_header_t *udp = ip4_next_header (ip4);
359 checksum = &udp->checksum;
362 * UDP checksum is optional over IPv4 but mandatory for IPv6 We
363 * do not check udp->length sanity but use our safe computed
// A zero IPv4 UDP checksum must be replaced by a real one before
// translation, since IPv6 forbids checksum 0 on UDP.
366 if (PREDICT_FALSE (!*checksum))
368 u16 udp_len = clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
369 csum = ip_incremental_checksum (0, udp, udp_len);
370 csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
372 ip_csum_with_carry (csum, clib_host_to_net_u16 (IP_PROTOCOL_UDP));
373 csum = ip_csum_with_carry (csum, *((u64 *) (&ip4->src_address)));
374 *checksum = ~ip_csum_fold (csum);
// TCP branch: apply MSS clamping (updates checksum incrementally).
379 tcp_header_t *tcp = ip4_next_header (ip4);
382 csum = tcp->checksum;
383 map_mss_clamping (tcp, &csum, mm->tcp_mss);
384 tcp->checksum = ip_csum_fold (csum);
386 checksum = &tcp->checksum;
// Remember the IPv4 addresses for the incremental checksum fixup below.
389 old_src.as_u32 = ip4->src_address.as_u32;
390 old_dst.as_u32 = ip4->dst_address.as_u32;
392 /* Deal with fragmented packets */
393 if (PREDICT_FALSE (ip4->flags_and_fragment_offset &
394 clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
397 (ip6_header_t *) u8_ptr_add (ip4,
398 sizeof (*ip4) - sizeof (*ip6) -
401 (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
402 frag_id = frag_id_4to6 (ip4->fragment_id);
403 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
// Unfragmented case: the IPv6 header simply overlays the IPv4 one.
407 ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
408 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
412 ip6->ip_version_traffic_class_and_flow_label =
413 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
414 ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
415 ip6->hop_limit = ip4->ttl;
416 ip6->protocol = ip4->protocol;
// First-fragment case: insert the fragment extension header fields.
417 if (PREDICT_FALSE (frag != NULL))
419 frag->next_hdr = ip6->protocol;
420 frag->identification = frag_id;
422 frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more (0, 1);
423 ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
424 ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
// Addresses were precomputed by the classifier and carried in pheader.
427 ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
428 ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
429 ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
430 ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
// Incremental pseudo-header checksum update: subtract the old IPv4
// addresses, add the new IPv6 addresses (RFC 1624 style).
432 csum = ip_csum_sub_even (*checksum, old_src.as_u32);
433 csum = ip_csum_sub_even (csum, old_dst.as_u32);
434 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
435 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
436 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
437 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
438 *checksum = ip_csum_fold (csum);
// Node function for ip4-map-t-tcp-udp: consumes the pseudo-header, runs
// map_ip4_to_ip6_tcp_udp() and forwards to ip6-lookup, ip6-frag (when above
// domain MTU) or drop on failure.
// NOTE(review): loop braces and enqueue tails are missing from this chunk.
444 ip4_map_t_tcp_udp (vlib_main_t * vm,
445 vlib_node_runtime_t * node, vlib_frame_t * frame)
447 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
448 from = vlib_frame_vector_args (frame);
449 n_left_from = frame->n_vectors;
450 next_index = node->cached_next_index;
451 vlib_node_runtime_t *error_node =
452 vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);
455 while (n_left_from > 0)
457 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
459 while (n_left_from > 0 && n_left_to_next > 0)
463 ip4_mapt_pseudo_header_t *pheader0;
464 ip4_mapt_tcp_udp_next_t next0;
466 pi0 = to_next[0] = from[0];
472 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
473 p0 = vlib_get_buffer (vm, pi0);
475 //Accessing pseudo header
476 pheader0 = vlib_buffer_get_current (p0);
477 vlib_buffer_advance (p0, sizeof (*pheader0));
// Non-zero return means translation failed; drop the packet.
479 if (map_ip4_to_ip6_tcp_udp (p0, pheader0))
481 p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
482 next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
486 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
488 //Send to fragmentation node if necessary
489 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
490 vnet_buffer (p0)->ip_frag.next_index =
491 IP6_FRAG_NEXT_IP6_LOOKUP;
492 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
495 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
496 to_next, n_left_to_next, pi0,
499 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
502 return frame->n_vectors;
// Classify an IPv4 packet for MAP-T translation: select the next node
// (tcp-udp / icmp / fragmented), extract the destination "port" used for
// MAP address computation, and flag malformed or unsupported packets.
// NOTE(review): some interior lines (braces, an else branch for the 1:1
// mapping cases) are missing from this chunk.
505 static_always_inline void
506 ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
507 ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
508 u8 * error0, ip4_mapt_next_t * next0)
// Non-first fragments have no L4 header: the port must come from the
// reassembly cache populated on the first fragment.
510 if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
512 *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
513 if (d0->ea_bits_len == 0 && d0->rules)
519 *dst_port0 = ip4_map_fragment_get_port (ip40);
520 *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
523 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
// 36 = 20 (IPv4 header) + 16 (checksum offset within TCP header).
525 vnet_buffer (p0)->map_t.checksum_offset = 36;
526 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
// 40 = minimal IPv4 + TCP header length.
527 *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
// Destination port sits 2 bytes into the L4 header (network order).
528 *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
530 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
// 26 = 20 (IPv4 header) + 6 (checksum offset within UDP header).
532 vnet_buffer (p0)->map_t.checksum_offset = 26;
533 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
534 *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
535 *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
537 else if (ip40->protocol == IP_PROTOCOL_ICMP)
539 *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
540 if (d0->ea_bits_len == 0 && d0->rules)
// For echo request/reply, the ICMP identifier (offset 6 in the ICMP
// header as read here) plays the role of the port.
542 else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
544 || ((icmp46_header_t *)
546 sizeof (*ip40)))->code == ICMP4_echo_request)
547 *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
// Anything else (non TCP/UDP/ICMP) is not translatable.
551 *error0 = MAP_ERROR_BAD_PROTOCOL;
// Top-level ip4-map-t classifier node: validates the IPv4 header, finds the
// MAP domain for the destination, classifies the packet, performs the MAP
// security (port range) check, prepends the pseudo-header with the computed
// IPv6 addresses, caches first-fragment ports, and dispatches to the
// per-protocol translation nodes.
// NOTE(review): many interior lines (braces, locals, counter arguments,
// enqueue tails, some conditions) are missing from this chunk.
556 ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
558 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
559 vlib_node_runtime_t *error_node =
560 vlib_node_get_runtime (vm, ip4_map_t_node.index);
561 from = vlib_frame_vector_args (frame);
562 n_left_from = frame->n_vectors;
563 next_index = node->cached_next_index;
564 vlib_combined_counter_main_t *cm = map_main.domain_counters;
565 u32 thread_index = vm->thread_index;
567 while (n_left_from > 0)
569 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
571 while (n_left_from > 0 && n_left_to_next > 0)
577 ip4_mapt_next_t next0 = 0;
581 ip4_mapt_pseudo_header_t *pheader0;
583 pi0 = to_next[0] = from[0];
588 error0 = MAP_ERROR_NONE;
590 p0 = vlib_get_buffer (vm, pi0);
591 ip40 = vlib_buffer_get_current (p0);
592 ip4_len0 = clib_host_to_net_u16 (ip40->length);
// Reject truncated buffers and any header with options (0x45 means
// version 4, 5-word header).
593 if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
594 ip40->ip_version_and_header_length != 0x45))
596 error0 = MAP_ERROR_UNKNOWN;
599 d0 = ip4_map_get_domain (&ip40->dst_address,
600 &vnet_buffer (p0)->map_t.map_domain_index,
604 { /* Guess it wasn't for us */
605 vnet_feature_next (&next0, p0);
// ~0 disables the MTU check downstream when the domain has no MTU set.
609 vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
612 ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
615 /* Verify that port is not among the well-known ports */
616 if ((d0->psid_length > 0 && d0->psid_offset > 0)
617 && (clib_net_to_host_u16 (dst_port0) <
618 (0x1 << (16 - d0->psid_offset))))
620 error0 = MAP_ERROR_SEC_CHECK;
623 //Add MAP-T pseudo header in front of the packet
624 vlib_buffer_advance (p0, -sizeof (*pheader0));
625 pheader0 = vlib_buffer_get_current (p0);
627 //Save addresses within the packet
628 ip4_map_t_embedded_address (d0, &pheader0->saddr,
630 pheader0->daddr.as_u64[0] =
631 map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
632 pheader0->daddr.as_u64[1] =
633 map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
635 // It is important to cache at this stage because the result
636 // might be necessary for packets within the same vector.
637 // Actually, this approach even provides some limited
638 // out-of-order fragments support
640 (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
641 && (d0->ea_bits_len != 0 || !d0->rules)
642 && ip4_map_fragment_cache (ip40, dst_port0))
644 error0 = MAP_ERROR_UNKNOWN;
// ICMP TX is counted in the icmp node (after successful translation),
// so skip it here to avoid double counting.
648 (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
650 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
653 map_t.map_domain_index, 1,
658 next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
659 p0->error = error_node->errors[error0];
661 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
662 to_next, n_left_to_next, pi0,
665 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
667 return frame->n_vectors;
// Error strings for all MAP-T nodes, generated from the MAP error macro
// list (the foreach/#undef lines are not visible in this chunk).
670 static char *map_t_error_strings[] = {
671 #define _(sym,string) string,
// Hook ip4-map-t into the ip4-unicast feature arc, before flow-classify.
676 VNET_FEATURE_INIT (ip4_map_t_feature, static) =
678 .arc_name = "ip4-unicast",.node_name = "ip4-map-t",.runs_before =
679 VNET_FEATURES ("ip4-flow-classify"),};
// Graph node handling non-first IPv4 fragments of MAP-T traffic.
682 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
683 .function = ip4_map_t_fragmented,
684 .name = "ip4-map-t-fragmented",
685 .vector_size = sizeof(u32),
686 .format_trace = format_map_trace,
687 .type = VLIB_NODE_TYPE_INTERNAL,
689 .n_errors = MAP_N_ERROR,
690 .error_strings = map_t_error_strings,
692 .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
694 [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
695 [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
696 [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
// Graph node translating IPv4 ICMP to ICMPv6 for MAP-T.
702 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
703 .function = ip4_map_t_icmp,
704 .name = "ip4-map-t-icmp",
705 .vector_size = sizeof(u32),
706 .format_trace = format_map_trace,
707 .type = VLIB_NODE_TYPE_INTERNAL,
709 .n_errors = MAP_N_ERROR,
710 .error_strings = map_t_error_strings,
712 .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
714 [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
715 [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
716 [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
// Graph node translating unfragmented/first-fragment TCP and UDP packets.
722 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
723 .function = ip4_map_t_tcp_udp,
724 .name = "ip4-map-t-tcp-udp",
725 .vector_size = sizeof(u32),
726 .format_trace = format_map_trace,
727 .type = VLIB_NODE_TYPE_INTERNAL,
729 .n_errors = MAP_N_ERROR,
730 .error_strings = map_t_error_strings,
732 .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
734 [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
735 [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
736 [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
// Top-level classifier node; dispatches to the three per-protocol nodes.
// NOTE(review): the .name line for this node is not visible in this chunk.
742 VLIB_REGISTER_NODE(ip4_map_t_node) = {
743 .function = ip4_map_t,
745 .vector_size = sizeof(u32),
746 .format_trace = format_map_trace,
747 .type = VLIB_NODE_TYPE_INTERNAL,
749 .n_errors = MAP_N_ERROR,
750 .error_strings = map_t_error_strings,
752 .n_next_nodes = IP4_MAPT_N_NEXT,
754 [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
755 [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
756 [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
757 [IP4_MAPT_NEXT_DROP] = "error-drop",
763 * fd.io coding-style-patch-verification: ON
766 * eval: (c-set-style "gnu")