2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * Defines used for testing various optimisation schemes
18 #define MAP_ENCAP_DUAL 0
21 #include "../ip/ip_frag.h"
23 vlib_node_registration_t ip4_map_reass_node;
27 IP4_MAP_NEXT_IP6_LOOKUP,
28 #ifdef MAP_SKIP_IP6_LOOKUP
29 IP4_MAP_NEXT_IP6_REWRITE,
31 IP4_MAP_NEXT_IP4_FRAGMENT,
32 IP4_MAP_NEXT_IP6_FRAGMENT,
34 IP4_MAP_NEXT_ICMP_ERROR,
39 enum ip4_map_reass_next_t
41 IP4_MAP_REASS_NEXT_IP6_LOOKUP,
42 IP4_MAP_REASS_NEXT_IP4_FRAGMENT,
43 IP4_MAP_REASS_NEXT_DROP,
52 } map_ip4_map_reass_trace_t;
/* Format a map_ip4_map_reass_trace_t entry for packet tracing:
 * prints the MAP domain index, the L4 port, and whether the
 * fragment was cached (awaiting the first fragment) or forwarded. */
55 format_ip4_map_reass_trace (u8 * s, va_list * args)
57   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
58   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
59   map_ip4_map_reass_trace_t *t = va_arg (*args, map_ip4_map_reass_trace_t *);
60   return format (s, "MAP domain index: %d L4 port: %u Status: %s",
61 		 t->map_domain_index, t->port,
62 		 t->cached ? "cached" : "forwarded");
/* Extract the L4 port used for MAP PSID derivation from an IPv4 packet.
 * Handles: TCP/UDP (port taken directly), ICMP echo request/reply
 * (identifier field used as port), and ICMP errors whose inner packet
 * is TCP/UDP or ICMP echo.  For ICMP errors the *inner* packet's ports
 * are mirrored (dst/src swapped relative to `dir`) because the error
 * travels in the reverse direction of the offending packet.
 * NOTE(review): return value is in network byte order (raw header
 * field) — callers byte-swap as needed; confirm against callers. */
69 ip4_map_get_port (ip4_header_t * ip, map_dir_e dir)
71   /* Find port information */
72   if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) ||
73 		    (ip->protocol == IP_PROTOCOL_UDP)))
75       udp_header_t *udp = (void *) (ip + 1);
76       return (dir == MAP_SENDER ? udp->src_port : udp->dst_port);
78   else if (ip->protocol == IP_PROTOCOL_ICMP)
81        * 1) ICMP Echo request or Echo reply
82        * 2) ICMP Error with inner packet being UDP or TCP
83        * 3) ICMP Error with inner packet being ICMP Echo request or Echo reply
85       icmp46_header_t *icmp = (void *) (ip + 1);
86       if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
88 	  return *((u16 *) (icmp + 1));	/* ICMP identifier follows the 4-byte header */
90       else if (clib_net_to_host_u16 (ip->length) >= 56)
91 	{			// IP + ICMP + IP + L4 header
92 	  ip4_header_t *icmp_ip = (ip4_header_t *) (icmp + 2);	/* skip ICMP hdr + unused word */
93 	  if (PREDICT_TRUE ((icmp_ip->protocol == IP_PROTOCOL_TCP) ||
94 			    (icmp_ip->protocol == IP_PROTOCOL_UDP)))
96 	      udp_header_t *udp = (void *) (icmp_ip + 1);
97 	      /* Inner packet direction is reversed w.r.t. the ICMP error */
97 	      return (dir == MAP_SENDER ? udp->dst_port : udp->src_port);
99 	  else if (icmp_ip->protocol == IP_PROTOCOL_ICMP)
101 	      icmp46_header_t *inner_icmp = (void *) (icmp_ip + 1);
102 	      if (inner_icmp->type == ICMP4_echo_request
103 		  || inner_icmp->type == ICMP4_echo_reply)
104 		return (*((u16 *) (inner_icmp + 1)));
/* For a shared-IPv4 MAP domain (psid_length > 0), extract the destination
 * port and perform the MAP security checks on a downstream packet:
 *  - reject malformed headers (non-0x45 version/IHL or too-short length),
 *  - reject ports inside the excluded well-known range implied by
 *    psid_offset (MAP_ERROR_ENCAP_SEC_CHECK),
 *  - divert first fragments with more-fragments set, and all non-first
 *    fragments, to the ip4-map-reass node (port unknown until reassembly).
 * Returns the port (network byte order per ip4_map_get_port); sets
 * *next / *error as side effects. */
111 static_always_inline u16
112 ip4_map_port_and_security_check (map_domain_t * d, ip4_header_t * ip,
113 				 u32 * next, u8 * error)
117   if (d->psid_length > 0)
119       if (ip4_get_fragment_offset (ip) == 0)
122 	      ((ip->ip_version_and_header_length != 0x45)
123 	       || clib_host_to_net_u16 (ip->length) < 28))
127 	  port = ip4_map_get_port (ip, MAP_RECEIVER);
130 	      /* Verify that port is not among the well-known ports */
131 	      if ((d->psid_offset > 0)
132 		  && (clib_net_to_host_u16 (port) <
133 		      (0x1 << (16 - d->psid_offset))))
135 		  *error = MAP_ERROR_ENCAP_SEC_CHECK;
139 	      if (ip4_get_fragment_more (ip))
140 		*next = IP4_MAP_NEXT_REASS;
146 	  *error = MAP_ERROR_BAD_PROTOCOL;
151 	  *next = IP4_MAP_NEXT_REASS;	/* non-first fragment: port must come from reassembly state */
/* Build the IPv6 version/traffic-class/flow-label word for the outer
 * header: version 6, traffic class copied from the inner IPv4 TOS when
 * tc_copy is enabled (else the configured static tc), and the low 20
 * bits of the buffer's flow hash as the flow label.  Returned in
 * network byte order. */
160 static_always_inline u32
161 ip4_map_vtcfl (ip4_header_t * ip4, vlib_buffer_t * p)
163   map_main_t *mm = &map_main;
164   u8 tc = mm->tc_copy ? ip4->tos : mm->tc;
165   u32 vtcfl = 0x6 << 28;
167   vtcfl |= vnet_buffer (p)->ip.flow_hash & 0x000fffff;
169   return (clib_host_to_net_u32 (vtcfl));
/* Optional fast path (compiled in with MAP_SKIP_IP6_LOOKUP): bypass the
 * ip6-lookup node by stamping a pre-resolved IPv6 adjacency directly
 * into the buffer's TX adjacency index.  Load-balances across the
 * adjacency's n_adj siblings using the IPv4 flow hash.
 * NOTE(review): returns whether the bypass was taken — the caller
 * selects IP4_MAP_NEXT_IP6_REWRITE on true; confirm the elided
 * return paths. */
172 static_always_inline bool
173 ip4_map_ip6_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
175 #ifdef MAP_SKIP_IP6_LOOKUP
176   map_main_t *mm = &map_main;
177   u32 adj_index0 = mm->adj6_index;
180       ip_lookup_main_t *lm6 = &ip6_main.lookup_main;
181       ip_adjacency_t *adj = ip_get_adjacency (lm6, mm->adj6_index);
184 	  u32 hash_c0 = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT);
185 	  adj_index0 += (hash_c0 & (adj->n_adj - 1));	/* n_adj assumed power of two */
187       vnet_buffer (p0)->ip.adj_index[VLIB_TX] = adj_index0;
/* Decrement the inner IPv4 TTL and incrementally update the header
 * checksum per RFC 1624 (add 0x0100 in network order, fold the carry).
 * Sets *error to IP4_ERROR_TIME_EXPIRED when the TTL reaches zero.
 * The input feature arc is expected to have dropped ttl==0 packets
 * already, hence the ASSERT rather than a runtime check. */
198 ip4_map_decrement_ttl (ip4_header_t * ip, u8 * error)
202   /* Input node should have reject packets with ttl 0. */
203   ASSERT (ip->ttl > 0);
205   u32 checksum = ip->checksum + clib_host_to_net_u16 (0x0100);
206   checksum += checksum >= 0xffff;	/* fold end-around carry */
207   ip->checksum = checksum;
210   *error = ttl <= 0 ? IP4_ERROR_TIME_EXPIRED : *error;
212   /* Verify checksum. */
213   ASSERT (ip->checksum == ip4_header_checksum (ip));
/* Decide how to handle an encapsulated packet that exceeds the domain
 * MTU.  Three outcomes:
 *  - inner-fragmentation (ip4-frag) when permitted: fragment the inner
 *    IPv4 packet, leaving the IPv6 header offset for re-encapsulation;
 *  - DF set and frag_ignore_df off: generate an ICMPv4 "fragmentation
 *    needed" error back to the sender (MAP_ERROR_DF_SET);
 *  - otherwise outer-fragmentation (ip6-frag) of the IPv6 packet.
 * Returns the chosen next-node index; may set *error. */
217 ip4_map_fragment (vlib_buffer_t * b, u16 mtu, bool df, u8 * error)
219   map_main_t *mm = &map_main;
223       ip_frag_set_vnet_buffer (b, sizeof (ip6_header_t), mtu,
224 			       IP4_FRAG_NEXT_IP6_LOOKUP,
225 			       IP_FRAG_FLAG_IP6_HEADER);
226       return (IP4_MAP_NEXT_IP4_FRAGMENT);
230       if (df && !mm->frag_ignore_df)
232 	  icmp4_error_set_vnet_buffer (b, ICMP4_destination_unreachable,
233 				       ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
235 	  vlib_buffer_advance (b, sizeof (ip6_header_t));	/* point back at the inner IPv4 header */
236 	  *error = MAP_ERROR_DF_SET;
237 	  return (IP4_MAP_NEXT_ICMP_ERROR);
239       ip_frag_set_vnet_buffer (b, 0, mtu, IP6_FRAG_NEXT_IP6_LOOKUP,
240 			       IP_FRAG_FLAG_IP6_HEADER);
241       return (IP4_MAP_NEXT_IP6_FRAGMENT);
/* ip4-map node: encapsulate downstream IPv4 packets into IPv6 (MAP-E).
 * Per packet: look up the MAP domain from the adjacency stamped by the
 * IPv4 FIB, run the port/security check, decrement the inner TTL,
 * derive the IPv6 destination from the domain rules (map_get_pfx/sfx),
 * prepend and fill the outer IPv6 header, then pick the next node
 * (ip6-lookup / ip6-rewrite bypass / ip4-frag / ip6-frag /
 * ip4-map-reass / icmp-error / drop) and bump per-domain TX counters.
 * Standard VPP dual-loop (two packets per iteration, with prefetch of
 * the next pair) followed by a single-loop for the remainder; the two
 * loops perform the same per-packet work. */
249 ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
251   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
252   vlib_node_runtime_t *error_node =
253     vlib_node_get_runtime (vm, ip4_map_node.index);
254   from = vlib_frame_vector_args (frame);
255   n_left_from = frame->n_vectors;
256   next_index = node->cached_next_index;
257   map_main_t *mm = &map_main;
258   vlib_combined_counter_main_t *cm = mm->domain_counters;
259   u32 cpu_index = os_get_cpu_number ();
261   while (n_left_from > 0)
263       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
265       /* Dual loop */
266       while (n_left_from >= 4 && n_left_to_next >= 2)
269 	  vlib_buffer_t *p0, *p1;
270 	  map_domain_t *d0, *d1;
271 	  u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE;
272 	  ip4_header_t *ip40, *ip41;
273 	  u16 port0 = 0, port1 = 0;
274 	  ip6_header_t *ip6h0, *ip6h1;
275 	  u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
276 	  u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 =
277 	    IP4_MAP_NEXT_IP6_LOOKUP;
279 	  /* Prefetch next iteration. */
281 	      vlib_buffer_t *p2, *p3;
283 	      p2 = vlib_get_buffer (vm, from[2]);
284 	      p3 = vlib_get_buffer (vm, from[3]);
286 	      vlib_prefetch_buffer_header (p2, STORE);
287 	      vlib_prefetch_buffer_header (p3, STORE);
288 	      /* IPv4 + 8 = 28. possibly plus -40 */
289 	      CLIB_PREFETCH (p2->data - 40, 68, STORE);
290 	      CLIB_PREFETCH (p3->data - 40, 68, STORE);
293 	  pi0 = to_next[0] = from[0];
294 	  pi1 = to_next[1] = from[1];
300 	  p0 = vlib_get_buffer (vm, pi0);
301 	  p1 = vlib_get_buffer (vm, pi1);
302 	  ip40 = vlib_buffer_get_current (p0);
303 	  ip41 = vlib_buffer_get_current (p1);
	  /* MAP domain recovered from the FIB adjacency stamped at lookup */
305 	    ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
308 	    ip4_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
314 	   * Shared IPv4 address
316 	  port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
317 	  port1 = ip4_map_port_and_security_check (d1, ip41, &next1, &error1);
319 	  /* Decrement IPv4 TTL */
320 	  ip4_map_decrement_ttl (ip40, &error0);
321 	  ip4_map_decrement_ttl (ip41, &error1);
	  /* Remember the inner DF bit for the MTU/fragmentation decision below */
323 	    ip40->flags_and_fragment_offset &
324 	    clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
326 	    ip41->flags_and_fragment_offset &
327 	    clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
	  /* Derive the IPv6 destination (prefix + suffix halves) from the
	   * MAP rules; all-zero result with no prior error means the
	   * address/port pair matched no binding. */
330 	      u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
331 	      u32 da41 = clib_net_to_host_u32 (ip41->dst_address.as_u32);
332 	      u16 dp40 = clib_net_to_host_u16 (port0);
333 	      u16 dp41 = clib_net_to_host_u16 (port1);
334 	      u64 dal60 = map_get_pfx (d0, da40, dp40);
335 	      u64 dal61 = map_get_pfx (d1, da41, dp41);
336 	      u64 dar60 = map_get_sfx (d0, da40, dp40);
337 	      u64 dar61 = map_get_sfx (d1, da41, dp41);
338 	      if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
339 		  && next0 != IP4_MAP_NEXT_REASS)
340 		error0 = MAP_ERROR_NO_BINDING;
341 	      if (dal61 == 0 && dar61 == 0 && error1 == MAP_ERROR_NONE
342 		  && next1 != IP4_MAP_NEXT_REASS)
343 		error1 = MAP_ERROR_NO_BINDING;
345 	      /* construct ipv6 header */
346 	      vlib_buffer_advance (p0, -sizeof (ip6_header_t));
347 	      vlib_buffer_advance (p1, -sizeof (ip6_header_t));
348 	      ip6h0 = vlib_buffer_get_current (p0);
349 	      ip6h1 = vlib_buffer_get_current (p1);
350 	      vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
351 	      vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
353 	      ip6h0->ip_version_traffic_class_and_flow_label =
354 		ip4_map_vtcfl (ip40, p0);
355 	      ip6h1->ip_version_traffic_class_and_flow_label =
356 		ip4_map_vtcfl (ip41, p1);
	      /* IPv6 payload = entire inner IPv4 packet, so the v4 total
	       * length field is reused verbatim (both network order) */
357 	      ip6h0->payload_length = ip40->length;
358 	      ip6h1->payload_length = ip41->length;
359 	      ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
360 	      ip6h1->protocol = IP_PROTOCOL_IP_IN_IP;
361 	      ip6h0->hop_limit = 0x40;
362 	      ip6h1->hop_limit = 0x40;
363 	      ip6h0->src_address = d0->ip6_src;
364 	      ip6h1->src_address = d1->ip6_src;
365 	      ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
366 	      ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
367 	      ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64 (dal61);
368 	      ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64 (dar61);
371 	       * Determine next node. Can be one of:
372 	       * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
374 	      if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
378 		       && (clib_net_to_host_u16 (ip6h0->payload_length) +
379 			   sizeof (*ip6h0) > d0->mtu)))
381 		      next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
386 			ip4_map_ip6_lookup_bypass (p0,
388 			IP4_MAP_NEXT_IP6_REWRITE : next0;
389 		      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
391 						       map_domain_index0, 1,
393 						       (ip6h0->payload_length) +
399 	      next0 = IP4_MAP_NEXT_DROP;
403 	       * Determine next node. Can be one of:
404 	       * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
406 	      if (PREDICT_TRUE (error1 == MAP_ERROR_NONE))
410 		       && (clib_net_to_host_u16 (ip6h1->payload_length) +
411 			   sizeof (*ip6h1) > d1->mtu)))
413 		      next1 = ip4_map_fragment (p1, d1->mtu, df1, &error1);
418 			ip4_map_ip6_lookup_bypass (p1,
420 			IP4_MAP_NEXT_IP6_REWRITE : next1;
421 		      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
423 						       map_domain_index1, 1,
425 						       (ip6h1->payload_length) +
431 	      next1 = IP4_MAP_NEXT_DROP;
434 	  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
436 	      map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
437 	      tr->map_domain_index = map_domain_index0;
440 	  if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
442 	      map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
443 	      tr->map_domain_index = map_domain_index1;
447 	  p0->error = error_node->errors[error0];
448 	  p1->error = error_node->errors[error1];
450 	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
451 					   n_left_to_next, pi0, pi1, next0,
      /* Single loop: same per-packet processing for the leftover packets */
455       while (n_left_from > 0 && n_left_to_next > 0)
460 	  u8 error0 = MAP_ERROR_NONE;
464 	  u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP;
465 	  u32 map_domain_index0 = ~0;
467 	  pi0 = to_next[0] = from[0];
473 	  p0 = vlib_get_buffer (vm, pi0);
474 	  ip40 = vlib_buffer_get_current (p0);
476 	    ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
481 	   * Shared IPv4 address
483 	  port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
485 	  /* Decrement IPv4 TTL */
486 	  ip4_map_decrement_ttl (ip40, &error0);
488 	    ip40->flags_and_fragment_offset &
489 	    clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
492 	      u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
493 	      u16 dp40 = clib_net_to_host_u16 (port0);
494 	      u64 dal60 = map_get_pfx (d0, da40, dp40);
495 	      u64 dar60 = map_get_sfx (d0, da40, dp40);
496 	      if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
497 		  && next0 != IP4_MAP_NEXT_REASS)
498 		error0 = MAP_ERROR_NO_BINDING;
500 	      /* construct ipv6 header */
501 	      vlib_buffer_advance (p0, -(sizeof (ip6_header_t)));
502 	      ip6h0 = vlib_buffer_get_current (p0);
503 	      vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
505 	      ip6h0->ip_version_traffic_class_and_flow_label =
506 		ip4_map_vtcfl (ip40, p0);
507 	      ip6h0->payload_length = ip40->length;
508 	      ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
509 	      ip6h0->hop_limit = 0x40;
510 	      ip6h0->src_address = d0->ip6_src;
511 	      ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
512 	      ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
515 	       * Determine next node. Can be one of:
516 	       * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
518 	      if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
522 		       && (clib_net_to_host_u16 (ip6h0->payload_length) +
523 			   sizeof (*ip6h0) > d0->mtu)))
525 		      next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
530 			ip4_map_ip6_lookup_bypass (p0,
532 			IP4_MAP_NEXT_IP6_REWRITE : next0;
533 		      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
535 						       map_domain_index0, 1,
537 						       (ip6h0->payload_length) +
543 	      next0 = IP4_MAP_NEXT_DROP;
546 	  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
548 	      map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
549 	      tr->map_domain_index = map_domain_index0;
553 	  p0->error = error_node->errors[error0];
554 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
555 					   n_left_to_next, pi0, next0);
557       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
560   return frame->n_vectors;
/* ip4-map-reass node: virtual reassembly for fragmented downstream
 * packets whose L4 port (needed for PSID derivation) is only present in
 * the first fragment.  Packets arrive here from ip4-map with the outer
 * IPv6 header already prepended.  A per-flow reassembly entry caches
 * non-first fragments until the first fragment supplies the port; then
 * cached fragments are looped back through this node (fragments_to_loopback)
 * and forwarded with the now-known IPv6 destination.  Entries that fail
 * (no memory, cache full, un-parseable port) drop their fragments via
 * fragments_to_drop. */
567 ip4_map_reass (vlib_main_t * vm,
568 	       vlib_node_runtime_t * node, vlib_frame_t * frame)
570   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
571   vlib_node_runtime_t *error_node =
572     vlib_node_get_runtime (vm, ip4_map_reass_node.index);
573   from = vlib_frame_vector_args (frame);
574   n_left_from = frame->n_vectors;
575   next_index = node->cached_next_index;
576   map_main_t *mm = &map_main;
577   vlib_combined_counter_main_t *cm = mm->domain_counters;
578   u32 cpu_index = os_get_cpu_number ();
579   u32 *fragments_to_drop = NULL;
580   u32 *fragments_to_loopback = NULL;
582   while (n_left_from > 0)
584       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
586       while (n_left_from > 0 && n_left_to_next > 0)
591 	  u8 error0 = MAP_ERROR_NONE;
595 	  u32 next0 = IP4_MAP_REASS_NEXT_IP6_LOOKUP;
596 	  u32 map_domain_index0;
599 	  pi0 = to_next[0] = from[0];
605 	  p0 = vlib_get_buffer (vm, pi0);
	  /* Outer IPv6 header was prepended by ip4-map; inner IPv4 follows it */
606 	  ip60 = vlib_buffer_get_current (p0);
607 	  ip40 = (ip4_header_t *) (ip60 + 1);
609 	    ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
	  /* Reassembly table is shared — lock around get/update/free */
612 	  map_ip4_reass_lock ();
613 	  map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32,
614 						  ip40->dst_address.as_u32,
618 	  if (PREDICT_FALSE (!r))
620 	      // Could not create a caching entry
621 	      error0 = MAP_ERROR_FRAGMENT_MEMORY;
623 	  else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
627 	      // We know the port already
630 	      else if (map_ip4_reass_add_fragment (r, pi0))
632 		  // Not enough space for caching
633 		  error0 = MAP_ERROR_FRAGMENT_MEMORY;
634 		  map_ip4_reass_free (r, &fragments_to_drop);
	  /* First fragment: extract the port and release any cached fragments */
643 		    ip4_get_port (ip40, MAP_RECEIVER, p0->current_length)) < 0)
645 	      // Could not find port. We'll free the reassembly.
646 	      error0 = MAP_ERROR_BAD_PROTOCOL;
648 	      map_ip4_reass_free (r, &fragments_to_drop);
653 	      map_ip4_reass_get_fragments (r, &fragments_to_loopback);
656 #ifdef MAP_IP4_REASS_COUNT_BYTES
	  /* Track forwarded bytes so the entry can be freed as soon as the
	   * whole datagram has passed through (expected_total known once
	   * the last fragment — MF clear — is seen). */
659 	      r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
660 	      if (!ip4_get_fragment_more (ip40))
662 		  ip4_get_fragment_offset (ip40) * 8 +
663 		  clib_host_to_net_u16 (ip40->length) - 20;
664 	      if (r->forwarded >= r->expected_total)
665 		map_ip4_reass_free (r, &fragments_to_drop);
669 	  map_ip4_reass_unlock ();
671 	  // NOTE: Most operations have already been performed by ip4_map
672 	  // All we need is the right destination address
673 	  ip60->dst_address.as_u64[0] =
674 	    map_get_pfx_net (d0, ip40->dst_address.as_u32, port0);
675 	  ip60->dst_address.as_u64[1] =
676 	    map_get_sfx_net (d0, ip40->dst_address.as_u32, port0);
	  /* Re-check MTU here: the final destination was not known in ip4-map */
680 	       && (clib_net_to_host_u16 (ip60->payload_length) +
681 		   sizeof (*ip60) > d0->mtu)))
683 	      vnet_buffer (p0)->ip_frag.header_offset = sizeof (*ip60);
684 	      vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
685 	      vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
686 	      vnet_buffer (p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
687 	      next0 = IP4_MAP_REASS_NEXT_IP4_FRAGMENT;
690 	  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
692 	      map_ip4_map_reass_trace_t *tr =
693 		vlib_add_trace (vm, node, p0, sizeof (*tr));
694 	      tr->map_domain_index = map_domain_index0;
707 	  if (error0 == MAP_ERROR_NONE)
708 	    vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
709 					     cpu_index, map_domain_index0,
712 					     (ip60->payload_length) + 40);
714 	    (error0 == MAP_ERROR_NONE) ? next0 : IP4_MAP_REASS_NEXT_DROP;
715 	  p0->error = error_node->errors[error0];
716 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
717 					   n_left_to_next, pi0, next0);
720 	  // Loopback when we reach the end of the input vector
721 	  if (n_left_from == 0 && vec_len (fragments_to_loopback))
723 	      from = vlib_frame_vector_args (frame);
724 	      u32 len = vec_len (fragments_to_loopback);
725 	      if (len <= VLIB_FRAME_SIZE)
		  /* All released fragments fit in this frame: re-feed them */
727 		  clib_memcpy (from, fragments_to_loopback,
730 		  vec_reset_length (fragments_to_loopback);
		  /* More than a frame's worth: loop back the tail, keep the rest */
735 			       fragments_to_loopback + (len -
737 			       sizeof (u32) * VLIB_FRAME_SIZE);
738 		  n_left_from = VLIB_FRAME_SIZE;
739 		  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
743       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      /* Drop everything accumulated from freed reassembly entries */
746   map_send_all_to_node (vm, fragments_to_drop, node,
747 			&error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
748 			IP4_MAP_REASS_NEXT_DROP);
750   vec_free (fragments_to_drop);
751   vec_free (fragments_to_loopback);
752   return frame->n_vectors;
755 static char *map_error_strings[] = {
756 #define _(sym,string) string,
/* Graph-node registration for ip4-map: the MAP-E encapsulation node.
 * Next-node arcs mirror the ip4_map_next_t enum; the ip6-rewrite arc
 * exists only when the MAP_SKIP_IP6_LOOKUP bypass is compiled in. */
762 VLIB_REGISTER_NODE(ip4_map_node) = {
765   .vector_size = sizeof(u32),
766   .format_trace = format_map_trace,
767   .type = VLIB_NODE_TYPE_INTERNAL,
769   .n_errors = MAP_N_ERROR,
770   .error_strings = map_error_strings,
772   .n_next_nodes = IP4_MAP_N_NEXT,
774     [IP4_MAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
775 #ifdef MAP_SKIP_IP6_LOOKUP
776     [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-rewrite",
778     [IP4_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
779     [IP4_MAP_NEXT_IP6_FRAGMENT] = "ip6-frag",
780     [IP4_MAP_NEXT_REASS] = "ip4-map-reass",
781     [IP4_MAP_NEXT_ICMP_ERROR] = "ip4-icmp-error",
782     [IP4_MAP_NEXT_DROP] = "error-drop",
/* Graph-node registration for ip4-map-reass: virtual reassembly of
 * fragmented downstream packets (port recovery for PSID derivation).
 * Next-node arcs mirror the ip4_map_reass_next_t enum. */
788 VLIB_REGISTER_NODE(ip4_map_reass_node) = {
789   .function = ip4_map_reass,
790   .name = "ip4-map-reass",
791   .vector_size = sizeof(u32),
792   .format_trace = format_ip4_map_reass_trace,
793   .type = VLIB_NODE_TYPE_INTERNAL,
795   .n_errors = MAP_N_ERROR,
796   .error_strings = map_error_strings,
798   .n_next_nodes = IP4_MAP_REASS_N_NEXT,
800     [IP4_MAP_REASS_NEXT_IP6_LOOKUP] = "ip6-lookup",
801     [IP4_MAP_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
802     [IP4_MAP_REASS_NEXT_DROP] = "error-drop",
808 * fd.io coding-style-patch-verification: ON
811 * eval: (c-set-style "gnu")