2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * Defines used for testing various optimisation schemes
18 #define MAP_ENCAP_DUAL 0
21 #include "../ip/ip_frag.h"
23 vlib_node_registration_t ip4_map_reass_node;
26 IP4_MAP_NEXT_IP6_LOOKUP,
27 #ifdef MAP_SKIP_IP6_LOOKUP
28 IP4_MAP_NEXT_IP6_REWRITE,
30 IP4_MAP_NEXT_FRAGMENT,
36 enum ip4_map_reass_next_t {
37 IP4_MAP_REASS_NEXT_IP6_LOOKUP,
38 IP4_MAP_REASS_NEXT_IP4_FRAGMENT,
39 IP4_MAP_REASS_NEXT_DROP,
47 } map_ip4_map_reass_trace_t;
/* Format a map-reass packet trace record: MAP domain index, the L4 port
 * recovered from the first fragment, and whether this fragment was cached
 * (waiting for the port) or forwarded immediately. */
50 format_ip4_map_reass_trace (u8 *s, va_list *args)
52 CLIB_UNUSED(vlib_main_t *vm) = va_arg (*args, vlib_main_t *);
53 CLIB_UNUSED(vlib_node_t *node) = va_arg (*args, vlib_node_t *);
54 map_ip4_map_reass_trace_t *t = va_arg (*args, map_ip4_map_reass_trace_t *);
55 return format(s, "MAP domain index: %d L4 port: %u Status: %s", t->map_domain_index,
56 t->port, t->cached?"cached":"forwarded");
/* Extract the L4 port (returned in network byte order) used for MAP PSID
 * matching.  Handles: TCP/UDP directly; ICMP echo request/reply (the 16-bit
 * identifier stands in for the port); and ICMP errors that embed an inner
 * TCP/UDP or ICMP-echo packet.  For embedded packets the direction is
 * reversed relative to the outer packet, hence the swapped src/dst choice. */
63 ip4_map_get_port (ip4_header_t *ip, map_dir_e dir)
65 /* Find port information */
66 if (PREDICT_TRUE((ip->protocol == IP_PROTOCOL_TCP) ||
67 (ip->protocol == IP_PROTOCOL_UDP))) {
68 udp_header_t *udp = (void *)(ip + 1);
69 return (dir == MAP_SENDER ? udp->src_port : udp->dst_port);
70 } else if (ip->protocol == IP_PROTOCOL_ICMP) {
72 * 1) ICMP Echo request or Echo reply
73 * 2) ICMP Error with inner packet being UDP or TCP
74 * 3) ICMP Error with inner packet being ICMP Echo request or Echo reply
76 icmp46_header_t *icmp = (void *)(ip + 1);
77 if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply) {
/* Echo: the identifier immediately follows the 4-byte ICMP header. */
78 return *((u16 *)(icmp + 1));
79 } else if (clib_net_to_host_u16(ip->length) >= 64) { // IP + ICMP + IP + L4 header
/* ICMP error: inner IP header starts 8 bytes into the ICMP message
 * (icmp + 2 skips type/code/checksum plus the 4 unused bytes). */
80 ip4_header_t *icmp_ip = (ip4_header_t *)(icmp + 2);
81 if (PREDICT_TRUE((icmp_ip->protocol == IP_PROTOCOL_TCP) ||
82 (icmp_ip->protocol == IP_PROTOCOL_UDP))) {
83 udp_header_t *udp = (void *)(icmp_ip + 1);
/* Inner packet travels the opposite direction: pick the other port. */
84 return (dir == MAP_SENDER ? udp->dst_port : udp->src_port);
85 } else if (icmp_ip->protocol == IP_PROTOCOL_ICMP) {
/* NOTE(review): (icmp_ip + 1) assumes the inner IP header carries no
 * options — TODO confirm this invariant holds for all callers. */
86 icmp46_header_t *inner_icmp = (void *)(icmp_ip + 1);
87 if (inner_icmp->type == ICMP4_echo_request || inner_icmp->type == ICMP4_echo_reply)
88 return (*((u16 *)(inner_icmp + 1)));
/* For a shared-IPv4 (psid_length > 0) domain, recover the destination port
 * and perform the encapsulation security check.  Non-first fragments are
 * diverted to the virtual-reassembly node (port unknown yet); malformed
 * headers and well-known destination ports are flagged via *error. */
95 static_always_inline u16
96 ip4_map_port_and_security_check (map_domain_t *d, ip4_header_t *ip, u32 *next, u8 *error)
100 if (d->psid_length > 0) {
101 if (!ip4_is_fragment(ip)) {
/* Reject packets with IP options or too short to carry an L4 header. */
102 if (PREDICT_FALSE((ip->ip_version_and_header_length != 0x45) || clib_host_to_net_u16(ip->length) < 28)) {
105 port = ip4_map_get_port(ip, MAP_RECEIVER);
107 /* Verify that port is not among the well-known ports */
108 if ((d->psid_offset > 0) && (clib_net_to_host_u16(port) < (0x1 << (16 - d->psid_offset)))) {
109 *error = MAP_ERROR_ENCAP_SEC_CHECK;
114 *error = MAP_ERROR_BAD_PROTOCOL;
/* Fragment with offset != 0: needs virtual reassembly to learn the port. */
117 *next = IP4_MAP_NEXT_REASS;
126 static_always_inline u32
127 ip4_map_vtcfl (ip4_header_t *ip4, vlib_buffer_t *p)
129 map_main_t *mm = &map_main;
130 u8 tc = mm->tc_copy ? ip4->tos : mm->tc;
131 u32 vtcfl = 0x6 << 28;
133 vtcfl |= vnet_buffer(p)->ip.flow_hash && 0x000fffff;
135 return (clib_host_to_net_u32(vtcfl));
/* Optionally bypass the ip6-lookup node by writing a pre-resolved IPv6
 * adjacency index into the buffer's TX metadata (only when compiled with
 * MAP_SKIP_IP6_LOOKUP and a bypass adjacency is configured).  For multipath
 * adjacencies the flow hash selects a member.  Returns true when the bypass
 * was applied. */
138 static_always_inline bool
139 ip4_map_ip6_lookup_bypass (vlib_buffer_t *p0, ip4_header_t *ip)
141 #ifdef MAP_SKIP_IP6_LOOKUP
142 map_main_t *mm = &map_main;
143 u32 adj_index0 = mm->adj6_index;
144 if (adj_index0 > 0) {
145 ip_lookup_main_t *lm6 = &ip6_main.lookup_main;
146 ip_adjacency_t *adj = ip_get_adjacency(lm6, mm->adj6_index);
147 if (adj->n_adj > 1) {
/* NOTE(review): the (n_adj - 1) mask assumes n_adj is a power of two —
 * TODO confirm against the adjacency allocator. */
148 u32 hash_c0 = ip4_compute_flow_hash(ip, IP_FLOW_HASH_DEFAULT);
149 adj_index0 += (hash_c0 & (adj->n_adj - 1));
151 vnet_buffer(p0)->ip.adj_index[VLIB_TX] = adj_index0;
/*
 * ip4-map node function: encapsulate IPv4 packets into IPv6 (MAP-E).
 *
 * For each buffer: look up the MAP domain from the TX adjacency, run the
 * port/security check (shared-address domains), derive the IPv6 destination
 * from the domain rules plus (addr, port), prepend an IPv6 header, and
 * dispatch to ip6-lookup / ip6-rewrite / ip4-frag / ip4-map-reass /
 * error-drop.  Processes packets two at a time with prefetch, then a
 * single-packet cleanup loop.
 */
162 ip4_map (vlib_main_t *vm,
163 vlib_node_runtime_t *node,
166 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
167 vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_node.index);
168 from = vlib_frame_vector_args(frame);
169 n_left_from = frame->n_vectors;
170 next_index = node->cached_next_index;
171 map_main_t *mm = &map_main;
172 vlib_combined_counter_main_t *cm = mm->domain_counters;
173 u32 cpu_index = os_get_cpu_number();
175 while (n_left_from > 0) {
176 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
/* Dual-packet loop. */
179 while (n_left_from > 4 && n_left_to_next > 2) {
181 vlib_buffer_t *p0, *p1;
182 map_domain_t *d0, *d1;
183 u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE;
184 ip4_header_t *ip40, *ip41;
185 u16 port0 = 0, port1 = 0;
186 ip6_header_t *ip6h0, *ip6h1;
187 u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
188 u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 = IP4_MAP_NEXT_IP6_LOOKUP;
190 /* Prefetch next iteration. */
192 vlib_buffer_t *p2, *p3;
194 p2 = vlib_get_buffer(vm, from[2]);
195 p3 = vlib_get_buffer(vm, from[3]);
197 vlib_prefetch_buffer_header(p2, STORE);
198 vlib_prefetch_buffer_header(p3, STORE);
199 /* IPv4 + 8 = 28. possibly plus -40 */
200 CLIB_PREFETCH (p2->data-40, 68, STORE);
201 CLIB_PREFETCH (p3->data-40, 68, STORE);
204 pi0 = to_next[0] = from[0];
205 pi1 = to_next[1] = from[1];
211 p0 = vlib_get_buffer(vm, pi0);
212 p1 = vlib_get_buffer(vm, pi1);
213 ip40 = vlib_buffer_get_current(p0);
214 ip41 = vlib_buffer_get_current(p1);
/* Trim the buffer to the IP total length (drops any L2 padding). */
215 p0->current_length = clib_net_to_host_u16(ip40->length);
216 p1->current_length = clib_net_to_host_u16(ip41->length);
217 d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
218 d1 = ip4_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX], &map_domain_index1);
223 * Shared IPv4 address
225 port0 = ip4_map_port_and_security_check(d0, ip40, &next0, &error0);
226 port1 = ip4_map_port_and_security_check(d1, ip41, &next1, &error1);
/* Derive the IPv6 destination (prefix + suffix) from domain rules. */
229 u32 da40 = clib_net_to_host_u32(ip40->dst_address.as_u32);
230 u32 da41 = clib_net_to_host_u32(ip41->dst_address.as_u32);
231 u16 dp40 = clib_net_to_host_u16(port0);
232 u16 dp41 = clib_net_to_host_u16(port1);
233 u64 dal60 = map_get_pfx(d0, da40, dp40);
234 u64 dal61 = map_get_pfx(d1, da41, dp41);
235 u64 dar60 = map_get_sfx(d0, da40, dp40);
236 u64 dar61 = map_get_sfx(d1, da41, dp41);
237 if (dal60 == 0 && dar60 == 0) error0 = MAP_ERROR_UNKNOWN;
238 if (dal61 == 0 && dar61 == 0) error1 = MAP_ERROR_UNKNOWN;
240 /* construct ipv6 header */
241 vlib_buffer_advance(p0, - sizeof(ip6_header_t));
242 vlib_buffer_advance(p1, - sizeof(ip6_header_t));
243 ip6h0 = vlib_buffer_get_current(p0);
244 ip6h1 = vlib_buffer_get_current(p1);
245 vnet_buffer(p0)->sw_if_index[VLIB_TX] = (u32)~0;
246 vnet_buffer(p1)->sw_if_index[VLIB_TX] = (u32)~0;
248 ip6h0->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip40, p0);
249 ip6h1->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip41, p1);
/* IPv6 payload is the whole inner IPv4 packet; both fields are network
 * order so the total-length field is copied without a byte swap. */
250 ip6h0->payload_length = ip40->length;
251 ip6h1->payload_length = ip41->length;
252 ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
253 ip6h1->protocol = IP_PROTOCOL_IP_IN_IP;
254 ip6h0->hop_limit = 0x40;
255 ip6h1->hop_limit = 0x40;
256 ip6h0->src_address = d0->ip6_src;
257 ip6h1->src_address = d1->ip6_src;
258 ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64(dal60);
259 ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64(dar60);
260 ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64(dal61);
261 ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64(dar61);
264 * Determine next node. Can be one of:
265 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
267 if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
/* Oversized for the domain MTU: hand off to the fragmenter. */
268 if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip6h0->payload_length) + sizeof(*ip6h0) > d0->mtu))) {
269 vnet_buffer(p0)->ip_frag.header_offset = sizeof(*ip6h0);
270 vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
271 vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
272 vnet_buffer(p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
273 next0 = IP4_MAP_NEXT_FRAGMENT;
275 next0 = ip4_map_ip6_lookup_bypass(p0, ip40) ? IP4_MAP_NEXT_IP6_REWRITE : next0;
/* TX byte count includes the 40-byte IPv6 header. */
276 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
277 clib_net_to_host_u16(ip6h0->payload_length) + 40);
280 next0 = IP4_MAP_NEXT_DROP;
284 * Determine next node. Can be one of:
285 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
287 if (PREDICT_TRUE(error1 == MAP_ERROR_NONE)) {
288 if (PREDICT_FALSE(d1->mtu && (clib_net_to_host_u16(ip6h1->payload_length) + sizeof(*ip6h1) > d1->mtu))) {
289 vnet_buffer(p1)->ip_frag.header_offset = sizeof(*ip6h1);
290 vnet_buffer(p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
291 vnet_buffer(p1)->ip_frag.mtu = d1->mtu;
292 vnet_buffer(p1)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
293 next1 = IP4_MAP_NEXT_FRAGMENT;
295 next1 = ip4_map_ip6_lookup_bypass(p1, ip41) ? IP4_MAP_NEXT_IP6_REWRITE : next1;
296 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index1, 1,
297 clib_net_to_host_u16(ip6h1->payload_length) + 40);
300 next1 = IP4_MAP_NEXT_DROP;
303 if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
304 map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
305 tr->map_domain_index = map_domain_index0;
308 if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED)) {
309 map_trace_t *tr = vlib_add_trace(vm, node, p1, sizeof(*tr));
310 tr->map_domain_index = map_domain_index1;
314 p0->error = error_node->errors[error0];
315 p1->error = error_node->errors[error1];
317 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, pi0, pi1, next0, next1);
/* Single-packet cleanup loop — mirrors the dual loop above. */
320 while (n_left_from > 0 && n_left_to_next > 0) {
324 u8 error0 = MAP_ERROR_NONE;
328 u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP;
329 u32 map_domain_index0 = ~0;
331 pi0 = to_next[0] = from[0];
337 p0 = vlib_get_buffer(vm, pi0);
338 ip40 = vlib_buffer_get_current(p0);
339 p0->current_length = clib_net_to_host_u16(ip40->length);
340 d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
344 * Shared IPv4 address
346 port0 = ip4_map_port_and_security_check(d0, ip40, &next0, &error0);
349 u32 da40 = clib_net_to_host_u32(ip40->dst_address.as_u32);
350 u16 dp40 = clib_net_to_host_u16(port0);
351 u64 dal60 = map_get_pfx(d0, da40, dp40);
352 u64 dar60 = map_get_sfx(d0, da40, dp40);
353 if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE) error0 = MAP_ERROR_UNKNOWN;
355 /* construct ipv6 header */
356 vlib_buffer_advance(p0, - (sizeof(ip6_header_t)));
357 ip6h0 = vlib_buffer_get_current(p0);
358 vnet_buffer(p0)->sw_if_index[VLIB_TX] = (u32)~0;
360 ip6h0->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip40, p0);
361 ip6h0->payload_length = ip40->length;
362 ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
363 ip6h0->hop_limit = 0x40;
364 ip6h0->src_address = d0->ip6_src;
365 ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64(dal60);
366 ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64(dar60);
369 * Determine next node. Can be one of:
370 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
372 if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
373 if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip6h0->payload_length) + sizeof(*ip6h0) > d0->mtu))) {
374 vnet_buffer(p0)->ip_frag.header_offset = sizeof(*ip6h0);
375 vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
376 vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
377 vnet_buffer(p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
378 next0 = IP4_MAP_NEXT_FRAGMENT;
380 next0 = ip4_map_ip6_lookup_bypass(p0, ip40) ? IP4_MAP_NEXT_IP6_REWRITE : next0;
381 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
382 clib_net_to_host_u16(ip6h0->payload_length) + 40);
385 next0 = IP4_MAP_NEXT_DROP;
388 if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
389 map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
390 tr->map_domain_index = map_domain_index0;
394 p0->error = error_node->errors[error0];
395 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
397 vlib_put_next_frame(vm, node, next_index, n_left_to_next);
400 return frame->n_vectors;
/*
 * ip4-map-reass node function: virtual reassembly for fragmented IPv4
 * packets entering a shared-address MAP domain.
 *
 * The L4 port only appears in the first fragment, so non-first fragments
 * arriving before it are cached; once the port is known, cached fragments
 * are looped back through this node.  Buffers already carry the IPv6 encap
 * header built by ip4-map — only the destination address is filled in here.
 */
407 ip4_map_reass (vlib_main_t *vm,
408 vlib_node_runtime_t *node,
411 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
412 vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_reass_node.index);
413 from = vlib_frame_vector_args(frame);
414 n_left_from = frame->n_vectors;
415 next_index = node->cached_next_index;
416 map_main_t *mm = &map_main;
417 vlib_combined_counter_main_t *cm = mm->domain_counters;
418 u32 cpu_index = os_get_cpu_number();
419 u32 *fragments_to_drop = NULL;
420 u32 *fragments_to_loopback = NULL;
422 while (n_left_from > 0) {
423 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
425 while (n_left_from > 0 && n_left_to_next > 0) {
429 u8 error0 = MAP_ERROR_NONE;
433 u32 next0 = IP4_MAP_REASS_NEXT_IP6_LOOKUP;
434 u32 map_domain_index0;
437 pi0 = to_next[0] = from[0];
443 p0 = vlib_get_buffer(vm, pi0);
/* The IPv6 encap header precedes the inner IPv4 header in the buffer. */
444 ip60 = vlib_buffer_get_current(p0);
445 ip40 = (ip4_header_t *)(ip60 + 1);
446 d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
448 map_ip4_reass_lock();
449 map_ip4_reass_t *r = map_ip4_reass_get(ip40->src_address.as_u32, ip40->dst_address.as_u32,
450 ip40->fragment_id, ip40->protocol, &fragments_to_drop);
451 if (PREDICT_FALSE(!r)) {
452 // Could not create a caching entry
453 error0 = MAP_ERROR_FRAGMENT_MEMORY;
454 } else if (PREDICT_TRUE(ip4_get_fragment_offset(ip40))) {
456 // We know the port already
458 } else if (map_ip4_reass_add_fragment(r, pi0)) {
459 // Not enough space for caching
460 error0 = MAP_ERROR_FRAGMENT_MEMORY;
461 map_ip4_reass_free(r, &fragments_to_drop);
465 } else if ((port0 = ip4_get_port(ip40, MAP_RECEIVER, p0->current_length)) < 0) {
466 // Could not find port. We'll free the reassembly.
467 error0 = MAP_ERROR_BAD_PROTOCOL;
469 map_ip4_reass_free(r, &fragments_to_drop);
/* First fragment with a valid port: release cached fragments. */
472 map_ip4_reass_get_fragments(r, &fragments_to_loopback);
475 #ifdef MAP_IP4_REASS_COUNT_BYTES
/* NOTE(review): clib_host_to_net_u16 on the network-order length field —
 * the u16 byte swap is symmetric so the value is correct, but
 * clib_net_to_host_u16 would express the intent. */
477 r->forwarded += clib_host_to_net_u16(ip40->length) - 20;
478 if (!ip4_get_fragment_more(ip40))
479 r->expected_total = ip4_get_fragment_offset(ip40) * 8 + clib_host_to_net_u16(ip40->length) - 20;
480 if(r->forwarded >= r->expected_total)
481 map_ip4_reass_free(r, &fragments_to_drop);
485 map_ip4_reass_unlock();
487 // NOTE: Most operations have already been performed by ip4_map
488 // All we need is the right destination address
489 ip60->dst_address.as_u64[0] = map_get_pfx_net(d0, ip40->dst_address.as_u32, port0);
490 ip60->dst_address.as_u64[1] = map_get_sfx_net(d0, ip40->dst_address.as_u32, port0);
492 if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip60->payload_length) + sizeof(*ip60) > d0->mtu))) {
493 vnet_buffer(p0)->ip_frag.header_offset = sizeof(*ip60);
494 vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
495 vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
496 vnet_buffer(p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
497 next0 = IP4_MAP_REASS_NEXT_IP4_FRAGMENT;
500 if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
501 map_ip4_map_reass_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
502 tr->map_domain_index = map_domain_index0;
512 if (error0 == MAP_ERROR_NONE)
513 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
514 clib_net_to_host_u16(ip60->payload_length) + 40);
515 next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP4_MAP_REASS_NEXT_DROP;
516 p0->error = error_node->errors[error0];
517 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
520 //Loopback when we reach the end of the input vector
521 if(n_left_from == 0 && vec_len(fragments_to_loopback)) {
/* Reuse the current frame to reinject released fragments (at most one
 * frame's worth per pass; the remainder stays queued). */
522 from = vlib_frame_vector_args(frame);
523 u32 len = vec_len(fragments_to_loopback);
524 if(len <= VLIB_FRAME_SIZE) {
525 memcpy(from, fragments_to_loopback, sizeof(u32)*len);
527 vec_reset_length(fragments_to_loopback);
529 memcpy(from, fragments_to_loopback + (len - VLIB_FRAME_SIZE), sizeof(u32)*VLIB_FRAME_SIZE);
530 n_left_from = VLIB_FRAME_SIZE;
531 _vec_len(fragments_to_loopback) = len - VLIB_FRAME_SIZE;
535 vlib_put_next_frame(vm, node, next_index, n_left_to_next);
538 map_send_all_to_node(vm, fragments_to_drop, node,
539 &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
540 IP4_MAP_REASS_NEXT_DROP);
542 vec_free(fragments_to_drop);
543 vec_free(fragments_to_loopback);
544 return frame->n_vectors;
/* Error counter strings, expanded from the MAP error #define list. */
547 static char *map_error_strings[] = {
548 #define _(sym,string) string,
/* Graph-node registration for ip4-map: IPv4 -> IPv6 MAP-E encapsulation. */
553 VLIB_REGISTER_NODE(ip4_map_node) = {
556 .vector_size = sizeof(u32),
557 .format_trace = format_map_trace,
558 .type = VLIB_NODE_TYPE_INTERNAL,
560 .n_errors = MAP_N_ERROR,
561 .error_strings = map_error_strings,
563 .n_next_nodes = IP4_MAP_N_NEXT,
565 [IP4_MAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
566 #ifdef MAP_SKIP_IP6_LOOKUP
567 [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-rewrite",
569 [IP4_MAP_NEXT_FRAGMENT] = "ip4-frag",
570 [IP4_MAP_NEXT_REASS] = "ip4-map-reass",
571 [IP4_MAP_NEXT_DROP] = "error-drop",
/* Graph-node registration for ip4-map-reass: MAP virtual reassembly. */
575 VLIB_REGISTER_NODE(ip4_map_reass_node) = {
576 .function = ip4_map_reass,
577 .name = "ip4-map-reass",
578 .vector_size = sizeof(u32),
579 .format_trace = format_ip4_map_reass_trace,
580 .type = VLIB_NODE_TYPE_INTERNAL,
582 .n_errors = MAP_N_ERROR,
583 .error_strings = map_error_strings,
585 .n_next_nodes = IP4_MAP_REASS_N_NEXT,
587 [IP4_MAP_REASS_NEXT_IP6_LOOKUP] = "ip6-lookup",
588 [IP4_MAP_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
589 [IP4_MAP_REASS_NEXT_DROP] = "error-drop",