 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * Defines used for testing various optimisation schemes
20 #include <vnet/ip/ip_frag.h>
21 #include <vnet/ip/ip4_to_ip6.h>
23 vlib_node_registration_t ip4_map_reass_node;
27 IP4_MAP_NEXT_IP6_LOOKUP,
28 #ifdef MAP_SKIP_IP6_LOOKUP
29 IP4_MAP_NEXT_IP6_REWRITE,
31 IP4_MAP_NEXT_IP4_FRAGMENT,
32 IP4_MAP_NEXT_IP6_FRAGMENT,
34 IP4_MAP_NEXT_ICMP_ERROR,
39 enum ip4_map_reass_next_t
41 IP4_MAP_REASS_NEXT_IP6_LOOKUP,
42 IP4_MAP_REASS_NEXT_IP4_FRAGMENT,
43 IP4_MAP_REASS_NEXT_DROP,
52 } map_ip4_map_reass_trace_t;
55 format_ip4_map_reass_trace (u8 * s, va_list * args)
57 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
58 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
59 map_ip4_map_reass_trace_t *t = va_arg (*args, map_ip4_map_reass_trace_t *);
60 return format (s, "MAP domain index: %d L4 port: %u Status: %s",
61 t->map_domain_index, t->port,
62 t->cached ? "cached" : "forwarded");
65 static_always_inline u16
66 ip4_map_port_and_security_check (map_domain_t * d, ip4_header_t * ip,
67 u32 * next, u8 * error)
71 if (d->psid_length > 0)
73 if (ip4_get_fragment_offset (ip) == 0)
76 ((ip->ip_version_and_header_length != 0x45)
77 || clib_host_to_net_u16 (ip->length) < 28))
81 port = ip4_get_port (ip, 0);
84 /* Verify that port is not among the well-known ports */
85 if ((d->psid_offset > 0)
86 && (clib_net_to_host_u16 (port) <
87 (0x1 << (16 - d->psid_offset))))
89 *error = MAP_ERROR_ENCAP_SEC_CHECK;
93 if (ip4_get_fragment_more (ip))
94 *next = IP4_MAP_NEXT_REASS;
100 *error = MAP_ERROR_BAD_PROTOCOL;
105 *next = IP4_MAP_NEXT_REASS;
114 static_always_inline u32
115 ip4_map_vtcfl (ip4_header_t * ip4, vlib_buffer_t * p)
117 map_main_t *mm = &map_main;
118 u8 tc = mm->tc_copy ? ip4->tos : mm->tc;
119 u32 vtcfl = 0x6 << 28;
121 vtcfl |= vnet_buffer (p)->ip.flow_hash & 0x000fffff;
123 return (clib_host_to_net_u32 (vtcfl));
126 static_always_inline bool
127 ip4_map_ip6_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
129 #ifdef MAP_SKIP_IP6_LOOKUP
130 if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP6].fei)
132 vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
133 pre_resolved[FIB_PROTOCOL_IP6].dpo.dpoi_index;
144 ip4_map_decrement_ttl (ip4_header_t * ip, u8 * error)
148 /* Input node should have reject packets with ttl 0. */
149 ASSERT (ip->ttl > 0);
151 u32 checksum = ip->checksum + clib_host_to_net_u16 (0x0100);
152 checksum += checksum >= 0xffff;
153 ip->checksum = checksum;
156 *error = ttl <= 0 ? IP4_ERROR_TIME_EXPIRED : *error;
158 /* Verify checksum. */
159 ASSERT (ip->checksum == ip4_header_checksum (ip));
163 ip4_map_fragment (vlib_buffer_t * b, u16 mtu, bool df, u8 * error)
165 map_main_t *mm = &map_main;
169 // TODO: Fix inner fragmentation after removed inner support from ip-frag.
170 ip_frag_set_vnet_buffer (b, /*sizeof (ip6_header_t), */ mtu,
171 IP4_FRAG_NEXT_IP6_LOOKUP,
172 IP_FRAG_FLAG_IP6_HEADER);
173 return (IP4_MAP_NEXT_IP4_FRAGMENT);
177 if (df && !mm->frag_ignore_df)
179 icmp4_error_set_vnet_buffer (b, ICMP4_destination_unreachable,
180 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
182 vlib_buffer_advance (b, sizeof (ip6_header_t));
183 *error = MAP_ERROR_DF_SET;
184 return (IP4_MAP_NEXT_ICMP_ERROR);
186 ip_frag_set_vnet_buffer (b, mtu, IP6_FRAG_NEXT_IP6_LOOKUP,
187 IP_FRAG_FLAG_IP6_HEADER);
188 return (IP4_MAP_NEXT_IP6_FRAGMENT);
196 ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
198 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
199 vlib_node_runtime_t *error_node =
200 vlib_node_get_runtime (vm, ip4_map_node.index);
201 from = vlib_frame_vector_args (frame);
202 n_left_from = frame->n_vectors;
203 next_index = node->cached_next_index;
204 map_main_t *mm = &map_main;
205 vlib_combined_counter_main_t *cm = mm->domain_counters;
206 u32 thread_index = vm->thread_index;
208 while (n_left_from > 0)
210 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
213 while (n_left_from >= 4 && n_left_to_next >= 2)
216 vlib_buffer_t *p0, *p1;
217 map_domain_t *d0, *d1;
218 u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE;
219 ip4_header_t *ip40, *ip41;
220 u16 port0 = 0, port1 = 0;
221 ip6_header_t *ip6h0, *ip6h1;
222 u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
223 u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 =
224 IP4_MAP_NEXT_IP6_LOOKUP;
226 /* Prefetch next iteration. */
228 vlib_buffer_t *p2, *p3;
230 p2 = vlib_get_buffer (vm, from[2]);
231 p3 = vlib_get_buffer (vm, from[3]);
233 vlib_prefetch_buffer_header (p2, STORE);
234 vlib_prefetch_buffer_header (p3, STORE);
235 /* IPv4 + 8 = 28. possibly plus -40 */
236 CLIB_PREFETCH (p2->data - 40, 68, STORE);
237 CLIB_PREFETCH (p3->data - 40, 68, STORE);
240 pi0 = to_next[0] = from[0];
241 pi1 = to_next[1] = from[1];
247 p0 = vlib_get_buffer (vm, pi0);
248 p1 = vlib_get_buffer (vm, pi1);
249 ip40 = vlib_buffer_get_current (p0);
250 ip41 = vlib_buffer_get_current (p1);
252 ip4_map_get_domain (&ip40->dst_address, &map_domain_index0,
255 ip4_map_get_domain (&ip41->dst_address, &map_domain_index1,
259 * Shared IPv4 address
261 port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
262 port1 = ip4_map_port_and_security_check (d1, ip41, &next1, &error1);
264 /* Decrement IPv4 TTL */
265 ip4_map_decrement_ttl (ip40, &error0);
266 ip4_map_decrement_ttl (ip41, &error1);
268 ip40->flags_and_fragment_offset &
269 clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
271 ip41->flags_and_fragment_offset &
272 clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
275 u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
276 u32 da41 = clib_net_to_host_u32 (ip41->dst_address.as_u32);
277 u16 dp40 = clib_net_to_host_u16 (port0);
278 u16 dp41 = clib_net_to_host_u16 (port1);
279 u64 dal60 = map_get_pfx (d0, da40, dp40);
280 u64 dal61 = map_get_pfx (d1, da41, dp41);
281 u64 dar60 = map_get_sfx (d0, da40, dp40);
282 u64 dar61 = map_get_sfx (d1, da41, dp41);
283 if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
284 && next0 != IP4_MAP_NEXT_REASS)
285 error0 = MAP_ERROR_NO_BINDING;
286 if (dal61 == 0 && dar61 == 0 && error1 == MAP_ERROR_NONE
287 && next1 != IP4_MAP_NEXT_REASS)
288 error1 = MAP_ERROR_NO_BINDING;
290 /* construct ipv6 header */
291 vlib_buffer_advance (p0, -sizeof (ip6_header_t));
292 vlib_buffer_advance (p1, -sizeof (ip6_header_t));
293 ip6h0 = vlib_buffer_get_current (p0);
294 ip6h1 = vlib_buffer_get_current (p1);
295 vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
296 vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
298 ip6h0->ip_version_traffic_class_and_flow_label =
299 ip4_map_vtcfl (ip40, p0);
300 ip6h1->ip_version_traffic_class_and_flow_label =
301 ip4_map_vtcfl (ip41, p1);
302 ip6h0->payload_length = ip40->length;
303 ip6h1->payload_length = ip41->length;
304 ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
305 ip6h1->protocol = IP_PROTOCOL_IP_IN_IP;
306 ip6h0->hop_limit = 0x40;
307 ip6h1->hop_limit = 0x40;
308 ip6h0->src_address = d0->ip6_src;
309 ip6h1->src_address = d1->ip6_src;
310 ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
311 ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
312 ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64 (dal61);
313 ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64 (dar61);
316 * Determine next node. Can be one of:
317 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
319 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
323 && (clib_net_to_host_u16 (ip6h0->payload_length) +
324 sizeof (*ip6h0) > d0->mtu)))
326 next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
331 ip4_map_ip6_lookup_bypass (p0,
333 IP4_MAP_NEXT_IP6_REWRITE : next0;
334 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
336 map_domain_index0, 1,
338 (ip6h0->payload_length) +
344 next0 = IP4_MAP_NEXT_DROP;
348 * Determine next node. Can be one of:
349 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
351 if (PREDICT_TRUE (error1 == MAP_ERROR_NONE))
355 && (clib_net_to_host_u16 (ip6h1->payload_length) +
356 sizeof (*ip6h1) > d1->mtu)))
358 next1 = ip4_map_fragment (p1, d1->mtu, df1, &error1);
363 ip4_map_ip6_lookup_bypass (p1,
365 IP4_MAP_NEXT_IP6_REWRITE : next1;
366 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
368 map_domain_index1, 1,
370 (ip6h1->payload_length) +
376 next1 = IP4_MAP_NEXT_DROP;
379 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
381 map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
382 tr->map_domain_index = map_domain_index0;
385 if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
387 map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
388 tr->map_domain_index = map_domain_index1;
392 p0->error = error_node->errors[error0];
393 p1->error = error_node->errors[error1];
395 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
396 n_left_to_next, pi0, pi1, next0,
400 while (n_left_from > 0 && n_left_to_next > 0)
405 u8 error0 = MAP_ERROR_NONE;
409 u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP;
410 u32 map_domain_index0 = ~0;
412 pi0 = to_next[0] = from[0];
418 p0 = vlib_get_buffer (vm, pi0);
419 ip40 = vlib_buffer_get_current (p0);
422 ip4_map_get_domain (&ip40->dst_address, &map_domain_index0,
425 { /* Guess it wasn't for us */
426 vnet_feature_next (&next0, p0);
431 * Shared IPv4 address
433 port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0);
435 /* Decrement IPv4 TTL */
436 ip4_map_decrement_ttl (ip40, &error0);
438 ip40->flags_and_fragment_offset &
439 clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT);
442 u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32);
443 u16 dp40 = clib_net_to_host_u16 (port0);
444 u64 dal60 = map_get_pfx (d0, da40, dp40);
445 u64 dar60 = map_get_sfx (d0, da40, dp40);
446 if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE
447 && next0 != IP4_MAP_NEXT_REASS)
448 error0 = MAP_ERROR_NO_BINDING;
450 /* construct ipv6 header */
451 vlib_buffer_advance (p0, -(sizeof (ip6_header_t)));
452 ip6h0 = vlib_buffer_get_current (p0);
453 vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
455 ip6h0->ip_version_traffic_class_and_flow_label =
456 ip4_map_vtcfl (ip40, p0);
457 ip6h0->payload_length = ip40->length;
458 ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
459 ip6h0->hop_limit = 0x40;
460 ip6h0->src_address = d0->ip6_src;
461 ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60);
462 ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60);
465 * Determine next node. Can be one of:
466 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
468 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
472 && (clib_net_to_host_u16 (ip6h0->payload_length) +
473 sizeof (*ip6h0) > d0->mtu)))
475 next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0);
480 ip4_map_ip6_lookup_bypass (p0,
482 IP4_MAP_NEXT_IP6_REWRITE : next0;
483 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
485 map_domain_index0, 1,
487 (ip6h0->payload_length) +
493 next0 = IP4_MAP_NEXT_DROP;
496 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
498 map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
499 tr->map_domain_index = map_domain_index0;
503 p0->error = error_node->errors[error0];
505 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
506 n_left_to_next, pi0, next0);
508 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
511 return frame->n_vectors;
518 ip4_map_reass (vlib_main_t * vm,
519 vlib_node_runtime_t * node, vlib_frame_t * frame)
521 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
522 vlib_node_runtime_t *error_node =
523 vlib_node_get_runtime (vm, ip4_map_reass_node.index);
524 from = vlib_frame_vector_args (frame);
525 n_left_from = frame->n_vectors;
526 next_index = node->cached_next_index;
527 map_main_t *mm = &map_main;
528 vlib_combined_counter_main_t *cm = mm->domain_counters;
529 u32 thread_index = vm->thread_index;
530 u32 *fragments_to_drop = NULL;
531 u32 *fragments_to_loopback = NULL;
533 while (n_left_from > 0)
535 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
537 while (n_left_from > 0 && n_left_to_next > 0)
542 u8 error0 = MAP_ERROR_NONE;
546 u32 next0 = IP4_MAP_REASS_NEXT_IP6_LOOKUP;
547 u32 map_domain_index0 = ~0;
550 pi0 = to_next[0] = from[0];
556 p0 = vlib_get_buffer (vm, pi0);
557 ip60 = vlib_buffer_get_current (p0);
558 ip40 = (ip4_header_t *) (ip60 + 1);
560 ip4_map_get_domain (&ip40->dst_address, &map_domain_index0,
563 map_ip4_reass_lock ();
564 map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32,
565 ip40->dst_address.as_u32,
569 if (PREDICT_FALSE (!r))
571 // Could not create a caching entry
572 error0 = MAP_ERROR_FRAGMENT_MEMORY;
574 else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
578 // We know the port already
581 else if (map_ip4_reass_add_fragment (r, pi0))
583 // Not enough space for caching
584 error0 = MAP_ERROR_FRAGMENT_MEMORY;
585 map_ip4_reass_free (r, &fragments_to_drop);
592 else if ((port0 = ip4_get_port (ip40, 0)) == 0)
594 // Could not find port. We'll free the reassembly.
595 error0 = MAP_ERROR_BAD_PROTOCOL;
597 map_ip4_reass_free (r, &fragments_to_drop);
602 map_ip4_reass_get_fragments (r, &fragments_to_loopback);
605 #ifdef MAP_IP4_REASS_COUNT_BYTES
608 r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
609 if (!ip4_get_fragment_more (ip40))
611 ip4_get_fragment_offset (ip40) * 8 +
612 clib_host_to_net_u16 (ip40->length) - 20;
613 if (r->forwarded >= r->expected_total)
614 map_ip4_reass_free (r, &fragments_to_drop);
618 map_ip4_reass_unlock ();
620 // NOTE: Most operations have already been performed by ip4_map
621 // All we need is the right destination address
622 ip60->dst_address.as_u64[0] =
623 map_get_pfx_net (d0, ip40->dst_address.as_u32, port0);
624 ip60->dst_address.as_u64[1] =
625 map_get_sfx_net (d0, ip40->dst_address.as_u32, port0);
629 && (clib_net_to_host_u16 (ip60->payload_length) +
630 sizeof (*ip60) > d0->mtu)))
632 // TODO: vnet_buffer (p0)->ip_frag.header_offset = sizeof (*ip60);
633 vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
634 vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
635 vnet_buffer (p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
636 next0 = IP4_MAP_REASS_NEXT_IP4_FRAGMENT;
639 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
641 map_ip4_map_reass_trace_t *tr =
642 vlib_add_trace (vm, node, p0, sizeof (*tr));
643 tr->map_domain_index = map_domain_index0;
656 if (error0 == MAP_ERROR_NONE)
657 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
659 map_domain_index0, 1,
661 (ip60->payload_length) + 40);
663 (error0 == MAP_ERROR_NONE) ? next0 : IP4_MAP_REASS_NEXT_DROP;
664 p0->error = error_node->errors[error0];
665 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
666 n_left_to_next, pi0, next0);
669 //Loopback when we reach the end of the inpu vector
670 if (n_left_from == 0 && vec_len (fragments_to_loopback))
672 from = vlib_frame_vector_args (frame);
673 u32 len = vec_len (fragments_to_loopback);
674 if (len <= VLIB_FRAME_SIZE)
676 clib_memcpy_fast (from, fragments_to_loopback,
679 vec_reset_length (fragments_to_loopback);
683 clib_memcpy_fast (from, fragments_to_loopback +
684 (len - VLIB_FRAME_SIZE),
685 sizeof (u32) * VLIB_FRAME_SIZE);
686 n_left_from = VLIB_FRAME_SIZE;
687 _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
691 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
694 map_send_all_to_node (vm, fragments_to_drop, node,
695 &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
696 IP4_MAP_REASS_NEXT_DROP);
698 vec_free (fragments_to_drop);
699 vec_free (fragments_to_loopback);
700 return frame->n_vectors;
703 static char *map_error_strings[] = {
704 #define _(sym,string) string,
711 VNET_FEATURE_INIT (ip4_map_feature, static) =
713 .arc_name = "ip4-unicast",
714 .node_name = "ip4-map",
716 VNET_FEATURES ("ip4-flow-classify"),
719 VLIB_REGISTER_NODE(ip4_map_node) = {
722 .vector_size = sizeof(u32),
723 .format_trace = format_map_trace,
724 .type = VLIB_NODE_TYPE_INTERNAL,
726 .n_errors = MAP_N_ERROR,
727 .error_strings = map_error_strings,
729 .n_next_nodes = IP4_MAP_N_NEXT,
731 [IP4_MAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
732 #ifdef MAP_SKIP_IP6_LOOKUP
733 [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-load-balance",
735 [IP4_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
736 [IP4_MAP_NEXT_IP6_FRAGMENT] = "ip6-frag",
737 [IP4_MAP_NEXT_REASS] = "ip4-map-reass",
738 [IP4_MAP_NEXT_ICMP_ERROR] = "ip4-icmp-error",
739 [IP4_MAP_NEXT_DROP] = "error-drop",
745 VLIB_REGISTER_NODE(ip4_map_reass_node) = {
746 .function = ip4_map_reass,
747 .name = "ip4-map-reass",
748 .vector_size = sizeof(u32),
749 .format_trace = format_ip4_map_reass_trace,
750 .type = VLIB_NODE_TYPE_INTERNAL,
752 .n_errors = MAP_N_ERROR,
753 .error_strings = map_error_strings,
755 .n_next_nodes = IP4_MAP_REASS_N_NEXT,
757 [IP4_MAP_REASS_NEXT_IP6_LOOKUP] = "ip6-lookup",
758 [IP4_MAP_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
759 [IP4_MAP_REASS_NEXT_DROP] = "error-drop",
 * fd.io coding-style-patch-verification: ON
 * eval: (c-set-style "gnu")