2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * Defines used for testing various optimisation schemes
18 #define MAP_ENCAP_DUAL 0
21 #include "../ip/ip_frag.h"
/* Forward declaration: ip4_map_reass() below needs this node's index for
 * error accounting; the actual VLIB_REGISTER_NODE is at the end of the file. */
23 vlib_node_registration_t ip4_map_reass_node;
/* Next-node indices for the ip4-map node.
 * NOTE(review): the enum's opening line and some members (e.g. IP4_MAP_NEXT_REASS,
 * IP4_MAP_NEXT_DROP — both referenced later in this file) are missing from this
 * extract; the node registration at the bottom lists the full set. */
26 IP4_MAP_NEXT_IP6_LOOKUP,       /* default: hand encapsulated packet to ip6-lookup */
27 #ifdef MAP_SKIP_IP6_LOOKUP
28 IP4_MAP_NEXT_IP6_REWRITE,      /* bypass ip6-lookup using a precomputed adjacency */
30 IP4_MAP_NEXT_IP4_FRAGMENT,     /* fragment inner IPv4 before encap (frag_inner) */
31 IP4_MAP_NEXT_IP6_FRAGMENT,     /* fragment outer IPv6 after encap */
33 IP4_MAP_NEXT_ICMP_ERROR,       /* send ICMPv4 "frag needed" when DF is set */
/* Next-node indices for the ip4-map-reass node. */
38 enum ip4_map_reass_next_t {
39 IP4_MAP_REASS_NEXT_IP6_LOOKUP,
40 IP4_MAP_REASS_NEXT_IP4_FRAGMENT,
41 IP4_MAP_REASS_NEXT_DROP,
/* Trace record for ip4-map-reass; fields map_domain_index/port/cached are
 * referenced by format_ip4_map_reass_trace() below (struct body lines are
 * missing from this extract). */
49 } map_ip4_map_reass_trace_t;
/* Packet-trace formatter for the ip4-map-reass node: prints the MAP domain,
 * the derived L4 port, and whether the fragment was cached (awaiting the
 * first fragment) or forwarded immediately. */
52 format_ip4_map_reass_trace (u8 *s, va_list *args)
54 CLIB_UNUSED(vlib_main_t *vm) = va_arg (*args, vlib_main_t *);
55 CLIB_UNUSED(vlib_node_t *node) = va_arg (*args, vlib_node_t *);
56 map_ip4_map_reass_trace_t *t = va_arg (*args, map_ip4_map_reass_trace_t *);
57 return format(s, "MAP domain index: %d L4 port: %u Status: %s", t->map_domain_index,
58 t->port, t->cached?"cached":"forwarded");
/* Extract the transport port (network byte order) used for MAP PSID matching.
 * For TCP/UDP this is src or dst port depending on direction; for ICMP the
 * echo identifier is used, and for ICMP errors the port is dug out of the
 * embedded (inner) packet with the direction inverted.
 * NOTE(review): the fall-through return for unsupported protocols is in lines
 * missing from this extract — presumably returns an invalid-port sentinel. */
65 ip4_map_get_port (ip4_header_t *ip, map_dir_e dir)
67 /* Find port information */
68 if (PREDICT_TRUE((ip->protocol == IP_PROTOCOL_TCP) ||
69 (ip->protocol == IP_PROTOCOL_UDP))) {
/* TCP and UDP both start with src/dst port, so a udp_header_t view works for both. */
70 udp_header_t *udp = (void *)(ip + 1);
71 return (dir == MAP_SENDER ? udp->src_port : udp->dst_port);
72 } else if (ip->protocol == IP_PROTOCOL_ICMP) {
74 * 1) ICMP Echo request or Echo reply
75 * 2) ICMP Error with inner packet being UDP or TCP
76 * 3) ICMP Error with inner packet being ICMP Echo request or Echo reply
78 icmp46_header_t *icmp = (void *)(ip + 1);
79 if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply) {
/* Echo identifier immediately follows the 4-byte ICMP header. */
80 return *((u16 *)(icmp + 1));
81 } else if (clib_net_to_host_u16(ip->length) >= 56) { // IP + ICMP + IP + L4 header
/* icmp + 2 skips the 4-byte ICMP header plus the 4 unused/rest-of-header
 * bytes, landing on the embedded IPv4 header of the offending packet. */
82 ip4_header_t *icmp_ip = (ip4_header_t *)(icmp + 2);
83 if (PREDICT_TRUE((icmp_ip->protocol == IP_PROTOCOL_TCP) ||
84 (icmp_ip->protocol == IP_PROTOCOL_UDP))) {
85 udp_header_t *udp = (void *)(icmp_ip + 1);
/* Direction is inverted for the embedded packet: the error is sent back
 * toward the original sender. */
86 return (dir == MAP_SENDER ? udp->dst_port : udp->src_port);
87 } else if (icmp_ip->protocol == IP_PROTOCOL_ICMP) {
88 icmp46_header_t *inner_icmp = (void *)(icmp_ip + 1);
89 if (inner_icmp->type == ICMP4_echo_request || inner_icmp->type == ICMP4_echo_reply)
90 return (*((u16 *)(inner_icmp + 1)));
/* For shared-IPv4 (psid_length > 0) domains: derive the destination port and
 * enforce MAP security checks. On a first/non-fragment packet the port is
 * extracted directly; fragments are diverted to the ip4-map-reass node (the
 * port lives only in the first fragment). Sets *next / *error on failure.
 * NOTE(review): several branch-closing lines and the final return are missing
 * from this extract. */
97 static_always_inline u16
98 ip4_map_port_and_security_check (map_domain_t *d, ip4_header_t *ip, u32 *next, u8 *error)
102 if (d->psid_length > 0) {
103 if (!ip4_is_fragment(ip)) {
/* Reject packets with IP options (IHL != 5) or too short to carry a
 * transport header (total length < 28 = 20 IP + 8 L4/ICMP). */
104 if (PREDICT_FALSE((ip->ip_version_and_header_length != 0x45) || clib_host_to_net_u16(ip->length) < 28)) {
107 port = ip4_map_get_port(ip, MAP_RECEIVER);
109 /* Verify that port is not among the well-known ports */
110 if ((d->psid_offset > 0) && (clib_net_to_host_u16(port) < (0x1 << (16 - d->psid_offset)))) {
111 *error = MAP_ERROR_ENCAP_SEC_CHECK;
116 *error = MAP_ERROR_BAD_PROTOCOL;
/* Fragment of a shared-address flow: must go through virtual reassembly
 * to learn the port from the first fragment. */
119 *next = IP4_MAP_NEXT_REASS;
/* Build the IPv6 version/traffic-class/flow-label word for the encap header:
 * version 6, TC copied from the inner IPv4 TOS (or a configured fixed value),
 * and the low 20 bits of the buffer's flow hash as the flow label.
 * Returns the word in network byte order. */
128 static_always_inline u32
129 ip4_map_vtcfl (ip4_header_t *ip4, vlib_buffer_t *p)
131 map_main_t *mm = &map_main;
132 u8 tc = mm->tc_copy ? ip4->tos : mm->tc;
133 u32 vtcfl = 0x6 << 28;
/* NOTE(review): the line OR-ing `tc` into bits 20-27 is missing from this
 * extract — presumably `vtcfl |= tc << 20;`. */
135 vtcfl |= vnet_buffer(p)->ip.flow_hash & 0x000fffff;
137 return (clib_host_to_net_u32(vtcfl));
/* Optional fast path (MAP_SKIP_IP6_LOOKUP): if a pre-resolved IPv6 adjacency
 * exists, stamp it directly into the buffer's TX adjacency so the packet can
 * skip ip6-lookup and go straight to ip6-rewrite. Load-balances across
 * multipath adjacencies using the IPv4 flow hash. Returns true when the
 * bypass was applied (return lines missing from this extract). */
140 static_always_inline bool
141 ip4_map_ip6_lookup_bypass (vlib_buffer_t *p0, ip4_header_t *ip)
143 #ifdef MAP_SKIP_IP6_LOOKUP
144 map_main_t *mm = &map_main;
145 u32 adj_index0 = mm->adj6_index;
146 if (adj_index0 > 0) {
147 ip_lookup_main_t *lm6 = &ip6_main.lookup_main;
148 ip_adjacency_t *adj = ip_get_adjacency(lm6, mm->adj6_index);
149 if (adj->n_adj > 1) {
/* NOTE(review): masking with (n_adj - 1) only distributes evenly when
 * n_adj is a power of two — presumably guaranteed by the adjacency layer. */
150 u32 hash_c0 = ip4_compute_flow_hash(ip, IP_FLOW_HASH_DEFAULT);
151 adj_index0 += (hash_c0 & (adj->n_adj - 1));
153 vnet_buffer(p0)->ip.adj_index[VLIB_TX] = adj_index0;
/* Decrement the inner IPv4 TTL and update the header checksum incrementally
 * (RFC 1624 style): subtracting 1 from TTL adds 0x0100 (host order) to the
 * checksum, with end-around carry. Sets *error to TIME_EXPIRED if TTL hits 0.
 * NOTE(review): the actual `ttl` decrement/declaration lines are missing from
 * this extract; `ttl` at line 176 is defined in the gap. */
164 ip4_map_decrement_ttl (ip4_header_t *ip, u8 *error)
168 /* Input node should have reject packets with ttl 0. */
169 ASSERT (ip->ttl > 0);
171 u32 checksum = ip->checksum + clib_host_to_net_u16(0x0100);
/* Fold the end-around carry: if the 16-bit sum overflowed, add 1. */
172 checksum += checksum >= 0xffff;
173 ip->checksum = checksum;
176 *error = ttl <= 0 ? IP4_ERROR_TIME_EXPIRED : *error;
178 /* Verify checksum. */
179 ASSERT (ip->checksum == ip4_header_checksum(ip));
/* Choose a fragmentation strategy for an encapsulated packet exceeding the
 * domain MTU and return the corresponding next-node index:
 *  - frag_inner: fragment the inner IPv4 packet, each fragment gets an IPv6
 *    header prepended (header_offset = sizeof(ip6_header_t));
 *  - DF set and DF honored: rewind to the IPv4 header and emit an ICMPv4
 *    "fragmentation needed" error (PMTUD);
 *  - otherwise: fragment the outer IPv6 packet. */
183 ip4_map_fragment (vlib_buffer_t *b, u16 mtu, bool df, u8 *error)
185 map_main_t *mm = &map_main;
187 if (mm->frag_inner) {
188 ip_frag_set_vnet_buffer(b, sizeof(ip6_header_t), mtu, IP4_FRAG_NEXT_IP6_LOOKUP, IP_FRAG_FLAG_IP6_HEADER);
189 return (IP4_MAP_NEXT_IP4_FRAGMENT);
191 if (df && !mm->frag_ignore_df) {
192 icmp4_error_set_vnet_buffer(b, ICMP4_destination_unreachable,
193 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set, mtu);
/* Undo the earlier -sizeof(ip6_header_t) advance so the ICMP error is
 * generated against the original IPv4 packet. */
194 vlib_buffer_advance(b, sizeof(ip6_header_t));
195 *error = MAP_ERROR_DF_SET;
196 return (IP4_MAP_NEXT_ICMP_ERROR);
198 ip_frag_set_vnet_buffer(b, 0, mtu, IP6_FRAG_NEXT_IP6_LOOKUP, IP_FRAG_FLAG_IP6_HEADER);
199 return (IP4_MAP_NEXT_IP6_FRAGMENT);
/* ip4-map node function: IPv4 -> IPv6 MAP encapsulation.
 * For each packet: resolve the MAP domain from the TX adjacency, run the
 * port/security check, decrement TTL, compute the MAP IPv6 destination from
 * (dst addr, port), prepend an IPv6 header (IP-in-IP encap), then dispatch to
 * ip6-lookup / ip6-rewrite / fragmentation / reass / drop. Standard VPP
 * dual-loop (two packets per iteration with prefetch) followed by a
 * single-packet cleanup loop.
 * NOTE(review): many closing braces and some declarations are missing from
 * this extract; the dual and single loops are otherwise symmetric. */
207 ip4_map (vlib_main_t *vm,
208 vlib_node_runtime_t *node,
211 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
212 vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_node.index);
213 from = vlib_frame_vector_args(frame);
214 n_left_from = frame->n_vectors;
215 next_index = node->cached_next_index;
216 map_main_t *mm = &map_main;
217 vlib_combined_counter_main_t *cm = mm->domain_counters;
218 u32 cpu_index = os_get_cpu_number();
220 while (n_left_from > 0) {
221 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
/* Dual loop: process two packets per iteration while prefetching the next two. */
224 while (n_left_from >= 4 && n_left_to_next >= 2) {
226 vlib_buffer_t *p0, *p1;
227 map_domain_t *d0, *d1;
228 u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE;
229 ip4_header_t *ip40, *ip41;
230 u16 port0 = 0, port1 = 0;
231 ip6_header_t *ip6h0, *ip6h1;
232 u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
233 u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 = IP4_MAP_NEXT_IP6_LOOKUP;
235 /* Prefetch next iteration. */
237 vlib_buffer_t *p2, *p3;
239 p2 = vlib_get_buffer(vm, from[2]);
240 p3 = vlib_get_buffer(vm, from[3]);
242 vlib_prefetch_buffer_header(p2, STORE);
243 vlib_prefetch_buffer_header(p3, STORE);
244 /* IPv4 + 8 = 28. possibly plus -40 */
/* Prefetch 40 bytes before current data too: the IPv6 header will be
 * prepended there by vlib_buffer_advance(-sizeof(ip6_header_t)). */
245 CLIB_PREFETCH (p2->data-40, 68, STORE);
246 CLIB_PREFETCH (p3->data-40, 68, STORE);
249 pi0 = to_next[0] = from[0];
250 pi1 = to_next[1] = from[1];
256 p0 = vlib_get_buffer(vm, pi0);
257 p1 = vlib_get_buffer(vm, pi1);
258 ip40 = vlib_buffer_get_current(p0);
259 ip41 = vlib_buffer_get_current(p1);
/* The lookup stage stored the MAP domain's adjacency in VLIB_TX. */
260 d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
261 d1 = ip4_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX], &map_domain_index1);
266 * Shared IPv4 address
268 port0 = ip4_map_port_and_security_check(d0, ip40, &next0, &error0);
269 port1 = ip4_map_port_and_security_check(d1, ip41, &next1, &error1);
271 /* Decrement IPv4 TTL */
272 ip4_map_decrement_ttl(ip40, &error0);
273 ip4_map_decrement_ttl(ip41, &error1);
274 bool df0 = ip40->flags_and_fragment_offset & clib_host_to_net_u16(IP4_HEADER_FLAG_DONT_FRAGMENT);
275 bool df1 = ip41->flags_and_fragment_offset & clib_host_to_net_u16(IP4_HEADER_FLAG_DONT_FRAGMENT);
/* Derive the MAP IPv6 destination (prefix + suffix 64-bit halves) from
 * the IPv4 destination address and port; all-zero means no binding. */
278 u32 da40 = clib_net_to_host_u32(ip40->dst_address.as_u32);
279 u32 da41 = clib_net_to_host_u32(ip41->dst_address.as_u32);
280 u16 dp40 = clib_net_to_host_u16(port0);
281 u16 dp41 = clib_net_to_host_u16(port1);
282 u64 dal60 = map_get_pfx(d0, da40, dp40);
283 u64 dal61 = map_get_pfx(d1, da41, dp41);
284 u64 dar60 = map_get_sfx(d0, da40, dp40);
285 u64 dar61 = map_get_sfx(d1, da41, dp41);
286 if (dal60 == 0 && dar60 == 0) error0 = MAP_ERROR_NO_BINDING;
287 if (dal61 == 0 && dar61 == 0) error1 = MAP_ERROR_NO_BINDING;
289 /* construct ipv6 header */
290 vlib_buffer_advance(p0, - sizeof(ip6_header_t));
291 vlib_buffer_advance(p1, - sizeof(ip6_header_t));
292 ip6h0 = vlib_buffer_get_current(p0);
293 ip6h1 = vlib_buffer_get_current(p1);
294 vnet_buffer(p0)->sw_if_index[VLIB_TX] = (u32)~0;
295 vnet_buffer(p1)->sw_if_index[VLIB_TX] = (u32)~0;
297 ip6h0->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip40, p0);
298 ip6h1->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip41, p1);
/* IPv6 payload = entire inner IPv4 packet; ip4 total length is already
 * in network byte order, so it is copied as-is. */
299 ip6h0->payload_length = ip40->length;
300 ip6h1->payload_length = ip41->length;
301 ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
302 ip6h1->protocol = IP_PROTOCOL_IP_IN_IP;
303 ip6h0->hop_limit = 0x40;
304 ip6h1->hop_limit = 0x40;
305 ip6h0->src_address = d0->ip6_src;
306 ip6h1->src_address = d1->ip6_src;
307 ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64(dal60);
308 ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64(dar60);
309 ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64(dal61);
310 ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64(dar61);
313 * Determine next node. Can be one of:
314 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
316 if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
317 if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip6h0->payload_length) + sizeof(*ip6h0) > d0->mtu))) {
318 next0 = ip4_map_fragment(p0, d0->mtu, df0, &error0);
320 next0 = ip4_map_ip6_lookup_bypass(p0, ip40) ? IP4_MAP_NEXT_IP6_REWRITE : next0;
/* Count encapsulated bytes (payload + 40-byte IPv6 header) per domain. */
321 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
322 clib_net_to_host_u16(ip6h0->payload_length) + 40);
325 next0 = IP4_MAP_NEXT_DROP;
329 * Determine next node. Can be one of:
330 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
332 if (PREDICT_TRUE(error1 == MAP_ERROR_NONE)) {
333 if (PREDICT_FALSE(d1->mtu && (clib_net_to_host_u16(ip6h1->payload_length) + sizeof(*ip6h1) > d1->mtu))) {
334 next1 = ip4_map_fragment(p1, d1->mtu, df1, &error1);
336 next1 = ip4_map_ip6_lookup_bypass(p1, ip41) ? IP4_MAP_NEXT_IP6_REWRITE : next1;
337 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index1, 1,
338 clib_net_to_host_u16(ip6h1->payload_length) + 40);
341 next1 = IP4_MAP_NEXT_DROP;
344 if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
345 map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
346 tr->map_domain_index = map_domain_index0;
349 if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED)) {
350 map_trace_t *tr = vlib_add_trace(vm, node, p1, sizeof(*tr));
351 tr->map_domain_index = map_domain_index1;
355 p0->error = error_node->errors[error0];
356 p1->error = error_node->errors[error1];
358 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, pi0, pi1, next0, next1);
/* Single-packet loop: same pipeline as the dual loop above, one at a time. */
361 while (n_left_from > 0 && n_left_to_next > 0) {
365 u8 error0 = MAP_ERROR_NONE;
369 u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP;
370 u32 map_domain_index0 = ~0;
372 pi0 = to_next[0] = from[0];
378 p0 = vlib_get_buffer(vm, pi0);
379 ip40 = vlib_buffer_get_current(p0);
380 d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
384 * Shared IPv4 address
386 port0 = ip4_map_port_and_security_check(d0, ip40, &next0, &error0);
388 /* Decrement IPv4 TTL */
389 ip4_map_decrement_ttl(ip40, &error0);
390 bool df0 = ip40->flags_and_fragment_offset & clib_host_to_net_u16(IP4_HEADER_FLAG_DONT_FRAGMENT);
393 u32 da40 = clib_net_to_host_u32(ip40->dst_address.as_u32);
394 u16 dp40 = clib_net_to_host_u16(port0);
395 u64 dal60 = map_get_pfx(d0, da40, dp40);
396 u64 dar60 = map_get_sfx(d0, da40, dp40);
397 if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE) error0 = MAP_ERROR_NO_BINDING;
399 /* construct ipv6 header */
400 vlib_buffer_advance(p0, - (sizeof(ip6_header_t)));
401 ip6h0 = vlib_buffer_get_current(p0);
402 vnet_buffer(p0)->sw_if_index[VLIB_TX] = (u32)~0;
404 ip6h0->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip40, p0);
405 ip6h0->payload_length = ip40->length;
406 ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
407 ip6h0->hop_limit = 0x40;
408 ip6h0->src_address = d0->ip6_src;
409 ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64(dal60);
410 ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64(dar60);
413 * Determine next node. Can be one of:
414 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
416 if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
417 if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip6h0->payload_length) + sizeof(*ip6h0) > d0->mtu))) {
418 next0 = ip4_map_fragment(p0, d0->mtu, df0, &error0);
420 next0 = ip4_map_ip6_lookup_bypass(p0, ip40) ? IP4_MAP_NEXT_IP6_REWRITE : next0;
421 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
422 clib_net_to_host_u16(ip6h0->payload_length) + 40);
425 next0 = IP4_MAP_NEXT_DROP;
428 if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
429 map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
430 tr->map_domain_index = map_domain_index0;
434 p0->error = error_node->errors[error0];
435 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
437 vlib_put_next_frame(vm, node, next_index, n_left_to_next);
440 return frame->n_vectors;
/* ip4-map-reass node function: "virtual reassembly" for fragmented IPv4
 * packets of shared-address MAP flows. The L4 port lives only in the first
 * fragment, so non-first fragments arriving before it are cached in a
 * reassembly entry; once the port is learned, cached fragments are looped
 * back through this node (fragments_to_loopback) and forwarded with the
 * correct MAP IPv6 destination. Packets arrive here already encapsulated by
 * ip4_map (IPv6 header in place, only dst_address left to fix up).
 * NOTE(review): several closing braces and declarations (e.g. `port0`,
 * `cached0`) are in lines missing from this extract. */
447 ip4_map_reass (vlib_main_t *vm,
448 vlib_node_runtime_t *node,
451 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
452 vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_reass_node.index);
453 from = vlib_frame_vector_args(frame);
454 n_left_from = frame->n_vectors;
455 next_index = node->cached_next_index;
456 map_main_t *mm = &map_main;
457 vlib_combined_counter_main_t *cm = mm->domain_counters;
458 u32 cpu_index = os_get_cpu_number();
/* Vectors collected across the whole frame, flushed after the loops. */
459 u32 *fragments_to_drop = NULL;
460 u32 *fragments_to_loopback = NULL;
462 while (n_left_from > 0) {
463 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
465 while (n_left_from > 0 && n_left_to_next > 0) {
469 u8 error0 = MAP_ERROR_NONE;
473 u32 next0 = IP4_MAP_REASS_NEXT_IP6_LOOKUP;
474 u32 map_domain_index0;
477 pi0 = to_next[0] = from[0];
483 p0 = vlib_get_buffer(vm, pi0);
/* Buffer current data starts at the already-prepended IPv6 header;
 * the original IPv4 header follows immediately after it. */
484 ip60 = vlib_buffer_get_current(p0);
485 ip40 = (ip4_header_t *)(ip60 + 1);
486 d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
/* Reassembly state is shared across workers — lock around all r accesses. */
488 map_ip4_reass_lock();
489 map_ip4_reass_t *r = map_ip4_reass_get(ip40->src_address.as_u32, ip40->dst_address.as_u32,
490 ip40->fragment_id, ip40->protocol, &fragments_to_drop);
491 if (PREDICT_FALSE(!r)) {
492 // Could not create a caching entry
493 error0 = MAP_ERROR_FRAGMENT_MEMORY;
494 } else if (PREDICT_TRUE(ip4_get_fragment_offset(ip40))) {
496 // We know the port already
498 } else if (map_ip4_reass_add_fragment(r, pi0)) {
499 // Not enough space for caching
500 error0 = MAP_ERROR_FRAGMENT_MEMORY;
501 map_ip4_reass_free(r, &fragments_to_drop);
/* First fragment (offset 0): extract the port; negative means failure. */
505 } else if ((port0 = ip4_get_port(ip40, MAP_RECEIVER, p0->current_length)) < 0) {
506 // Could not find port. We'll free the reassembly.
507 error0 = MAP_ERROR_BAD_PROTOCOL;
509 map_ip4_reass_free(r, &fragments_to_drop);
/* Port learned: release all cached fragments for loopback processing. */
512 map_ip4_reass_get_fragments(r, &fragments_to_loopback);
515 #ifdef MAP_IP4_REASS_COUNT_BYTES
/* Track forwarded payload bytes; once the last fragment's end offset is
 * known and everything has been forwarded, the entry can be freed early. */
517 r->forwarded += clib_host_to_net_u16(ip40->length) - 20;
518 if (!ip4_get_fragment_more(ip40))
519 r->expected_total = ip4_get_fragment_offset(ip40) * 8 + clib_host_to_net_u16(ip40->length) - 20;
520 if(r->forwarded >= r->expected_total)
521 map_ip4_reass_free(r, &fragments_to_drop);
525 map_ip4_reass_unlock();
527 // NOTE: Most operations have already been performed by ip4_map
528 // All we need is the right destination address
529 ip60->dst_address.as_u64[0] = map_get_pfx_net(d0, ip40->dst_address.as_u32, port0);
530 ip60->dst_address.as_u64[1] = map_get_sfx_net(d0, ip40->dst_address.as_u32, port0);
532 if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip60->payload_length) + sizeof(*ip60) > d0->mtu))) {
533 vnet_buffer(p0)->ip_frag.header_offset = sizeof(*ip60);
534 vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
535 vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
536 vnet_buffer(p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
537 next0 = IP4_MAP_REASS_NEXT_IP4_FRAGMENT;
540 if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
541 map_ip4_map_reass_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
542 tr->map_domain_index = map_domain_index0;
552 if (error0 == MAP_ERROR_NONE)
553 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
554 clib_net_to_host_u16(ip60->payload_length) + 40);
555 next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP4_MAP_REASS_NEXT_DROP;
556 p0->error = error_node->errors[error0];
557 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
560 //Loopback when we reach the end of the input vector
/* Re-inject cached fragments by rewriting the frame's input vector in
 * place, VLIB_FRAME_SIZE packets at a time. */
561 if(n_left_from == 0 && vec_len(fragments_to_loopback)) {
562 from = vlib_frame_vector_args(frame);
563 u32 len = vec_len(fragments_to_loopback);
564 if(len <= VLIB_FRAME_SIZE) {
565 memcpy(from, fragments_to_loopback, sizeof(u32)*len);
567 vec_reset_length(fragments_to_loopback);
569 memcpy(from, fragments_to_loopback + (len - VLIB_FRAME_SIZE), sizeof(u32)*VLIB_FRAME_SIZE);
570 n_left_from = VLIB_FRAME_SIZE;
571 _vec_len(fragments_to_loopback) = len - VLIB_FRAME_SIZE;
575 vlib_put_next_frame(vm, node, next_index, n_left_to_next);
/* Dispose of fragments evicted/freed during reassembly as explicit drops. */
578 map_send_all_to_node(vm, fragments_to_drop, node,
579 &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
580 IP4_MAP_REASS_NEXT_DROP);
582 vec_free(fragments_to_drop);
583 vec_free(fragments_to_loopback);
584 return frame->n_vectors;
/* Human-readable error counter strings, expanded from the MAP error list
 * macro (the foreach/#undef lines are missing from this extract). */
587 static char *map_error_strings[] = {
588 #define _(sym,string) string,
/* Graph-node registration for ip4-map (the encapsulation node above).
 * NOTE(review): .function/.name lines are missing from this extract. */
593 VLIB_REGISTER_NODE(ip4_map_node) = {
596 .vector_size = sizeof(u32),
597 .format_trace = format_map_trace,
598 .type = VLIB_NODE_TYPE_INTERNAL,
600 .n_errors = MAP_N_ERROR,
601 .error_strings = map_error_strings,
603 .n_next_nodes = IP4_MAP_N_NEXT,
605 [IP4_MAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
606 #ifdef MAP_SKIP_IP6_LOOKUP
607 [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-rewrite",
609 [IP4_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
610 [IP4_MAP_NEXT_IP6_FRAGMENT] = "ip6-frag",
611 [IP4_MAP_NEXT_REASS] = "ip4-map-reass",
612 [IP4_MAP_NEXT_ICMP_ERROR] = "ip4-icmp-error",
613 [IP4_MAP_NEXT_DROP] = "error-drop",
/* Graph-node registration for ip4-map-reass (virtual reassembly node above). */
617 VLIB_REGISTER_NODE(ip4_map_reass_node) = {
618 .function = ip4_map_reass,
619 .name = "ip4-map-reass",
620 .vector_size = sizeof(u32),
621 .format_trace = format_ip4_map_reass_trace,
622 .type = VLIB_NODE_TYPE_INTERNAL,
624 .n_errors = MAP_N_ERROR,
625 .error_strings = map_error_strings,
627 .n_next_nodes = IP4_MAP_REASS_N_NEXT,
629 [IP4_MAP_REASS_NEXT_IP6_LOOKUP] = "ip6-lookup",
630 [IP4_MAP_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
631 [IP4_MAP_REASS_NEXT_DROP] = "error-drop",