/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Defines used for testing various optimisation schemes */
18 #define MAP_ENCAP_DUAL 0
21 #include "../ip/ip_frag.h"
23 vlib_node_registration_t ip4_map_reass_node;
/*
 * Next-node indices for the ip4-map node.  Must stay in sync with the
 * .next_nodes table of ip4_map_node below (which indexes by these values,
 * including IP4_MAP_NEXT_REASS, IP4_MAP_NEXT_DROP and IP4_MAP_N_NEXT).
 */
enum ip4_map_next_t {
  IP4_MAP_NEXT_IP6_LOOKUP,
#ifdef MAP_SKIP_IP6_LOOKUP
  IP4_MAP_NEXT_IP6_REWRITE,
#endif
  IP4_MAP_NEXT_IP4_FRAGMENT,
  IP4_MAP_NEXT_IP6_FRAGMENT,
  IP4_MAP_NEXT_REASS,
  IP4_MAP_NEXT_ICMP_ERROR,
  IP4_MAP_NEXT_DROP,
  IP4_MAP_N_NEXT,
};
38 enum ip4_map_reass_next_t {
39 IP4_MAP_REASS_NEXT_IP6_LOOKUP,
40 IP4_MAP_REASS_NEXT_IP4_FRAGMENT,
41 IP4_MAP_REASS_NEXT_DROP,
49 } map_ip4_map_reass_trace_t;
52 format_ip4_map_reass_trace (u8 *s, va_list *args)
54 CLIB_UNUSED(vlib_main_t *vm) = va_arg (*args, vlib_main_t *);
55 CLIB_UNUSED(vlib_node_t *node) = va_arg (*args, vlib_node_t *);
56 map_ip4_map_reass_trace_t *t = va_arg (*args, map_ip4_map_reass_trace_t *);
57 return format(s, "MAP domain index: %d L4 port: %u Status: %s", t->map_domain_index,
58 t->port, t->cached?"cached":"forwarded");
65 ip4_map_get_port (ip4_header_t *ip, map_dir_e dir)
67 /* Find port information */
68 if (PREDICT_TRUE((ip->protocol == IP_PROTOCOL_TCP) ||
69 (ip->protocol == IP_PROTOCOL_UDP))) {
70 udp_header_t *udp = (void *)(ip + 1);
71 return (dir == MAP_SENDER ? udp->src_port : udp->dst_port);
72 } else if (ip->protocol == IP_PROTOCOL_ICMP) {
74 * 1) ICMP Echo request or Echo reply
75 * 2) ICMP Error with inner packet being UDP or TCP
76 * 3) ICMP Error with inner packet being ICMP Echo request or Echo reply
78 icmp46_header_t *icmp = (void *)(ip + 1);
79 if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply) {
80 return *((u16 *)(icmp + 1));
81 } else if (clib_net_to_host_u16(ip->length) >= 56) { // IP + ICMP + IP + L4 header
82 ip4_header_t *icmp_ip = (ip4_header_t *)(icmp + 2);
83 if (PREDICT_TRUE((icmp_ip->protocol == IP_PROTOCOL_TCP) ||
84 (icmp_ip->protocol == IP_PROTOCOL_UDP))) {
85 udp_header_t *udp = (void *)(icmp_ip + 1);
86 return (dir == MAP_SENDER ? udp->dst_port : udp->src_port);
87 } else if (icmp_ip->protocol == IP_PROTOCOL_ICMP) {
88 icmp46_header_t *inner_icmp = (void *)(icmp_ip + 1);
89 if (inner_icmp->type == ICMP4_echo_request || inner_icmp->type == ICMP4_echo_reply)
90 return (*((u16 *)(inner_icmp + 1)));
97 static_always_inline u16
98 ip4_map_port_and_security_check (map_domain_t *d, ip4_header_t *ip, u32 *next, u8 *error)
102 if (d->psid_length > 0) {
103 if (ip4_get_fragment_offset(ip) == 0) {
104 if (PREDICT_FALSE((ip->ip_version_and_header_length != 0x45) || clib_host_to_net_u16(ip->length) < 28)) {
107 port = ip4_map_get_port(ip, MAP_RECEIVER);
109 /* Verify that port is not among the well-known ports */
110 if ((d->psid_offset > 0) && (clib_net_to_host_u16(port) < (0x1 << (16 - d->psid_offset)))) {
111 *error = MAP_ERROR_ENCAP_SEC_CHECK;
113 if (ip4_get_fragment_more(ip)) *next = IP4_MAP_NEXT_REASS;
117 *error = MAP_ERROR_BAD_PROTOCOL;
120 *next = IP4_MAP_NEXT_REASS;
129 static_always_inline u32
130 ip4_map_vtcfl (ip4_header_t *ip4, vlib_buffer_t *p)
132 map_main_t *mm = &map_main;
133 u8 tc = mm->tc_copy ? ip4->tos : mm->tc;
134 u32 vtcfl = 0x6 << 28;
136 vtcfl |= vnet_buffer(p)->ip.flow_hash & 0x000fffff;
138 return (clib_host_to_net_u32(vtcfl));
141 static_always_inline bool
142 ip4_map_ip6_lookup_bypass (vlib_buffer_t *p0, ip4_header_t *ip)
144 #ifdef MAP_SKIP_IP6_LOOKUP
145 map_main_t *mm = &map_main;
146 u32 adj_index0 = mm->adj6_index;
147 if (adj_index0 > 0) {
148 ip_lookup_main_t *lm6 = &ip6_main.lookup_main;
149 ip_adjacency_t *adj = ip_get_adjacency(lm6, mm->adj6_index);
150 if (adj->n_adj > 1) {
151 u32 hash_c0 = ip4_compute_flow_hash(ip, IP_FLOW_HASH_DEFAULT);
152 adj_index0 += (hash_c0 & (adj->n_adj - 1));
154 vnet_buffer(p0)->ip.adj_index[VLIB_TX] = adj_index0;
165 ip4_map_decrement_ttl (ip4_header_t *ip, u8 *error)
169 /* Input node should have reject packets with ttl 0. */
170 ASSERT (ip->ttl > 0);
172 u32 checksum = ip->checksum + clib_host_to_net_u16(0x0100);
173 checksum += checksum >= 0xffff;
174 ip->checksum = checksum;
177 *error = ttl <= 0 ? IP4_ERROR_TIME_EXPIRED : *error;
179 /* Verify checksum. */
180 ASSERT (ip->checksum == ip4_header_checksum(ip));
184 ip4_map_fragment (vlib_buffer_t *b, u16 mtu, bool df, u8 *error)
186 map_main_t *mm = &map_main;
188 if (mm->frag_inner) {
189 ip_frag_set_vnet_buffer(b, sizeof(ip6_header_t), mtu, IP4_FRAG_NEXT_IP6_LOOKUP, IP_FRAG_FLAG_IP6_HEADER);
190 return (IP4_MAP_NEXT_IP4_FRAGMENT);
192 if (df && !mm->frag_ignore_df) {
193 icmp4_error_set_vnet_buffer(b, ICMP4_destination_unreachable,
194 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set, mtu);
195 vlib_buffer_advance(b, sizeof(ip6_header_t));
196 *error = MAP_ERROR_DF_SET;
197 return (IP4_MAP_NEXT_ICMP_ERROR);
199 ip_frag_set_vnet_buffer(b, 0, mtu, IP6_FRAG_NEXT_IP6_LOOKUP, IP_FRAG_FLAG_IP6_HEADER);
200 return (IP4_MAP_NEXT_IP6_FRAGMENT);
/*
 * ip4-map node function: IPv4 -> IPv6 MAP encapsulation over the input
 * vector, processed as a dual loop plus a single-buffer cleanup loop.
 *
 * NOTE(review): this block appears garbled — interior lines (braces,
 * statements, trace fields) have been elided and stray line numbers are
 * embedded in the text.  Code is kept byte-identical here; only comments
 * were added.  Reconstruct against the upstream VPP ip4_map.c before
 * attempting to build.
 */
208 ip4_map (vlib_main_t *vm,
209 vlib_node_runtime_t *node,
212 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
213 vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_node.index);
214 from = vlib_frame_vector_args(frame);
215 n_left_from = frame->n_vectors;
216 next_index = node->cached_next_index;
217 map_main_t *mm = &map_main;
218 vlib_combined_counter_main_t *cm = mm->domain_counters;
219 u32 cpu_index = os_get_cpu_number();
221 while (n_left_from > 0) {
222 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
/* Dual loop: process two buffers per iteration while at least four
   remain (two in flight + two being prefetched). */
225 while (n_left_from >= 4 && n_left_to_next >= 2) {
227 vlib_buffer_t *p0, *p1;
228 map_domain_t *d0, *d1;
229 u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE;
230 ip4_header_t *ip40, *ip41;
231 u16 port0 = 0, port1 = 0;
232 ip6_header_t *ip6h0, *ip6h1;
233 u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
234 u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 = IP4_MAP_NEXT_IP6_LOOKUP;
236 /* Prefetch next iteration. */
238 vlib_buffer_t *p2, *p3;
240 p2 = vlib_get_buffer(vm, from[2]);
241 p3 = vlib_get_buffer(vm, from[3]);
243 vlib_prefetch_buffer_header(p2, STORE);
244 vlib_prefetch_buffer_header(p3, STORE);
245 /* IPv4 + 8 = 28. possibly plus -40 */
/* Prefetch 40 bytes before 'data' too: the IPv6 header will be
   prepended there by vlib_buffer_advance(-sizeof(ip6_header_t)). */
246 CLIB_PREFETCH (p2->data-40, 68, STORE);
247 CLIB_PREFETCH (p3->data-40, 68, STORE);
250 pi0 = to_next[0] = from[0];
251 pi1 = to_next[1] = from[1];
257 p0 = vlib_get_buffer(vm, pi0);
258 p1 = vlib_get_buffer(vm, pi1);
259 ip40 = vlib_buffer_get_current(p0);
260 ip41 = vlib_buffer_get_current(p1);
/* The MAP domain was resolved by the previous (lookup) node and left
   in the TX adjacency index. */
261 d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
262 d1 = ip4_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX], &map_domain_index1);
267 * Shared IPv4 address
269 port0 = ip4_map_port_and_security_check(d0, ip40, &next0, &error0);
270 port1 = ip4_map_port_and_security_check(d1, ip41, &next1, &error1);
272 /* Decrement IPv4 TTL */
273 ip4_map_decrement_ttl(ip40, &error0);
274 ip4_map_decrement_ttl(ip41, &error1);
/* Remember the DF bit before encapsulating; used for MTU handling. */
275 bool df0 = ip40->flags_and_fragment_offset & clib_host_to_net_u16(IP4_HEADER_FLAG_DONT_FRAGMENT);
276 bool df1 = ip41->flags_and_fragment_offset & clib_host_to_net_u16(IP4_HEADER_FLAG_DONT_FRAGMENT);
/* Derive the IPv6 destination (prefix + suffix) from the MAP rule,
   IPv4 destination address and destination port. */
279 u32 da40 = clib_net_to_host_u32(ip40->dst_address.as_u32);
280 u32 da41 = clib_net_to_host_u32(ip41->dst_address.as_u32);
281 u16 dp40 = clib_net_to_host_u16(port0);
282 u16 dp41 = clib_net_to_host_u16(port1);
283 u64 dal60 = map_get_pfx(d0, da40, dp40);
284 u64 dal61 = map_get_pfx(d1, da41, dp41);
285 u64 dar60 = map_get_sfx(d0, da40, dp40);
286 u64 dar61 = map_get_sfx(d1, da41, dp41);
/* An all-zero mapping means no binding exists (unless the packet is
   headed for virtual reassembly, where the port is not yet known). */
287 if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE && next0 != IP4_MAP_NEXT_REASS)
288 error0 = MAP_ERROR_NO_BINDING;
289 if (dal61 == 0 && dar61 == 0 && error1 == MAP_ERROR_NONE && next1 != IP4_MAP_NEXT_REASS)
290 error1 = MAP_ERROR_NO_BINDING;
292 /* construct ipv6 header */
293 vlib_buffer_advance(p0, - sizeof(ip6_header_t));
294 vlib_buffer_advance(p1, - sizeof(ip6_header_t));
295 ip6h0 = vlib_buffer_get_current(p0);
296 ip6h1 = vlib_buffer_get_current(p1);
297 vnet_buffer(p0)->sw_if_index[VLIB_TX] = (u32)~0;
298 vnet_buffer(p1)->sw_if_index[VLIB_TX] = (u32)~0;
300 ip6h0->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip40, p0);
301 ip6h1->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip41, p1);
/* IPv6 payload length == total length of the encapsulated IPv4 packet
   (both already in network byte order). */
302 ip6h0->payload_length = ip40->length;
303 ip6h1->payload_length = ip41->length;
304 ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
305 ip6h1->protocol = IP_PROTOCOL_IP_IN_IP;
306 ip6h0->hop_limit = 0x40;
307 ip6h1->hop_limit = 0x40;
308 ip6h0->src_address = d0->ip6_src;
309 ip6h1->src_address = d1->ip6_src;
310 ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64(dal60);
311 ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64(dar60);
312 ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64(dal61);
313 ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64(dar61);
316 * Determine next node. Can be one of:
317 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
319 if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
320 if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip6h0->payload_length) + sizeof(*ip6h0) > d0->mtu))) {
321 next0 = ip4_map_fragment(p0, d0->mtu, df0, &error0);
323 next0 = ip4_map_ip6_lookup_bypass(p0, ip40) ? IP4_MAP_NEXT_IP6_REWRITE : next0;
/* +40: account for the prepended IPv6 header in the byte counter. */
324 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
325 clib_net_to_host_u16(ip6h0->payload_length) + 40);
328 next0 = IP4_MAP_NEXT_DROP;
332 * Determine next node. Can be one of:
333 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
335 if (PREDICT_TRUE(error1 == MAP_ERROR_NONE)) {
336 if (PREDICT_FALSE(d1->mtu && (clib_net_to_host_u16(ip6h1->payload_length) + sizeof(*ip6h1) > d1->mtu))) {
337 next1 = ip4_map_fragment(p1, d1->mtu, df1, &error1);
339 next1 = ip4_map_ip6_lookup_bypass(p1, ip41) ? IP4_MAP_NEXT_IP6_REWRITE : next1;
340 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index1, 1,
341 clib_net_to_host_u16(ip6h1->payload_length) + 40);
344 next1 = IP4_MAP_NEXT_DROP;
/* Record packet traces when requested. */
347 if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
348 map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
349 tr->map_domain_index = map_domain_index0;
352 if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED)) {
353 map_trace_t *tr = vlib_add_trace(vm, node, p1, sizeof(*tr));
354 tr->map_domain_index = map_domain_index1;
358 p0->error = error_node->errors[error0];
359 p1->error = error_node->errors[error1];
361 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, pi0, pi1, next0, next1);
/* Single loop: drain remaining buffers one at a time. */
364 while (n_left_from > 0 && n_left_to_next > 0) {
368 u8 error0 = MAP_ERROR_NONE;
372 u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP;
373 u32 map_domain_index0 = ~0;
375 pi0 = to_next[0] = from[0];
381 p0 = vlib_get_buffer(vm, pi0);
382 ip40 = vlib_buffer_get_current(p0);
383 d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
387 * Shared IPv4 address
389 port0 = ip4_map_port_and_security_check(d0, ip40, &next0, &error0);
391 /* Decrement IPv4 TTL */
392 ip4_map_decrement_ttl(ip40, &error0);
393 bool df0 = ip40->flags_and_fragment_offset & clib_host_to_net_u16(IP4_HEADER_FLAG_DONT_FRAGMENT);
396 u32 da40 = clib_net_to_host_u32(ip40->dst_address.as_u32);
397 u16 dp40 = clib_net_to_host_u16(port0);
398 u64 dal60 = map_get_pfx(d0, da40, dp40);
399 u64 dar60 = map_get_sfx(d0, da40, dp40);
400 if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE && next0 != IP4_MAP_NEXT_REASS)
401 error0 = MAP_ERROR_NO_BINDING;
403 /* construct ipv6 header */
404 vlib_buffer_advance(p0, - (sizeof(ip6_header_t)));
405 ip6h0 = vlib_buffer_get_current(p0);
406 vnet_buffer(p0)->sw_if_index[VLIB_TX] = (u32)~0;
408 ip6h0->ip_version_traffic_class_and_flow_label = ip4_map_vtcfl(ip40, p0);
409 ip6h0->payload_length = ip40->length;
410 ip6h0->protocol = IP_PROTOCOL_IP_IN_IP;
411 ip6h0->hop_limit = 0x40;
412 ip6h0->src_address = d0->ip6_src;
413 ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64(dal60);
414 ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64(dar60);
417 * Determine next node. Can be one of:
418 * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop
420 if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
421 if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip6h0->payload_length) + sizeof(*ip6h0) > d0->mtu))) {
422 next0 = ip4_map_fragment(p0, d0->mtu, df0, &error0);
424 next0 = ip4_map_ip6_lookup_bypass(p0, ip40) ? IP4_MAP_NEXT_IP6_REWRITE : next0;
425 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
426 clib_net_to_host_u16(ip6h0->payload_length) + 40);
429 next0 = IP4_MAP_NEXT_DROP;
432 if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
433 map_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
434 tr->map_domain_index = map_domain_index0;
438 p0->error = error_node->errors[error0];
439 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
441 vlib_put_next_frame(vm, node, next_index, n_left_to_next);
444 return frame->n_vectors;
/*
 * ip4-map-reass node function: virtual reassembly for fragmented IPv4
 * packets.  First fragments reveal the L4 port; cached later fragments
 * are looped back into this node once the port is known, then forwarded
 * with the correct MAP IPv6 destination.
 *
 * NOTE(review): this block appears garbled — interior lines have been
 * elided and stray line numbers are embedded in the text.  Code is kept
 * byte-identical; only comments were added.
 */
451 ip4_map_reass (vlib_main_t *vm,
452 vlib_node_runtime_t *node,
455 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
456 vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_reass_node.index);
457 from = vlib_frame_vector_args(frame);
458 n_left_from = frame->n_vectors;
459 next_index = node->cached_next_index;
460 map_main_t *mm = &map_main;
461 vlib_combined_counter_main_t *cm = mm->domain_counters;
462 u32 cpu_index = os_get_cpu_number();
/* Vectors of buffer indices collected during reassembly bookkeeping. */
463 u32 *fragments_to_drop = NULL;
464 u32 *fragments_to_loopback = NULL;
466 while (n_left_from > 0) {
467 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
469 while (n_left_from > 0 && n_left_to_next > 0) {
473 u8 error0 = MAP_ERROR_NONE;
477 u32 next0 = IP4_MAP_REASS_NEXT_IP6_LOOKUP;
478 u32 map_domain_index0;
481 pi0 = to_next[0] = from[0];
487 p0 = vlib_get_buffer(vm, pi0);
/* The IPv6 encapsulation header was already prepended by ip4-map;
   the original IPv4 header follows it. */
488 ip60 = vlib_buffer_get_current(p0);
489 ip40 = (ip4_header_t *)(ip60 + 1);
490 d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX], &map_domain_index0);
/* Look up (or create) the reassembly tracking entry keyed on the
   usual 4-tuple; protected by the reassembly lock. */
492 map_ip4_reass_lock();
493 map_ip4_reass_t *r = map_ip4_reass_get(ip40->src_address.as_u32, ip40->dst_address.as_u32,
494 ip40->fragment_id, ip40->protocol, &fragments_to_drop);
495 if (PREDICT_FALSE(!r)) {
496 // Could not create a caching entry
497 error0 = MAP_ERROR_FRAGMENT_MEMORY;
498 } else if (PREDICT_TRUE(ip4_get_fragment_offset(ip40))) {
500 // We know the port already
502 } else if (map_ip4_reass_add_fragment(r, pi0)) {
503 // Not enough space for caching
504 error0 = MAP_ERROR_FRAGMENT_MEMORY;
505 map_ip4_reass_free(r, &fragments_to_drop);
509 } else if ((port0 = ip4_get_port(ip40, MAP_RECEIVER, p0->current_length)) < 0) {
510 // Could not find port. We'll free the reassembly.
511 error0 = MAP_ERROR_BAD_PROTOCOL;
513 map_ip4_reass_free(r, &fragments_to_drop);
/* First fragment gave us the port: release any cached fragments so
   they are looped back through this node. */
516 map_ip4_reass_get_fragments(r, &fragments_to_loopback);
519 #ifdef MAP_IP4_REASS_COUNT_BYTES
/* Optional byte accounting: free the entry early once all expected
   payload bytes have been forwarded. */
521 r->forwarded += clib_host_to_net_u16(ip40->length) - 20;
522 if (!ip4_get_fragment_more(ip40))
523 r->expected_total = ip4_get_fragment_offset(ip40) * 8 + clib_host_to_net_u16(ip40->length) - 20;
524 if(r->forwarded >= r->expected_total)
525 map_ip4_reass_free(r, &fragments_to_drop);
529 map_ip4_reass_unlock();
531 // NOTE: Most operations have already been performed by ip4_map
532 // All we need is the right destination address
533 ip60->dst_address.as_u64[0] = map_get_pfx_net(d0, ip40->dst_address.as_u32, port0);
534 ip60->dst_address.as_u64[1] = map_get_sfx_net(d0, ip40->dst_address.as_u32, port0);
/* Oversized for the domain MTU: hand off to the fragmentation node. */
536 if (PREDICT_FALSE(d0->mtu && (clib_net_to_host_u16(ip60->payload_length) + sizeof(*ip60) > d0->mtu))) {
537 vnet_buffer(p0)->ip_frag.header_offset = sizeof(*ip60);
538 vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP;
539 vnet_buffer(p0)->ip_frag.mtu = d0->mtu;
540 vnet_buffer(p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER;
541 next0 = IP4_MAP_REASS_NEXT_IP4_FRAGMENT;
544 if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) {
545 map_ip4_map_reass_trace_t *tr = vlib_add_trace(vm, node, p0, sizeof(*tr));
546 tr->map_domain_index = map_domain_index0;
/* +40: account for the IPv6 encapsulation header. */
556 if (error0 == MAP_ERROR_NONE)
557 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index, map_domain_index0, 1,
558 clib_net_to_host_u16(ip60->payload_length) + 40);
559 next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP4_MAP_REASS_NEXT_DROP;
560 p0->error = error_node->errors[error0];
561 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next0);
// Loopback when we reach the end of the input vector
565 if(n_left_from == 0 && vec_len(fragments_to_loopback)) {
/* Reuse the incoming frame to re-inject cached fragments, at most
   one frame's worth at a time. */
566 from = vlib_frame_vector_args(frame);
567 u32 len = vec_len(fragments_to_loopback);
568 if(len <= VLIB_FRAME_SIZE) {
569 clib_memcpy(from, fragments_to_loopback, sizeof(u32)*len);
571 vec_reset_length(fragments_to_loopback);
573 clib_memcpy(from, fragments_to_loopback + (len - VLIB_FRAME_SIZE), sizeof(u32)*VLIB_FRAME_SIZE);
574 n_left_from = VLIB_FRAME_SIZE;
575 _vec_len(fragments_to_loopback) = len - VLIB_FRAME_SIZE;
579 vlib_put_next_frame(vm, node, next_index, n_left_to_next);
/* Dispose of fragments that reassembly decided to drop. */
582 map_send_all_to_node(vm, fragments_to_drop, node,
583 &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
584 IP4_MAP_REASS_NEXT_DROP);
586 vec_free(fragments_to_drop);
587 vec_free(fragments_to_loopback);
588 return frame->n_vectors;
591 static char *map_error_strings[] = {
592 #define _(sym,string) string,
597 VLIB_REGISTER_NODE(ip4_map_node) = {
600 .vector_size = sizeof(u32),
601 .format_trace = format_map_trace,
602 .type = VLIB_NODE_TYPE_INTERNAL,
604 .n_errors = MAP_N_ERROR,
605 .error_strings = map_error_strings,
607 .n_next_nodes = IP4_MAP_N_NEXT,
609 [IP4_MAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
610 #ifdef MAP_SKIP_IP6_LOOKUP
611 [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-rewrite",
613 [IP4_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
614 [IP4_MAP_NEXT_IP6_FRAGMENT] = "ip6-frag",
615 [IP4_MAP_NEXT_REASS] = "ip4-map-reass",
616 [IP4_MAP_NEXT_ICMP_ERROR] = "ip4-icmp-error",
617 [IP4_MAP_NEXT_DROP] = "error-drop",
621 VLIB_REGISTER_NODE(ip4_map_reass_node) = {
622 .function = ip4_map_reass,
623 .name = "ip4-map-reass",
624 .vector_size = sizeof(u32),
625 .format_trace = format_ip4_map_reass_trace,
626 .type = VLIB_NODE_TYPE_INTERNAL,
628 .n_errors = MAP_N_ERROR,
629 .error_strings = map_error_strings,
631 .n_next_nodes = IP4_MAP_REASS_N_NEXT,
633 [IP4_MAP_REASS_NEXT_IP6_LOOKUP] = "ip6-lookup",
634 [IP4_MAP_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
635 [IP4_MAP_REASS_NEXT_DROP] = "error-drop",