2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include <vnet/fib/ip4_fib.h>
19 #include <vnet/gre/packet.h>
20 #include <lb/lbhash.h>
22 #define foreach_lb_error \
24 _(PROTO_NOT_SUPPORTED, "protocol not supported")
28 #define _(sym,str) LB_ERROR_##sym,
34 static char *lb_error_strings[] =
36 #define _(sym,string) string,
52 } lb_nodeport_trace_t;
63 format_lb_trace (u8 * s, va_list * args)
65 lb_main_t *lbm = &lb_main;
66 CLIB_UNUSED(vlib_main_t * vm)
67 = va_arg (*args, vlib_main_t *);
68 CLIB_UNUSED(vlib_node_t * node)
69 = va_arg (*args, vlib_node_t *);
70 lb_trace_t *t = va_arg (*args, lb_trace_t *);
71 if (pool_is_free_index(lbm->vips, t->vip_index))
73 s = format (s, "lb vip[%d]: This VIP was freed since capture\n");
77 s = format (s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip,
78 &lbm->vips[t->vip_index]);
80 if (pool_is_free_index(lbm->ass, t->as_index))
82 s = format (s, "lb as[%d]: This AS was freed since capture\n");
86 s = format (s, "lb as[%d]: %U\n", t->as_index, format_lb_as,
87 &lbm->ass[t->as_index]);
93 format_lb_nat_trace (u8 * s, va_list * args)
95 lb_main_t *lbm = &lb_main;
96 CLIB_UNUSED(vlib_main_t * vm)
97 = va_arg (*args, vlib_main_t *);
98 CLIB_UNUSED(vlib_node_t * node)
99 = va_arg (*args, vlib_node_t *);
100 lb_nat_trace_t *t = va_arg (*args, lb_nat_trace_t *);
102 if (pool_is_free_index(lbm->vips, t->vip_index))
104 s = format (s, "lb vip[%d]: This VIP was freed since capture\n");
108 s = format (s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip,
109 &lbm->vips[t->vip_index]);
111 if (pool_is_free_index(lbm->ass, t->as_index))
113 s = format (s, "lb as[%d]: This AS was freed since capture\n");
117 s = format (s, "lb as[%d]: %U\n", t->as_index, format_lb_as,
118 &lbm->ass[t->as_index]);
120 s = format (s, "lb nat: rx_sw_if_index = %d, next_index = %d",
121 t->rx_sw_if_index, t->next_index);
lb_get_sticky_table (u32 thread_index)
  /* Return this worker's sticky (flow-affinity) hash table, re-creating it
   * when the configured bucket count changed, and refreshing its timeout
   * from lb_main on every call. */
  lb_main_t *lbm = &lb_main;
  lb_hash_t *sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
  //Check if size changed
      sticky_ht && (lbm->per_cpu_sticky_buckets != lb_hash_nbuckets(sticky_ht))))
      //Dereference everything in there
      lb_hash_foreach_entry(sticky_ht, b, i)
        /* Each live entry holds one refcount on its AS; release it, and
         * re-credit the "no AS" slot 0 so the counters stay balanced. */
        vlib_refcount_add (&lbm->as_refcount, thread_index, b->value[i], -1);
        vlib_refcount_add (&lbm->as_refcount, thread_index, 0, 1);
      lb_hash_free (sticky_ht);
  //Create if necessary
  if (PREDICT_FALSE(sticky_ht == NULL))
    lbm->per_cpu[thread_index].sticky_ht = lb_hash_alloc (
        lbm->per_cpu_sticky_buckets, lbm->flow_timeout);
    sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
    clib_warning("Regenerated sticky table %p", sticky_ht);
  /* Keep the table's timeout in sync with the current configuration. */
  sticky_ht->timeout = lbm->flow_timeout;
165 lb_node_get_other_ports4 (ip4_header_t *ip40)
171 lb_node_get_other_ports6 (ip6_header_t *ip60)
static_always_inline void
lb_node_get_hash (lb_main_t *lbm, vlib_buffer_t *p, u8 is_input_v4, u32 *hash,
                  u32 *vip_idx, u8 per_port_vip, u8 src_ip_sticky)
  /* Compute the flow hash for buffer p and resolve the VIP index.
   * is_input_v4 / per_port_vip / src_ip_sticky are compile-time template
   * parameters selecting v4 vs v6 parsing, per-port VIP lookup, and
   * source-IP-only (port-less) stickiness respectively. */
  clib_bihash_kv_8_8_t kv, value;
  /* For vip case, retrieve vip index for ip lookup */
  *vip_idx = vnet_buffer (p)->ip.adj_index[VLIB_TX];
  /* For per-port-vip case, ip lookup stores placeholder index */
  key.vip_prefix_index = *vip_idx;
  ip40 = vlib_buffer_get_current (p);
  /* Extract L4 ports only for TCP/UDP; other protocols go through the
   * protocol-specific helper. */
      ip40->protocol == IP_PROTOCOL_TCP
      || ip40->protocol == IP_PROTOCOL_UDP))
    ports = ((u64) ((udp_header_t *) (ip40 + 1))->src_port << 16)
        | ((u64) ((udp_header_t *) (ip40 + 1))->dst_port);
    ports = lb_node_get_other_ports4 (ip40);
  /* src_ip_sticky: hash on the IPv4 address pair only (ports excluded). */
  *hash = lb_hash_hash (*((u64 *) &ip40->address_pair), 0, 0, 0, 0);
      lb_hash_hash (*((u64 *) &ip40->address_pair), ports, 0, 0, 0);
  key.protocol = ip40->protocol;
  key.port = (u16)(ports & 0xFFFF);
  ip60 = vlib_buffer_get_current (p);
  /* Same logic for the IPv6 path. */
      ip60->protocol == IP_PROTOCOL_TCP
      || ip60->protocol == IP_PROTOCOL_UDP))
    ports = ((u64) ((udp_header_t *) (ip60 + 1))->src_port << 16)
        | ((u64) ((udp_header_t *) (ip60 + 1))->dst_port);
    ports = lb_node_get_other_ports6 (ip60);
  *hash = lb_hash_hash (
      ip60->src_address.as_u64[0], ip60->src_address.as_u64[1],
      ip60->dst_address.as_u64[0], ip60->dst_address.as_u64[1], 0);
  *hash = lb_hash_hash (
      ip60->src_address.as_u64[0], ip60->src_address.as_u64[1],
      ip60->dst_address.as_u64[0], ip60->dst_address.as_u64[1], ports);
  key.protocol = ip60->protocol;
  key.port = (u16)(ports & 0xFFFF);
  /* For per-port-vip case, retrieve vip index for vip_port_filter table */
  if (clib_bihash_search_8_8(&lbm->vip_index_per_port, &kv, &value) < 0)
    /* return default vip */
  *vip_idx = value.value;
/* clang-format off */
static_always_inline uword
lb_node_fn (vlib_main_t * vm,
            vlib_node_runtime_t * node,
            vlib_frame_t * frame,
            u8 is_input_v4, //Compile-time parameter stating that is input is v4 (or v6)
            lb_encap_type_t encap_type, //Compile-time parameter is GRE4/GRE6/L3DSR/NAT4/NAT6
            u8 per_port_vip, //Compile-time parameter stating that is per_port_vip or not
            u8 src_ip_sticky) //Compile-time parameter stating that is source ip based sticky or not
  /* Core load-balancer dispatch loop: for each packet, look up (or create)
   * a sticky-table entry mapping the flow hash to an application server
   * (AS), then rewrite/encapsulate the packet toward that AS according to
   * encap_type. Software pipelining: the hash for packet N+1 is computed
   * while packet N is processed. */
  lb_main_t *lbm = &lb_main;
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  u32 thread_index = vm->thread_index;
  u32 lb_time = lb_hash_time_now (vm);
  lb_hash_t *sticky_ht = lb_get_sticky_table (thread_index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  u32 next_vip_idx0 = ~0;
  /* Prime the pipeline: hash/VIP of the first packet. */
  if (PREDICT_TRUE(n_left_from > 0))
      vlib_buffer_t *p0 = vlib_get_buffer (vm, from[0]);
      lb_node_get_hash (lbm, p0, is_input_v4, &nexthash0,
                        &next_vip_idx0, per_port_vip, src_ip_sticky);
  while (n_left_from > 0)
      vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from > 0 && n_left_to_next > 0)
          u32 available_index0;
          /* Consume the hash/VIP pre-computed on the previous iteration. */
          u32 hash0 = nexthash0;
          u32 vip_index0 = next_vip_idx0;
          if (PREDICT_TRUE(n_left_from > 1))
              vlib_buffer_t *p1 = vlib_get_buffer (vm, from[1]);
              //Compute next hash and prefetch bucket
              lb_node_get_hash (lbm, p1, is_input_v4,
                                &nexthash0, &next_vip_idx0,
                                per_port_vip, src_ip_sticky);
              lb_hash_prefetch_bucket (sticky_ht, nexthash0);
              //Prefetch for encap, next
              CLIB_PREFETCH(vlib_buffer_get_current (p1) - 64, 64, STORE);
          if (PREDICT_TRUE(n_left_from > 2))
              p2 = vlib_get_buffer (vm, from[2]);
              /* prefetch packet header and data */
              vlib_prefetch_buffer_header(p2, STORE);
              CLIB_PREFETCH(vlib_buffer_get_current (p2), 64, STORE);
          pi0 = to_next[0] = from[0];
          p0 = vlib_get_buffer (vm, pi0);
          vip0 = pool_elt_at_index(lbm->vips, vip_index0);
          /* Payload length is needed later for GRE length fields. */
          ip40 = vlib_buffer_get_current (p0);
          len0 = clib_net_to_host_u16 (ip40->length);
          ip60 = vlib_buffer_get_current (p0);
          len0 = clib_net_to_host_u16 (ip60->payload_length)
              + sizeof(ip6_header_t);
          /* Sticky-table lookup: asindex0 == 0 means "no entry / no AS". */
          lb_hash_get (sticky_ht, hash0,
                       &available_index0, &asindex0);
          if (PREDICT_TRUE(asindex0 != 0))
              //Found an existing entry
              counter = LB_VIP_COUNTER_NEXT_PACKET;
          else if (PREDICT_TRUE(available_index0 != ~0))
              //There is an available slot for a new flow
              vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].as_index;
              counter = LB_VIP_COUNTER_FIRST_PACKET;
              counter = (asindex0 == 0) ? LB_VIP_COUNTER_NO_SERVER : counter;
              //TODO: There are race conditions with as0 and vip0 manipulation.
              //Configuration may be changed, vectors resized, etc...
              //Dereference previously used
                  &lbm->as_refcount, thread_index,
                  lb_hash_available_value (sticky_ht, hash0, available_index0),
              vlib_refcount_add (&lbm->as_refcount, thread_index, asindex0, 1);
              //Note that when there is no AS configured, an entry is configured anyway.
              //But no configured AS is not something that should happen
              lb_hash_put (sticky_ht, hash0, asindex0,
                           available_index0, lb_time);
              //Could not store new entry in the table
              vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].as_index;
              counter = LB_VIP_COUNTER_UNTRACKED_PACKET;
          vlib_increment_simple_counter (
              &lbm->vip_counters[counter], thread_index,
          /* ---- Encapsulation / rewrite toward the chosen AS ---- */
          if ((encap_type == LB_ENCAP_TYPE_GRE4)
              || (encap_type == LB_ENCAP_TYPE_GRE6))
              if (encap_type == LB_ENCAP_TYPE_GRE4) /* encap GRE4*/
                  /* Prepend outer IPv4 + GRE headers. */
                  vlib_buffer_advance (
                      p0, -sizeof(ip4_header_t) - sizeof(gre_header_t));
                  ip40 = vlib_buffer_get_current (p0);
                  gre0 = (gre_header_t *) (ip40 + 1);
                  ip40->src_address = lbm->ip4_src_address;
                  ip40->dst_address = lbm->ass[asindex0].address.ip4;
                  ip40->ip_version_and_header_length = 0x45;
                  ip40->fragment_id = 0;
                  ip40->flags_and_fragment_offset = 0;
                  ip40->length = clib_host_to_net_u16 (
                      len0 + sizeof(gre_header_t) + sizeof(ip4_header_t));
                  ip40->protocol = IP_PROTOCOL_GRE;
                  ip40->checksum = ip4_header_checksum (ip40);
                  /* Prepend outer IPv6 + GRE headers. */
                  vlib_buffer_advance (
                      p0, -sizeof(ip6_header_t) - sizeof(gre_header_t));
                  ip60 = vlib_buffer_get_current (p0);
                  gre0 = (gre_header_t *) (ip60 + 1);
                  ip60->dst_address = lbm->ass[asindex0].address.ip6;
                  ip60->src_address = lbm->ip6_src_address;
                  ip60->hop_limit = 128;
                  ip60->ip_version_traffic_class_and_flow_label =
                      clib_host_to_net_u32 (0x6 << 28);
                  ip60->payload_length = clib_host_to_net_u16 (
                      len0 + sizeof(gre_header_t));
                  ip60->protocol = IP_PROTOCOL_GRE;
              /* GRE protocol field: 0x0800 = IPv4 payload, 0x86DD = IPv6. */
              gre0->flags_and_version = 0;
                  clib_host_to_net_u16 (0x0800) :
                  clib_host_to_net_u16 (0x86DD);
          else if (encap_type == LB_ENCAP_TYPE_L3DSR) /* encap L3DSR*/
              /* L3DSR: rewrite destination IP and DSCP in place, then patch
               * the IP checksum incrementally and recompute L4 checksums. */
              u32 old_dst, new_dst;
              ip40 = vlib_buffer_get_current (p0);
              old_dst = ip40->dst_address.as_u32;
              new_dst = lbm->ass[asindex0].address.ip4.as_u32;
              ip40->dst_address.as_u32 = lbm->ass[asindex0].address.ip4.as_u32;
              /* Get and rewrite DSCP bit */
              new_tos = (u8) ((vip0->encap_args.dscp & 0x3F) << 2);
              ip40->tos = (u8) ((vip0->encap_args.dscp & 0x3F) << 2);
              csum = ip40->checksum;
              csum = ip_csum_update (csum, old_tos, new_tos,
                                     tos /* changed member */);
              csum = ip_csum_update (csum, old_dst, new_dst,
                                     dst_address /* changed member */);
              ip40->checksum = ip_csum_fold (csum);
              /* Recomputing L4 checksum after dst-IP modifying */
              if (ip40->protocol == IP_PROTOCOL_TCP)
                  th0 = ip4_next_header (ip40);
                  th0->checksum = ip4_tcp_udp_compute_checksum (vm, p0, ip40);
              else if (ip40->protocol == IP_PROTOCOL_UDP)
                  uh0 = ip4_next_header (ip40);
                  uh0->checksum = ip4_tcp_udp_compute_checksum (vm, p0, ip40);
          else if ((encap_type == LB_ENCAP_TYPE_NAT4)
                   || (encap_type == LB_ENCAP_TYPE_NAT6))
              /* NAT: rewrite destination address (+ UDP dst port) with
               * incremental checksum updates. */
              if ((is_input_v4 == 1) && (encap_type == LB_ENCAP_TYPE_NAT4))
                  ip40 = vlib_buffer_get_current (p0);
                  uh = (udp_header_t *) (ip40 + 1);
                  old_dst = ip40->dst_address.as_u32;
                  ip40->dst_address = lbm->ass[asindex0].address.ip4;
                  csum = ip40->checksum;
                  csum = ip_csum_sub_even (csum, old_dst);
                  csum = ip_csum_add_even (
                      csum, lbm->ass[asindex0].address.ip4.as_u32);
                  ip40->checksum = ip_csum_fold (csum);
                  if (ip40->protocol == IP_PROTOCOL_UDP)
                      uh->dst_port = vip0->encap_args.target_port;
                      csum = ip_csum_sub_even (csum, old_dst);
                      csum = ip_csum_add_even (
                          csum, lbm->ass[asindex0].address.ip4.as_u32);
                      uh->checksum = ip_csum_fold (csum);
              else if ((is_input_v4 == 0) && (encap_type == LB_ENCAP_TYPE_NAT6))
                  ip6_address_t old_dst;
                  ip60 = vlib_buffer_get_current (p0);
                  uh = (udp_header_t *) (ip60 + 1);
                  old_dst.as_u64[0] = ip60->dst_address.as_u64[0];
                  old_dst.as_u64[1] = ip60->dst_address.as_u64[1];
                  ip60->dst_address.as_u64[0] =
                      lbm->ass[asindex0].address.ip6.as_u64[0];
                  ip60->dst_address.as_u64[1] =
                      lbm->ass[asindex0].address.ip6.as_u64[1];
                  if (PREDICT_TRUE(ip60->protocol == IP_PROTOCOL_UDP))
                      uh->dst_port = vip0->encap_args.target_port;
                      csum = ip_csum_sub_even (csum, old_dst.as_u64[0]);
                      csum = ip_csum_sub_even (csum, old_dst.as_u64[1]);
                      csum = ip_csum_add_even (
                          csum, lbm->ass[asindex0].address.ip6.as_u64[0]);
                      csum = ip_csum_add_even (
                          csum, lbm->ass[asindex0].address.ip6.as_u64[1]);
                      uh->checksum = ip_csum_fold (csum);
          /* Forward via the AS's DPO (next node + adjacency). */
          next0 = lbm->ass[asindex0].dpo.dpoi_next_node;
          //Note that this is going to error if asindex0 == 0
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
              lbm->ass[asindex0].dpo.dpoi_index;
          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
              lb_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof(*tr));
              tr->as_index = asindex0;
              tr->vip_index = vip_index0;
          vlib_validate_buffer_enqueue_x1(
              vm, node, next_index, to_next, n_left_to_next, pi0, next0);
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  return frame->n_vectors;
/* clang-format on */
589 format_nodeport_lb_trace (u8 * s, va_list * args)
591 lb_main_t *lbm = &lb_main;
592 CLIB_UNUSED(vlib_main_t * vm)
593 = va_arg (*args, vlib_main_t *);
594 CLIB_UNUSED(vlib_node_t * node)
595 = va_arg (*args, vlib_node_t *);
596 lb_nodeport_trace_t *t = va_arg (*args, lb_nodeport_trace_t *);
597 if (pool_is_free_index(lbm->vips, t->vip_index))
599 s = format (s, "lb vip[%d]: This VIP was freed since capture\n");
603 s = format (s, "lb vip[%d]: %U\n", t->vip_index, format_lb_vip,
604 &lbm->vips[t->vip_index]);
607 s = format (s, " lb node_port: %d", t->node_port);
lb_nodeport_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * frame, u8 is_input_v4)
  /* NodePort entry node: the packet arrives positioned after the IP+UDP
   * headers; rewind to re-expose them, look up the VIP by UDP destination
   * port, stash the VIP index in the TX adjacency field for the downstream
   * NAT node, and forward to the NAT4/NAT6 next node. */
  lb_main_t *lbm = &lb_main;
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
      vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from > 0 && n_left_to_next > 0)
          udp_header_t * udp_0;
          if (PREDICT_TRUE(n_left_from > 1))
              vlib_buffer_t *p1 = vlib_get_buffer (vm, from[1]);
              //Prefetch for encap, next
              CLIB_PREFETCH(vlib_buffer_get_current (p1) - 64, 64, STORE);
          if (PREDICT_TRUE(n_left_from > 2))
              p2 = vlib_get_buffer (vm, from[2]);
              /* prefetch packet header and data */
              vlib_prefetch_buffer_header(p2, STORE);
              CLIB_PREFETCH(vlib_buffer_get_current (p2), 64, STORE);
          pi0 = to_next[0] = from[0];
          p0 = vlib_get_buffer (vm, pi0);
          /* Rewind so the IP and UDP headers are visible again. */
          vlib_buffer_advance (
              p0, -(word) (sizeof(udp_header_t) + sizeof(ip4_header_t)));
          ip40 = vlib_buffer_get_current (p0);
          udp_0 = (udp_header_t *) (ip40 + 1);
          vlib_buffer_advance (
              p0, -(word) (sizeof(udp_header_t) + sizeof(ip6_header_t)));
          ip60 = vlib_buffer_get_current (p0);
          udp_0 = (udp_header_t *) (ip60 + 1);
          /* Map UDP dst port -> VIP index (network byte order key). */
          entry0 = hash_get_mem(lbm->vip_index_by_nodeport, &(udp_0->dst_port));
          /* Hand the VIP index to the next node via the TX adjacency slot. */
          vnet_buffer(p0)->ip.adj_index[VLIB_TX] = entry0 ? entry0[0]
          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
              lb_nodeport_trace_t *tr = vlib_add_trace (vm, node, p0,
              tr->vip_index = entry0 ? entry0[0] : ADJ_INDEX_INVALID;
              tr->node_port = (u32) clib_net_to_host_u16 (udp_0->dst_port);
          vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
              LB4_NODEPORT_NEXT_IP4_NAT4 : LB6_NODEPORT_NEXT_IP6_NAT6);
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  return frame->n_vectors;
 * @brief Match NAT44 static mapping.
 *
 * @param lbm lb plugin main structure.
 * @param match Address and port to match.
 * @param index index to the pool.
 *
 * @returns 0 if match found, otherwise -1.
lb_nat44_mapping_match (lb_main_t *lbm, lb_snat4_key_t * match, u32 *index)
  clib_bihash_kv_8_8_t kv4, value;
  clib_bihash_8_8_t *mapping_hash = &lbm->mapping_by_as4;
  /* The whole 4-tuple key (addr/proto/port/fib) fits in one u64. */
  kv4.key = match->as_u64;
  if (clib_bihash_search_8_8 (mapping_hash, &kv4, &value))
  /* Found: return the snat_mappings pool index. */
  *index = value.value;
 * @brief Match NAT66 static mapping.
 *
 * @param lbm lb plugin main structure.
 * @param match Address and port to match.
 * @param index index to the matched mapping in the pool.
 *
 * @returns 0 if match found otherwise 1.
lb_nat66_mapping_match (lb_main_t *lbm, lb_snat6_key_t * match, u32 *index)
  clib_bihash_kv_24_8_t kv6, value;
  lb_snat6_key_t m_key6;
  clib_bihash_24_8_t *mapping_hash = &lbm->mapping_by_as6;
  /* Rebuild the key locally so unused fields are deterministic. */
  m_key6.addr.as_u64[0] = match->addr.as_u64[0];
  m_key6.addr.as_u64[1] = match->addr.as_u64[1];
  m_key6.port = match->port;
  /* fib_index is not part of the match for NAT66 — force it to 0. */
  m_key6.fib_index = 0;
  kv6.key[0] = m_key6.as_u64[0];
  kv6.key[1] = m_key6.as_u64[1];
  kv6.key[2] = m_key6.as_u64[2];
  if (clib_bihash_search_24_8 (mapping_hash, &kv6, &value))
  /* Found: return the snat_mappings pool index. */
  *index = value.value;
lb_nat_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame, u32 is_nat4)
  /* Inside-to-outside SNAT for load-balanced return traffic: rewrite the
   * source address/port of packets coming back from an AS using the static
   * mapping tables, patching IP and L4 checksums incrementally.
   * is_nat4 is a compile-time parameter selecting the v4 or v6 path. */
  u32 n_left_from, *from, *to_next;
  u32 pkts_processed = 0;
  lb_main_t *lbm = &lb_main;
  u32 stats_node_index;
      is_nat4 ? lb_nat4_in2out_node.index : lb_nat6_in2out_node.index;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
      vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from > 0 && n_left_to_next > 0)
          u16 old_port0, new_port0;
          /* speculatively enqueue b0 to the current next frame */
          b0 = vlib_get_buffer (vm, bi0);
          next0 = LB_NAT4_IN2OUT_NEXT_LOOKUP;
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
          rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (
          /* ---- IPv4 path ---- */
          u32 old_addr0, new_addr0;
          lb_snat4_key_t key40;
          lb_snat_mapping_t *sm40;
          ip40 = vlib_buffer_get_current (b0);
          udp0 = ip4_next_header (ip40);
          /* TCP and UDP headers overlap for the port fields used here. */
          tcp0 = (tcp_header_t *) udp0;
          proto0 = lb_ip_proto_to_nat_proto (ip40->protocol);
          key40.addr = ip40->src_address;
          key40.protocol = proto0;
          key40.port = udp0->src_port;
          key40.fib_index = rx_fib_index0;
          /* No mapping -> drop. */
          if (lb_nat44_mapping_match (lbm, &key40, &index40))
              next0 = LB_NAT4_IN2OUT_NEXT_DROP;
          sm40 = pool_elt_at_index(lbm->snat_mappings, index40);
          new_addr0 = sm40->src_ip.ip4.as_u32;
          new_port0 = sm40->src_port;
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm40->fib_index;
          old_addr0 = ip40->src_address.as_u32;
          ip40->src_address.as_u32 = new_addr0;
          /* Incremental IP header checksum update. */
          csum = ip40->checksum;
          csum = ip_csum_sub_even (csum, old_addr0);
          csum = ip_csum_add_even (csum, new_addr0);
          ip40->checksum = ip_csum_fold (csum);
          if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_TCP))
              old_port0 = tcp0->src_port;
              tcp0->src_port = new_port0;
              /* L4 checksum covers both pseudo-header address and port. */
              csum = tcp0->checksum;
              csum = ip_csum_sub_even (csum, old_addr0);
              csum = ip_csum_sub_even (csum, old_port0);
              csum = ip_csum_add_even (csum, new_addr0);
              csum = ip_csum_add_even (csum, new_port0);
              tcp0->checksum = ip_csum_fold (csum);
          else if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_UDP))
              old_port0 = udp0->src_port;
              udp0->src_port = new_port0;
              csum = udp0->checksum;
              csum = ip_csum_sub_even (csum, old_addr0);
              csum = ip_csum_sub_even (csum, old_port0);
              csum = ip_csum_add_even (csum, new_addr0);
              csum = ip_csum_add_even (csum, new_port0);
              udp0->checksum = ip_csum_fold (csum);
          pkts_processed += next0 != LB_NAT4_IN2OUT_NEXT_DROP;
          /* ---- IPv6 path ---- */
          ip6_address_t old_addr0, new_addr0;
          lb_snat6_key_t key60;
          lb_snat_mapping_t *sm60;
          ip60 = vlib_buffer_get_current (b0);
          udp0 = ip6_next_header (ip60);
          tcp0 = (tcp_header_t *) udp0;
          proto0 = lb_ip_proto_to_nat_proto (ip60->protocol);
          key60.addr.as_u64[0] = ip60->src_address.as_u64[0];
          key60.addr.as_u64[1] = ip60->src_address.as_u64[1];
          key60.protocol = proto0;
          key60.port = udp0->src_port;
          key60.fib_index = rx_fib_index0;
          if (lb_nat66_mapping_match (lbm, &key60, &index60))
              next0 = LB_NAT6_IN2OUT_NEXT_DROP;
          sm60 = pool_elt_at_index(lbm->snat_mappings, index60);
          new_addr0.as_u64[0] = sm60->src_ip.as_u64[0];
          new_addr0.as_u64[1] = sm60->src_ip.as_u64[1];
          new_port0 = sm60->src_port;
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm60->fib_index;
          old_addr0.as_u64[0] = ip60->src_address.as_u64[0];
          old_addr0.as_u64[1] = ip60->src_address.as_u64[1];
          ip60->src_address.as_u64[0] = new_addr0.as_u64[0];
          ip60->src_address.as_u64[1] = new_addr0.as_u64[1];
          if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_TCP))
              old_port0 = tcp0->src_port;
              tcp0->src_port = new_port0;
              csum = tcp0->checksum;
              csum = ip_csum_sub_even (csum, old_addr0.as_u64[0]);
              csum = ip_csum_sub_even (csum, old_addr0.as_u64[1]);
              csum = ip_csum_add_even (csum, new_addr0.as_u64[0]);
              csum = ip_csum_add_even (csum, new_addr0.as_u64[1]);
              csum = ip_csum_sub_even (csum, old_port0);
              csum = ip_csum_add_even (csum, new_port0);
              tcp0->checksum = ip_csum_fold (csum);
          else if (PREDICT_TRUE(proto0 == LB_NAT_PROTOCOL_UDP))
              old_port0 = udp0->src_port;
              udp0->src_port = new_port0;
              csum = udp0->checksum;
              csum = ip_csum_sub_even (csum, old_addr0.as_u64[0]);
              csum = ip_csum_sub_even (csum, old_addr0.as_u64[1]);
              csum = ip_csum_add_even (csum, new_addr0.as_u64[0]);
              csum = ip_csum_add_even (csum, new_addr0.as_u64[1]);
              csum = ip_csum_sub_even (csum, old_port0);
              csum = ip_csum_add_even (csum, new_port0);
              udp0->checksum = ip_csum_fold (csum);
          /* NOTE(review): v6 branch compares against the NAT4 drop enum —
           * presumably the NAT4/NAT6 drop values are equal; confirm. */
          pkts_processed += next0 != LB_NAT4_IN2OUT_NEXT_DROP;
          trace0: if (PREDICT_FALSE(
              (node->flags & VLIB_NODE_FLAG_TRACE) && (b0->flags & VLIB_BUFFER_IS_TRACED)))
              lb_nat_trace_t *t = vlib_add_trace (vm, node, b0, sizeof(*t));
              t->rx_sw_if_index = sw_if_index0;
              t->next_index = next0;
          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                          n_left_to_next, bi0, next0);
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  vlib_node_increment_counter (vm, stats_node_index,
                               LB_NAT_IN2OUT_ERROR_IN2OUT_PACKETS,
  return frame->n_vectors;
/* Per-node entry points: each wrapper instantiates lb_node_fn with
 * compile-time parameters (is_input_v4, encap_type, per_port_vip,
 * src_ip_sticky), letting the compiler specialize the generic loop.
 * Naming: lb<inner-AF>_<encap>[_port][_sticky]. The NAT in2out wrappers
 * instantiate lb_nat_in2out_node_fn with is_nat4. */
lb6_gre6_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE6, 0, 0);
lb6_gre4_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE4, 0, 0);
lb4_gre6_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE6, 0, 0);
lb4_gre4_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE4, 0, 0);
lb6_gre6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE6, 1, 0);
lb6_gre4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE4, 1, 0);
lb4_gre6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE6, 1, 0);
lb4_gre4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE4, 1, 0);
lb4_l3dsr_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_L3DSR, 0, 0);
lb4_l3dsr_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                        vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_L3DSR, 1, 0);
lb6_nat6_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_NAT6, 1, 0);
lb4_nat4_port_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_NAT4, 1, 0);
lb6_gre6_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                         vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE6, 0, 1);
lb6_gre4_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                         vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE4, 0, 1);
lb4_gre6_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                         vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE6, 0, 1);
lb4_gre4_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                         vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE4, 0, 1);
lb6_gre6_port_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                              vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE6, 1, 1);
lb6_gre4_port_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                              vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_GRE4, 1, 1);
lb4_gre6_port_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                              vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE6, 1, 1);
lb4_gre4_port_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                              vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_GRE4, 1, 1);
lb4_l3dsr_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                          vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_L3DSR, 0, 1);
lb4_l3dsr_port_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                               vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_L3DSR, 1, 1);
lb6_nat6_port_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                              vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 0, LB_ENCAP_TYPE_NAT6, 1, 1);
lb4_nat4_port_sticky_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                              vlib_frame_t *frame)
  return lb_node_fn (vm, node, frame, 1, LB_ENCAP_TYPE_NAT4, 1, 1);
lb_nat4_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                        vlib_frame_t * frame)
  return lb_nat_in2out_node_fn (vm, node, frame, 1);
lb_nat6_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                        vlib_frame_t * frame)
  return lb_nat_in2out_node_fn (vm, node, frame, 0);
/* Graph node registrations. Every lb node shares the same shape:
 * format_lb_trace for packet traces, the common lb error strings, and a
 * single "error-drop" next node (the real forwarding next comes from the
 * AS's DPO at runtime, see lb_node_fn). */
VLIB_REGISTER_NODE (lb6_gre6_node) =
  .function = lb6_gre6_node_fn,
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb6_gre4_node) =
  .function = lb6_gre4_node_fn,
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_gre6_node) =
  .function = lb4_gre6_node_fn,
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_gre4_node) =
  .function = lb4_gre4_node_fn,
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb6_gre6_port_node) =
  .function = lb6_gre6_port_node_fn,
  .name = "lb6-gre6-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb6_gre4_port_node) =
  .function = lb6_gre4_port_node_fn,
  .name = "lb6-gre4-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_gre6_port_node) =
  .function = lb4_gre6_port_node_fn,
  .name = "lb4-gre6-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_gre4_port_node) =
  .function = lb4_gre4_port_node_fn,
  .name = "lb4-gre4-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_l3dsr_port_node) =
  .function = lb4_l3dsr_port_node_fn,
  .name = "lb4-l3dsr-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_l3dsr_node) =
  .function = lb4_l3dsr_node_fn,
  .name = "lb4-l3dsr",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb6_nat6_port_node) =
  .function = lb6_nat6_port_node_fn,
  .name = "lb6-nat6-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_nat4_port_node) =
  .function = lb4_nat4_port_node_fn,
  .name = "lb4-nat4-port",
  .vector_size = sizeof(u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  { [LB_NEXT_DROP] = "error-drop" },
/* Sticky (source-IP affinity) variants of the nodes above. */
VLIB_REGISTER_NODE (lb6_gre6_sticky_node) = {
  .function = lb6_gre6_sticky_node_fn,
  .name = "lb6-gre6-sticky",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb6_gre4_sticky_node) = {
  .function = lb6_gre4_sticky_node_fn,
  .name = "lb6-gre4-sticky",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_gre6_sticky_node) = {
  .function = lb4_gre6_sticky_node_fn,
  .name = "lb4-gre6-sticky",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_gre4_sticky_node) = {
  .function = lb4_gre4_sticky_node_fn,
  .name = "lb4-gre4-sticky",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb6_gre6_port_sticky_node) = {
  .function = lb6_gre6_port_sticky_node_fn,
  .name = "lb6-gre6-port-sticky",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb6_gre4_port_sticky_node) = {
  .function = lb6_gre4_port_sticky_node_fn,
  .name = "lb6-gre4-port-sticky",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_gre6_port_sticky_node) = {
  .function = lb4_gre6_port_sticky_node_fn,
  .name = "lb4-gre6-port-sticky",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_gre4_port_sticky_node) = {
  .function = lb4_gre4_port_sticky_node_fn,
  .name = "lb4-gre4-port-sticky",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
VLIB_REGISTER_NODE (lb4_l3dsr_port_sticky_node) = {
  .function = lb4_l3dsr_port_sticky_node_fn,
  .name = "lb4-l3dsr-port-sticky",
  .vector_size = sizeof (u32),
  .format_trace = format_lb_trace,
  .n_errors = LB_N_ERROR,
  .error_strings = lb_error_strings,
  .n_next_nodes = LB_N_NEXT,
  .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
1402 VLIB_REGISTER_NODE (lb4_l3dsr_sticky_node) = {
1403 .function = lb4_l3dsr_sticky_node_fn,
1404 .name = "lb4-l3dsr-sticky",
1405 .vector_size = sizeof (u32),
1406 .format_trace = format_lb_trace,
1407 .n_errors = LB_N_ERROR,
1408 .error_strings = lb_error_strings,
1409 .n_next_nodes = LB_N_NEXT,
1410 .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
1413 VLIB_REGISTER_NODE (lb6_nat6_port_sticky_node) = {
1414 .function = lb6_nat6_port_sticky_node_fn,
1415 .name = "lb6-nat6-port-sticky",
1416 .vector_size = sizeof (u32),
1417 .format_trace = format_lb_trace,
1418 .n_errors = LB_N_ERROR,
1419 .error_strings = lb_error_strings,
1420 .n_next_nodes = LB_N_NEXT,
1421 .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
1424 VLIB_REGISTER_NODE (lb4_nat4_port_sticky_node) = {
1425 .function = lb4_nat4_port_sticky_node_fn,
1426 .name = "lb4-nat4-port-sticky",
1427 .vector_size = sizeof (u32),
1428 .format_trace = format_lb_trace,
1429 .n_errors = LB_N_ERROR,
1430 .error_strings = lb_error_strings,
1431 .n_next_nodes = LB_N_NEXT,
1432 .next_nodes = { [LB_NEXT_DROP] = "error-drop" },
1436 lb4_nodeport_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1437 vlib_frame_t * frame)
1439 return lb_nodeport_node_fn (vm, node, frame, 1);
1443 lb6_nodeport_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1444 vlib_frame_t * frame)
1446 return lb_nodeport_node_fn (vm, node, frame, 0);
1449 VLIB_REGISTER_NODE (lb4_nodeport_node) =
1451 .function = lb4_nodeport_node_fn,
1452 .name = "lb4-nodeport",
1453 .vector_size = sizeof(u32),
1454 .format_trace = format_nodeport_lb_trace,
1455 .n_errors = LB_N_ERROR,
1456 .error_strings = lb_error_strings,
1457 .n_next_nodes = LB4_NODEPORT_N_NEXT,
1460 [LB4_NODEPORT_NEXT_IP4_NAT4] = "lb4-nat4-port",
1461 [LB4_NODEPORT_NEXT_DROP] = "error-drop",
1465 VLIB_REGISTER_NODE (lb6_nodeport_node) =
1467 .function = lb6_nodeport_node_fn,
1468 .name = "lb6-nodeport",
1469 .vector_size = sizeof(u32),
1470 .format_trace = format_nodeport_lb_trace,
1471 .n_errors = LB_N_ERROR,
1472 .error_strings = lb_error_strings,
1473 .n_next_nodes = LB6_NODEPORT_N_NEXT,
1476 [LB6_NODEPORT_NEXT_IP6_NAT6] = "lb6-nat6-port",
1477 [LB6_NODEPORT_NEXT_DROP] = "error-drop",
1481 VNET_FEATURE_INIT (lb_nat4_in2out_node_fn, static) =
1483 .arc_name = "ip4-unicast",
1484 .node_name = "lb-nat4-in2out",
1485 .runs_before = VNET_FEATURES("ip4-lookup"),
1488 VLIB_REGISTER_NODE (lb_nat4_in2out_node) =
1490 .function = lb_nat4_in2out_node_fn,
1491 .name = "lb-nat4-in2out",
1492 .vector_size = sizeof(u32),
1493 .format_trace = format_lb_nat_trace,
1494 .n_errors = LB_N_ERROR,
1495 .error_strings = lb_error_strings,
1496 .n_next_nodes = LB_NAT4_IN2OUT_N_NEXT,
1499 [LB_NAT4_IN2OUT_NEXT_DROP] = "error-drop",
1500 [LB_NAT4_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
1504 VNET_FEATURE_INIT (lb_nat6_in2out_node_fn, static) =
1506 .arc_name = "ip6-unicast",
1507 .node_name = "lb-nat6-in2out",
1508 .runs_before = VNET_FEATURES("ip6-lookup"),
/* Graph node registration: "lb-nat6-in2out" — NAT6 inside-to-outside
 * rewrite; next nodes are error-drop and ip6-lookup. NOTE(review): the
 * registration's closing braces lie beyond this excerpt. */
1511 VLIB_REGISTER_NODE (lb_nat6_in2out_node) =
1513 .function = lb_nat6_in2out_node_fn,
1514 .name = "lb-nat6-in2out",
1515 .vector_size = sizeof(u32),
1516 .format_trace = format_lb_nat_trace,
1517 .n_errors = LB_N_ERROR,
1518 .error_strings = lb_error_strings,
1519 .n_next_nodes = LB_NAT6_IN2OUT_N_NEXT,
1522 [LB_NAT6_IN2OUT_NEXT_DROP] = "error-drop",
1523 [LB_NAT6_IN2OUT_NEXT_LOOKUP] = "ip6-lookup",