2 * Copyright (c) 2016 Intel and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include <vnet/fib/ip4_fib.h>
19 #include <kubeproxy/kp.h>
20 #include <kubeproxy/kphash.h>
22 #define foreach_kp_error \
24 _(PROTO_NOT_SUPPORTED, "protocol not supported")
27 #define _(sym,str) KP_ERROR_##sym,
33 static char *kp_error_strings[] = {
34 #define _(sym,string) string,
47 } kp_nodeport_trace_t;
55 format_kp_trace (u8 * s, va_list * args)
57 kp_main_t *kpm = &kp_main;
58 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
60 kp_trace_t *t = va_arg (*args, kp_trace_t *);
61 if (pool_is_free_index(kpm->vips, t->vip_index)) {
62 s = format(s, "kp vip[%d]: This VIP was freed since capture\n");
64 s = format(s, "kp vip[%d]: %U\n", t->vip_index, format_kp_vip, &kpm->vips[t->vip_index]);
66 if (pool_is_free_index(kpm->pods, t->pod_index)) {
67 s = format(s, " kp pod[%d]: This POD was freed since capture");
69 s = format(s, " kp pod[%d]: %U", t->pod_index, format_kp_pod, &kpm->pods[t->pod_index]);
/**
 * @brief Packet-trace formatter for the kp NAT node.
 *
 * Prints the RX interface and the chosen next-node index recorded in
 * kp_nat_trace_t.
 * NOTE(review): opening brace / return statement appear elided in this
 * capture; only the visible statements are annotated.
 */
format_kp_nat_trace (u8 * s, va_list * args)
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  kp_nat_trace_t *t = va_arg (*args, kp_nat_trace_t *);
  s = format(s, "kp nat: rx_sw_if_index = %d, next_index = %d",
             t->rx_sw_if_index, t->next_index);
/**
 * @brief Return this thread's sticky (flow-affinity) hash table,
 * recreating it when absent or when the configured bucket count changed.
 *
 * On a size change the old table is drained first: the per-thread pod
 * refcount taken for each cached entry is released, then the table is
 * freed.  The flow timeout is refreshed on every call.
 *
 * NOTE(review): several lines (declarations of b/i, braces) are elided
 * in this capture; comments describe only the visible logic.
 */
kp_hash_t *kp_get_sticky_table(u32 thread_index)
  kp_main_t *kpm = &kp_main;
  kp_hash_t *sticky_ht = kpm->per_cpu[thread_index].sticky_ht;
  //Check if size changed
  if (PREDICT_FALSE(sticky_ht && (kpm->per_cpu_sticky_buckets != kp_hash_nbuckets(sticky_ht))))
    //Dereference everything in there
    kp_hash_foreach_entry(sticky_ht, b, i) {
      /* Release the reference this thread held on the cached pod index... */
      vlib_refcount_add(&kpm->pod_refcount, thread_index, b->value[i], -1);
      /* ...and move it to index 0 (the placeholder pod; see the
         KP_VIP_COUNTER_NO_SERVER handling in kp_node_fn). */
      vlib_refcount_add(&kpm->pod_refcount, thread_index, 0, 1);
    kp_hash_free(sticky_ht);
  //Create if necessary
  if (PREDICT_FALSE(sticky_ht == NULL)) {
    kpm->per_cpu[thread_index].sticky_ht = kp_hash_alloc(kpm->per_cpu_sticky_buckets, kpm->flow_timeout);
    sticky_ht = kpm->per_cpu[thread_index].sticky_ht;
    clib_warning("Regenerated sticky table %p", sticky_ht);
  /* Keep the timeout in sync with the current configuration. */
  sticky_ht->timeout = kpm->flow_timeout;
/* Port-pair contribution to the flow hash for non-TCP/UDP IPv4 packets
   (used as the fallback in kp_node_get_hash; body elided in this capture). */
kp_node_get_other_ports4(ip4_header_t *ip40)
/* Same as above for IPv6 packets (body elided in this capture). */
kp_node_get_other_ports6(ip6_header_t *ip60)
/**
 * @brief Compute the flow hash for a buffer (5-tuple based).
 *
 * For TCP/UDP the src/dst ports are folded into a single value (byte
 * order does not matter for hashing); other protocols fall back to
 * kp_node_get_other_ports4/6.  is_input_v4 selects the IPv4 or IPv6
 * parse path at compile time.
 *
 * NOTE(review): branch/brace lines and the tail arguments of the
 * kp_hash_hash() calls are elided in this capture.
 */
static_always_inline u32
kp_node_get_hash(vlib_buffer_t *p, u8 is_input_v4)
  ip40 = vlib_buffer_get_current (p);
  if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP ||
                    ip40->protocol == IP_PROTOCOL_UDP))
    /* Fold both L4 ports into one 32-bit value for the hash. */
    ports = ((u64)((udp_header_t *)(ip40 + 1))->src_port << 16) |
            ((u64)((udp_header_t *)(ip40 + 1))->dst_port);
    ports = kp_node_get_other_ports4(ip40);
  /* NOTE(review): type-punned u64 load of the src/dst address pair —
     relies on clib packed-struct conventions; confirm alignment is safe. */
  hash = kp_hash_hash(*((u64 *)&ip40->address_pair), ports,
  ip60 = vlib_buffer_get_current (p);
  if (PREDICT_TRUE (ip60->protocol == IP_PROTOCOL_TCP ||
                    ip60->protocol == IP_PROTOCOL_UDP))
    ports = ((u64)((udp_header_t *)(ip60 + 1))->src_port << 16) |
            ((u64)((udp_header_t *)(ip60 + 1))->dst_port);
    ports = kp_node_get_other_ports6(ip60);
  /* IPv6: hash over the full 128-bit source and destination addresses. */
  hash = kp_hash_hash(ip60->src_address.as_u64[0],
                      ip60->src_address.as_u64[1],
                      ip60->dst_address.as_u64[0],
                      ip60->dst_address.as_u64[1],
/**
 * @brief Main kube-proxy load-balance node function (compile-time template).
 *
 * For each packet: look up the VIP (index carried in
 * vnet_buffer()->ip.adj_index[VLIB_TX]), find or create a sticky-table
 * entry selecting a destination pod, rewrite the destination
 * address/port (NAT44/46/66/64 chosen by the template flags), and
 * enqueue to the pod's DPO next node.
 *
 * @param is_input_v4 Compile-time: input packets are IPv4 (else IPv6).
 * @param is_nat_v4   Compile-time: pod addressing is IPv4 (else IPv6).
 *
 * NOTE(review): many lines (declarations, braces, else branches,
 * trailing call arguments) are elided in this capture; comments annotate
 * only the visible statements.
 */
static_always_inline uword
kp_node_fn (vlib_main_t * vm,
            vlib_node_runtime_t * node, vlib_frame_t * frame,
            u8 is_input_v4, //Compile-time parameter stating that is input is v4 (or v6)
            u8 is_nat_v4) //Compile-time parameter stating that is NAT is v4 (or v6)
  kp_main_t *kpm = &kp_main;
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  u32 thread_index = vlib_get_thread_index();
  u32 kp_time = kp_hash_time_now(vm);
  kp_hash_t *sticky_ht = kp_get_sticky_table(thread_index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  /* Hash of packet 0 is computed up front so each loop iteration can
     hash packet i+1 and prefetch its bucket while handling packet i. */
  if (PREDICT_TRUE(n_left_from > 0))
    nexthash0 = kp_node_get_hash(vlib_get_buffer (vm, from[0]), is_input_v4);

  while (n_left_from > 0)
    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
    while (n_left_from > 0 && n_left_to_next > 0)
      u32 available_index0;
      u32 hash0 = nexthash0;
      if (PREDICT_TRUE(n_left_from > 1))
        vlib_buffer_t *p1 = vlib_get_buffer (vm, from[1]);
        //Compute next hash and prefetch bucket
        nexthash0 = kp_node_get_hash(p1, is_input_v4);
        kp_hash_prefetch_bucket(sticky_ht, nexthash0);
        //Prefetch for encap, next
        CLIB_PREFETCH (vlib_buffer_get_current(p1) - 64, 64, STORE);
      if (PREDICT_TRUE(n_left_from > 2))
        p2 = vlib_get_buffer(vm, from[2]);
        /* prefetch packet header and data */
        vlib_prefetch_buffer_header(p2, STORE);
        CLIB_PREFETCH (vlib_buffer_get_current(p2), 64, STORE);

      pi0 = to_next[0] = from[0];
      p0 = vlib_get_buffer (vm, pi0);
      /* VIP index was stashed in the TX adjacency slot by an upstream node. */
      vip0 = pool_elt_at_index (kpm->vips,
                                vnet_buffer (p0)->ip.adj_index[VLIB_TX]);

      /* Sticky-table lookup: yields the pod of an existing flow, or a
         free slot index where a new flow may be recorded. */
      kp_hash_get(sticky_ht, hash0, vnet_buffer (p0)->ip.adj_index[VLIB_TX],
                  kp_time, &available_index0, &podindex0);
      if (PREDICT_TRUE(podindex0 != ~0))
        //Found an existing entry
        counter = KP_VIP_COUNTER_NEXT_PACKET;
      else if (PREDICT_TRUE(available_index0 != ~0))
        //There is an available slot for a new flow
        podindex0 = vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].pod_index;
        counter = KP_VIP_COUNTER_FIRST_PACKET;
        /* pod index 0 is the "no server" placeholder */
        counter = (podindex0 == 0)?KP_VIP_COUNTER_NO_SERVER:counter;
        //Dereference previously used
        vlib_refcount_add(&kpm->pod_refcount, thread_index,
                          kp_hash_available_value(sticky_ht, hash0, available_index0), -1);
        /* (second refcount call's trailing arguments elided in capture) */
        vlib_refcount_add(&kpm->pod_refcount, thread_index,
        //Note that when there is no POD configured, an entry is configured anyway.
        //But no configured POD is not something that should happen
        kp_hash_put(sticky_ht, hash0, podindex0,
                    vnet_buffer (p0)->ip.adj_index[VLIB_TX],
                    available_index0, kp_time);
      /* else branch (table full): */
        //Could not store new entry in the table
        podindex0 = vip0->new_flow_table[hash0 & vip0->new_flow_table_mask].pod_index;
        counter = KP_VIP_COUNTER_UNTRACKED_PACKET;

      vlib_increment_simple_counter(&kpm->vip_counters[counter],
                                    vnet_buffer (p0)->ip.adj_index[VLIB_TX],

      /* Destination rewrite, specialized by the two template flags. */
      if ( (is_input_v4==1) && (is_nat_v4==1) ) /* NAT44 */
        ip40 = vlib_buffer_get_current(p0);
        port0 = (udp_header_t *)(ip40 + 1);
        ip40->dst_address = kpm->pods[podindex0].address.ip4;
        /* Header fields changed: recompute the IPv4 checksum in full. */
        ip40->checksum = ip4_header_checksum (ip40);
      else if ( (is_input_v4==1) && (is_nat_v4==0) ) /* NAT46 */
        ip40 = vlib_buffer_get_current(p0);
        len0 = clib_net_to_host_u16(ip40->length);
        /* Grow headroom: replace the IPv4 header with a larger IPv6 one. */
        vlib_buffer_advance(p0, (-sizeof(ip6_header_t)+sizeof(ip4_header_t)) );
        ip60 = vlib_buffer_get_current(p0);
        port0 = (udp_header_t *)(ip60 + 1);
        /* IPv6 payload_length excludes its own header, IPv4 length included it. */
        ip60->payload_length = len0 - sizeof(ip4_header_t);
        ip60->dst_address = kpm->pods[podindex0].address.ip6;
      else if ( (is_input_v4==0) && (is_nat_v4==0) ) /* NAT66 */
        ip60 = vlib_buffer_get_current(p0);
        port0 = (udp_header_t *)(ip60 + 1);
        ip60->dst_address = kpm->pods[podindex0].address.ip6;
      /* remaining branch: NAT64 (v6 input, v4 pod) */
        ip60 = vlib_buffer_get_current(p0);
        len0 = clib_net_to_host_u16(ip60->payload_length);
        /* Shrink headroom: IPv6 header replaced by smaller IPv4 header. */
        vlib_buffer_advance(p0, (sizeof(ip6_header_t)-sizeof(ip4_header_t)) );
        ip40 = vlib_buffer_get_current(p0);
        port0 = (udp_header_t *)(ip40 + 1);
        ip40->length = len0 + sizeof(ip4_header_t);
        ip40->dst_address = kpm->pods[podindex0].address.ip4;
        ip40->checksum = ip4_header_checksum (ip40);

      /* All paths: rewrite L4 destination port to the service target port.
         NOTE(review): no visible L4 checksum update after the port/address
         rewrite — confirm whether checksum offload is assumed. */
      port0->dst_port = vip0->target_port;

      if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
        kp_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
        tr->pod_index = podindex0;
        tr->vip_index = vnet_buffer (p0)->ip.adj_index[VLIB_TX];

      //Note that this is going to error if podindex0 == 0
      vnet_buffer (p0)->ip.adj_index[VLIB_TX] = kpm->pods[podindex0].dpo.dpoi_index;
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                       kpm->pods[podindex0].dpo.dpoi_next_node);
    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  return frame->n_vectors;
348 format_nodeport_kp_trace (u8 * s, va_list * args)
350 kp_main_t *kpm = &kp_main;
351 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
352 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
353 kp_nodeport_trace_t *t = va_arg (*args, kp_nodeport_trace_t *);
354 if (pool_is_free_index(kpm->vips, t->vip_index)) {
355 s = format(s, "kp vip[%d]: This VIP was freed since capture\n");
357 s = format(s, "kp vip[%d]: %U\n", t->vip_index, format_kp_vip, &kpm->vips[t->vip_index]);
360 s = format(s, " kp node_port: %d", t->node_port);
/**
 * @brief Nodeport dispatch node (compile-time IPv4/IPv6 template).
 *
 * Rewinds each buffer to its IP header, looks up the UDP/TCP destination
 * port in kpm->nodeport_by_key, stores the matched VIP index in the TX
 * adjacency slot, and forwards to the matching kp NAT node (drop when no
 * mapping exists — next0 defaults to KP_NODEPORT_NEXT_DROP).
 *
 * NOTE(review): the is_input_v4 parameter line, branch structure and
 * several call-argument lines are elided in this capture.
 */
kp_nodeport_node_fn (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * frame,
  kp_main_t *kpm = &kp_main;
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
    while (n_left_from > 0 && n_left_to_next > 0)
      udp_header_t * udp_0;
      /* Default: drop unless a nodeport mapping is found below. */
      u32 next0 = KP_NODEPORT_NEXT_DROP;
      if (PREDICT_TRUE(n_left_from > 1))
        vlib_buffer_t *p1 = vlib_get_buffer (vm, from[1]);
        //Prefetch for encap, next
        CLIB_PREFETCH (vlib_buffer_get_current(p1) - 64, 64, STORE);
      if (PREDICT_TRUE(n_left_from > 2))
        p2 = vlib_get_buffer(vm, from[2]);
        /* prefetch packet header and data */
        vlib_prefetch_buffer_header(p2, STORE);
        CLIB_PREFETCH (vlib_buffer_get_current(p2), 64, STORE);

      pi0 = to_next[0] = from[0];
      p0 = vlib_get_buffer (vm, pi0);
      /* IPv4 path: rewind past UDP+IPv4 headers back to the IP header
         (enclosing vlib_buffer_advance call line elided in capture). */
        (p0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
      ip40 = vlib_buffer_get_current(p0);
      udp_0 = (udp_header_t *)(ip40 + 1);
      /* IPv6 path: same rewind with the IPv6 header size. */
        (p0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
      ip60 = vlib_buffer_get_current(p0);
      udp_0 = (udp_header_t *)(ip60 + 1);

      /* Nodeport table is keyed by the (network-order) destination port. */
      entry0 = hash_get_mem(kpm->nodeport_by_key, &(udp_0->dst_port));
      next0 = KP_NODEPORT_NEXT_IP4_NAT4;
      next0 = KP_NODEPORT_NEXT_IP6_NAT6;

      if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
        kp_nodeport_trace_t *tr = vlib_add_trace (vm, node,
        tr->vip_index = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
        tr->node_port = (u32)clib_net_to_host_u16(udp_0->dst_port);

      /* Hand the VIP index to the downstream kp NAT node. */
      vnet_buffer(p0)->ip.adj_index[VLIB_TX] = entry0[0];
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                       n_left_to_next, pi0, next0);
    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  return frame->n_vectors;
/**
 * @brief Match NAT4 static mapping.
 *
 * @param kpm kube-proxy main structure (note: was previously documented
 *            as "sm NAT main", which does not match the parameter).
 * @param match Address and port to match.
 * @param mapping External or local address and port of the matched mapping.
 *
 * @returns 0 if match found otherwise 1.
 */
int kp_nat4_mapping_match (kp_main_t *kpm,
                           kp_snat4_key_t match,
                           kp_snat4_key_t * mapping)
  clib_bihash_kv_8_8_t kv, value;
  kp_snat_mapping_t *m;
  kp_snat4_key_t m_key;
  clib_bihash_8_8_t *mapping_hash = &kpm->mapping_by_pod;

  /* Build the 8-byte bihash key from the caller's 4-tuple. */
  m_key.addr = match.addr;
  m_key.port = match.port;
  m_key.protocol = match.protocol;
  m_key.fib_index = match.fib_index;
  kv.key = m_key.as_u64;

  /* Non-zero search result means no mapping
     (return statement elided in this capture). */
  if (clib_bihash_search_8_8 (mapping_hash, &kv, &value))

  m = pool_elt_at_index (kpm->snat_mappings, value.value);

  /* VIP:port style service — rewrite toward the service VIP. */
  if (m->svr_type == KP_SVR_TYPE_VIP_PORT)
    mapping->addr = m->vip.ip4;
    mapping->port = clib_host_to_net_u16 (m->port);
    mapping->fib_index = m->fib_index;
    mapping->protocol = match.protocol;
  /* NodeIP:port style service — rewrite toward the node address/port. */
  else if (m->svr_type == KP_SVR_TYPE_NODEIP_PORT)
    mapping->addr = m->node_ip.ip4;
    mapping->port = clib_host_to_net_u16 (m->node_port);
    mapping->fib_index = m->fib_index;
    mapping->protocol = match.protocol;
/**
 * @brief IPv4 inside-to-outside source-NAT node.
 *
 * For each packet: classify the protocol, look up a static mapping for
 * the (src addr, src port, proto, fib) tuple via kp_nat4_mapping_match,
 * rewrite the source address (and source port when it differs), and
 * incrementally patch the IP and TCP checksums.  Unmatched packets are
 * dropped; matched ones continue to ip4-lookup.
 *
 * NOTE(review): declarations, braces, else branches and some trailing
 * call arguments are elided in this capture; comments annotate visible
 * statements only.
 */
kp_nat4_in2out_node_fn (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * frame)
  u32 n_left_from, * from, * to_next;
  kp_nat4_in2out_next_t next_index;
  u32 pkts_processed = 0;
  kp_main_t *kpm = &kp_main;
  u32 stats_node_index;
  stats_node_index = kp_nat4_in2out_node.index;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    vlib_get_next_frame (vm, node, next_index,
                         to_next, n_left_to_next);
    while (n_left_from > 0 && n_left_to_next > 0)
      u32 new_addr0, old_addr0;
      u16 old_port0, new_port0;
      kp_snat4_key_t key0, sm0;

      /* speculatively enqueue b0 to the current next frame */
      b0 = vlib_get_buffer (vm, bi0);
      next0 = KP_NAT4_IN2OUT_NEXT_LOOKUP;

      ip0 = vlib_buffer_get_current (b0);
      udp0 = ip4_next_header (ip0);
      /* TCP and UDP share the first two 16-bit port fields, so the same
         pointer is viewed both ways. */
      tcp0 = (tcp_header_t *) udp0;

      sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index(sw_if_index0);

      /* ~0 means a protocol NAT does not handle (branch body elided). */
      proto0 = kp_ip_proto_to_nat_proto (ip0->protocol);
      if (PREDICT_FALSE (proto0 == ~0))

      /* Lookup key: the packet's source tuple in the RX fib. */
      key0.addr = ip0->src_address;
      key0.protocol = proto0;
      key0.port = udp0->src_port;
      key0.fib_index = rx_fib_index0;

      if (kp_nat4_mapping_match (kpm, key0, &sm0))
        next0= KP_NAT4_IN2OUT_NEXT_DROP;

      /* Apply the mapping: rewrite the source address... */
      new_addr0 = sm0.addr.as_u32;
      new_port0 = sm0.port;
      vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
      old_addr0 = ip0->src_address.as_u32;
      ip0->src_address.as_u32 = new_addr0;

      /* ...and incrementally patch the IPv4 header checksum. */
      sum0 = ip0->checksum;
      sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
                             src_address /* changed member */);
      ip0->checksum = ip_csum_fold (sum0);

      /* NOTE(review): comparing the new *source* port against
         udp0->dst_port looks suspicious — upstream snat in2out compares
         against udp0->src_port.  Confirm intent before relying on it. */
      if (PREDICT_FALSE(new_port0 != udp0->dst_port))
        if (PREDICT_TRUE(proto0 == KP_NAT_PROTOCOL_TCP))
          old_port0 = tcp0->src_port;
          tcp0->src_port = new_port0;

          sum0 = tcp0->checksum;
          sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
                                 dst_address /* changed member */);
          /* Port delta folded in using ip4_header_t member offsets as a
             stand-in ("cheat") for the TCP header layout. */
          sum0 = ip_csum_update (sum0, old_port0, new_port0,
                                 ip4_header_t /* cheat */,
                                 length /* changed member */);
          tcp0->checksum = ip_csum_fold(sum0);
        /* UDP: rewrite port only (checksum handling elided in capture). */
          old_port0 = udp0->src_port;
          udp0->src_port = new_port0;

      /* Port unchanged: only the address delta affects the TCP checksum. */
      if (PREDICT_TRUE(proto0 == KP_NAT_PROTOCOL_TCP))
        sum0 = tcp0->checksum;
        sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
                               dst_address /* changed member */);
        tcp0->checksum = ip_csum_fold(sum0);

      if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
                        && (b0->flags & VLIB_BUFFER_IS_TRACED)))
        vlib_add_trace (vm, node, b0, sizeof (*t));
        t->rx_sw_if_index = sw_if_index0;
        t->next_index = next0;

      pkts_processed += next0 != KP_NAT4_IN2OUT_NEXT_DROP;

      /* verify speculative enqueue, maybe switch current next frame */
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                       to_next, n_left_to_next,
    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  vlib_node_increment_counter (vm, stats_node_index,
                               KP_NAT_IN2OUT_ERROR_IN2OUT_PACKETS,
  return frame->n_vectors;
/* NAT66 entry point: IPv6 input, IPv6 pod addressing.
   (Return-type line elided in this capture — presumably static uword; confirm.) */
kp6_nat6_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * frame)
  return kp_node_fn(vm, node, frame, 0, 0);
/* NAT64 entry point: IPv6 input, IPv4 pod addressing. */
kp6_nat4_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * frame)
  return kp_node_fn(vm, node, frame, 0, 1);
/* NAT46 entry point: IPv4 input, IPv6 pod addressing. */
kp4_nat6_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * frame)
  return kp_node_fn(vm, node, frame, 1, 0);
/* NAT44 entry point: IPv4 input, IPv4 pod addressing. */
kp4_nat4_node_fn (vlib_main_t * vm,
                  vlib_node_runtime_t * node, vlib_frame_t * frame)
  return kp_node_fn(vm, node, frame, 1, 1);
/* Graph-node registration for the NAT66 path.
   NOTE(review): the .name field appears elided in this capture. */
VLIB_REGISTER_NODE (kp6_nat6_node) =
  .function = kp6_nat6_node_fn,
  .vector_size = sizeof (u32),
  .format_trace = format_kp_trace,
  .n_errors = KP_N_ERROR,
  .error_strings = kp_error_strings,
  .n_next_nodes = KP_N_NEXT,
  [KP_NEXT_DROP] = "error-drop"
/* Graph-node registration for the NAT64 path. */
VLIB_REGISTER_NODE (kp6_nat4_node) =
  .function = kp6_nat4_node_fn,
  .vector_size = sizeof (u32),
  .format_trace = format_kp_trace,
  .n_errors = KP_N_ERROR,
  .error_strings = kp_error_strings,
  .n_next_nodes = KP_N_NEXT,
  [KP_NEXT_DROP] = "error-drop"
/* Graph-node registration for the NAT46 path. */
VLIB_REGISTER_NODE (kp4_nat6_node) =
  .function = kp4_nat6_node_fn,
  .vector_size = sizeof (u32),
  .format_trace = format_kp_trace,
  .n_errors = KP_N_ERROR,
  .error_strings = kp_error_strings,
  .n_next_nodes = KP_N_NEXT,
  [KP_NEXT_DROP] = "error-drop"
/* Graph-node registration for the NAT44 path. */
VLIB_REGISTER_NODE (kp4_nat4_node) =
  .function = kp4_nat4_node_fn,
  .vector_size = sizeof (u32),
  .format_trace = format_kp_trace,
  .n_errors = KP_N_ERROR,
  .error_strings = kp_error_strings,
  .n_next_nodes = KP_N_NEXT,
  [KP_NEXT_DROP] = "error-drop"
/* IPv4 nodeport entry point (is_input_v4 = 1). */
kp4_nodeport_node_fn (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * frame)
  return kp_nodeport_node_fn(vm, node, frame, 1);
/* IPv6 nodeport entry point (is_input_v4 = 0). */
kp6_nodeport_node_fn (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * frame)
  return kp_nodeport_node_fn(vm, node, frame, 0);
/* Graph-node registration for the IPv4 nodeport dispatcher; next nodes
   cover all four kp NAT paths plus drop. */
VLIB_REGISTER_NODE (kp4_nodeport_node) =
  .function = kp4_nodeport_node_fn,
  .name = "kp4-nodeport",
  .vector_size = sizeof (u32),
  .format_trace = format_nodeport_kp_trace,
  .n_errors = KP_N_ERROR,
  .error_strings = kp_error_strings,
  .n_next_nodes = KP_NODEPORT_N_NEXT,
  [KP_NODEPORT_NEXT_IP4_NAT4] = "kp4-nat4",
  [KP_NODEPORT_NEXT_IP4_NAT6] = "kp4-nat6",
  [KP_NODEPORT_NEXT_IP6_NAT4] = "kp6-nat4",
  [KP_NODEPORT_NEXT_IP6_NAT6] = "kp6-nat6",
  [KP_NODEPORT_NEXT_DROP] = "error-drop",
/* Graph-node registration for the IPv6 nodeport dispatcher. */
VLIB_REGISTER_NODE (kp6_nodeport_node) =
  .function = kp6_nodeport_node_fn,
  .name = "kp6-nodeport",
  .vector_size = sizeof (u32),
  .format_trace = format_nodeport_kp_trace,
  .n_errors = KP_N_ERROR,
  .error_strings = kp_error_strings,
  .n_next_nodes = KP_NODEPORT_N_NEXT,
  [KP_NODEPORT_NEXT_IP4_NAT4] = "kp4-nat4",
  [KP_NODEPORT_NEXT_IP4_NAT6] = "kp4-nat6",
  [KP_NODEPORT_NEXT_IP6_NAT4] = "kp6-nat4",
  [KP_NODEPORT_NEXT_IP6_NAT6] = "kp6-nat6",
  [KP_NODEPORT_NEXT_DROP] = "error-drop",
/* Enable the kp-nat4-in2out node on the ip4-unicast feature arc,
   ordered to run before ip4-lookup. */
VNET_FEATURE_INIT (kp_nat4_in2out_node_fn, static) =
  .arc_name = "ip4-unicast",
  .node_name = "kp-nat4-in2out",
  .runs_before = VNET_FEATURES ("ip4-lookup"),
823 VLIB_REGISTER_NODE (kp_nat4_in2out_node) =
825 .function = kp_nat4_in2out_node_fn,
826 .name = "kp-nat4-in2out",
827 .vector_size = sizeof (u32),
828 .format_trace = format_kp_nat_trace,
830 .n_errors = KP_N_ERROR,
831 .error_strings = kp_error_strings,
833 .n_next_nodes = KP_NAT4_IN2OUT_N_NEXT,
836 [KP_NAT4_IN2OUT_NEXT_DROP] = "error-drop",
837 [KP_NAT4_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",