#include <nat/nat64.h>
#include <nat/nat_reass.h>
+#include <nat/nat_inlines.h>
#include <vnet/ip/ip6_to_ip4.h>
#include <vnet/fib/fib_table.h>
_(IN2OUT_PACKETS, "good in2out packets processed") \
_(NO_TRANSLATION, "no translation") \
_(UNKNOWN, "unknown") \
-_(DROP_FRAGMENT, "Drop fragment") \
-_(MAX_REASS, "Maximum reassemblies exceeded") \
-_(MAX_FRAG, "Maximum fragments per reassembly exceeded")
+_(DROP_FRAGMENT, "drop fragment") \
+_(MAX_REASS, "maximum reassemblies exceeded") \
+_(MAX_FRAG, "maximum fragments per reassembly exceeded") \
+_(TCP_PACKETS, "TCP packets") \
+_(UDP_PACKETS, "UDP packets") \
+_(ICMP_PACKETS, "ICMP packets") \
+_(OTHER_PACKETS, "other protocol packets") \
+_(FRAGMENTS, "fragments") \
+_(CACHED_FRAGMENTS, "cached fragments") \
+_(PROCESSED_FRAGMENTS, "processed fragments")
typedef enum
u32 thread_index;
} nat64_in2out_set_ctx_t;
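+/**
+ * @brief Check whether the IPv6 destination is one of the interface's
+ * own addresses, in which case the packet must not be translated.
+ */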
+static inline u8
+nat64_not_translate (u32 sw_if_index, ip6_address_t ip6_addr)
+{
+ ip6_address_t *addr;
+ ip6_main_t *im6 = &ip6_main;
+ ip_lookup_main_t *lm6 = &im6->lookup_main;
+ ip_interface_address_t *ia = 0;
+
+ /* *INDENT-OFF* */
+ foreach_ip_interface_address (lm6, ia, sw_if_index, 0,
+ ({
+ addr = ip_interface_address_get_address (lm6, ia);
+ if (0 == ip6_address_compare (addr, &ip6_addr))
+ return 1;
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
/**
 * @brief Check whether this is a hairpinning case.
*
bibe =
nat64_db_bib_entry_create (db, &ip6->src_address, &out_addr,
- sport, clib_host_to_net_u16 (out_port),
- fib_index, proto, 0);
+ sport, out_port, fib_index, proto, 0);
if (!bibe)
return -1;
+
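+ /* A new BIB entry was created; publish the updated count as a gauge. */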
+ vlib_set_simple_counter (&nm->total_bibs, ctx->thread_index, 0,
+ db->bib.bib_entries_num);
}
nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
&daddr.ip4, dport);
if (!ste)
return -1;
- }
- nat64_session_reset_timeout (ste, ctx->vm);
+ vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
+ db->st.st_entries_num);
+ }
ip4->src_address.as_u32 = bibe->out_addr.as_u32;
udp->src_port = bibe->out_port;
ip_csum_t csum;
tcp_header_t *tcp = ip6_next_header (ip6);
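+ /* Advance the TCP session state machine based on the observed flags. */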
+ nat64_tcp_session_set_state (ste, tcp, 1);
checksum = &tcp->checksum;
csum = ip_csum_sub_even (*checksum, sport);
csum = ip_csum_add_even (csum, udp->src_port);
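+ /* Clamp the TCP MSS option if configured; the edit is folded into csum. */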
+ mss_clamping (nm->sm, tcp, &csum);
*checksum = ip_csum_fold (csum);
}
+ nat64_session_reset_timeout (ste, ctx->vm);
+
return 0;
}
bibe =
nat64_db_bib_entry_create (db, &ip6->src_address,
- &out_addr, in_id,
- clib_host_to_net_u16 (out_id),
+ &out_addr, in_id, out_id,
fib_index, IP_PROTOCOL_ICMP, 0);
if (!bibe)
return -1;
+
+ vlib_set_simple_counter (&nm->total_bibs, ctx->thread_index, 0,
+ db->bib.bib_entries_num);
}
nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
&daddr.ip4, 0);
if (!ste)
return -1;
+
+ vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
+ db->st.st_entries_num);
}
nat64_session_reset_timeout (ste, ctx->vm);
if (ip46_address_is_equal (&bibe->in_addr, &ctx->src_addr)
&& bibe->fib_index == ctx->fib_index)
{
- memset (&saddr, 0, sizeof (saddr));
+ clib_memset (&saddr, 0, sizeof (saddr));
saddr.ip4.as_u32 = bibe->out_addr.as_u32;
- memset (&daddr, 0, sizeof (daddr));
+ clib_memset (&daddr, 0, sizeof (daddr));
nat64_extract_ip4 (&ctx->dst_addr, &daddr.ip4, ctx->fib_index);
if (nat64_db_st_entry_find
nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
 /* Verify that the outside address is not already in use for this protocol */
- memset (&addr, 0, sizeof (addr));
+ clib_memset (&addr, 0, sizeof (addr));
addr.ip4.as_u32 = ctx.out_addr.as_u32;
if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
ctx.out_addr.as_u32 = 0;
0);
if (!bibe)
return -1;
+
+ vlib_set_simple_counter (&nm->total_bibs, s_ctx->thread_index, 0,
+ db->bib.bib_entries_num);
}
nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
nat64_db_st_entry_create (db, bibe, &ip6->dst_address, &daddr.ip4, 0);
if (!ste)
return -1;
+
+ vlib_set_simple_counter (&nm->total_sessions, s_ctx->thread_index, 0,
+ db->st.st_entries_num);
}
nat64_session_reset_timeout (ste, s_ctx->vm);
bibe =
nat64_db_bib_entry_create (db, &ip6->src_address, &out_addr,
- sport, clib_host_to_net_u16 (out_port),
- fib_index, proto, 0);
+ sport, out_port, fib_index, proto, 0);
if (!bibe)
return -1;
+
+ vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
+ db->bib.bib_entries_num);
}
nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
&daddr.ip4, dport);
if (!ste)
return -1;
+
+ vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
+ db->st.st_entries_num);
}
+ if (proto == IP_PROTOCOL_TCP)
+ nat64_tcp_session_set_state (ste, tcp, 1);
+
nat64_session_reset_timeout (ste, vm);
sport = udp->src_port = bibe->out_port;
nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
- memset (&daddr, 0, sizeof (daddr));
+ clib_memset (&daddr, 0, sizeof (daddr));
daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
bibe = 0;
dport = udp->dst_port = bibe->out_port;
nat64_compose_ip6 (&inner_ip6->dst_address, &bibe->out_addr, fib_index);
- memset (&saddr, 0, sizeof (saddr));
- memset (&daddr, 0, sizeof (daddr));
+ clib_memset (&saddr, 0, sizeof (saddr));
+ clib_memset (&daddr, 0, sizeof (daddr));
saddr.ip4.as_u32 = ste->out_r_addr.as_u32;
daddr.ip4.as_u32 = bibe->out_addr.as_u32;
nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
 /* Verify that the outside address is not already in use for this protocol */
- memset (&addr, 0, sizeof (addr));
+ clib_memset (&addr, 0, sizeof (addr));
addr.ip4.as_u32 = ctx.out_addr.as_u32;
if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
ctx.out_addr.as_u32 = 0;
0);
if (!bibe)
return -1;
+
+ vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
+ db->bib.bib_entries_num);
}
nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
nat64_db_st_entry_create (db, bibe, &ip6->dst_address, &daddr.ip4, 0);
if (!ste)
return -1;
+
+ vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
+ db->st.st_entries_num);
}
nat64_session_reset_timeout (ste, vm);
nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
- memset (&daddr, 0, sizeof (daddr));
+ clib_memset (&daddr, 0, sizeof (daddr));
daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
bibe = 0;
nat64_in2out_next_t next_index;
u32 pkts_processed = 0;
u32 stats_node_index;
- u32 thread_index = vlib_get_thread_index ();
+ u32 thread_index = vm->thread_index;
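+ /* Per-protocol counters, flushed to the node counters after the loop. */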
+ u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0;
+ u32 other_packets = 0, fragments = 0;
stats_node_index =
is_slow_path ? nat64_in2out_slowpath_node.index : nat64_in2out_node.index;
u8 l4_protocol0;
u32 proto0;
nat64_in2out_set_ctx_t ctx0;
+ u32 sw_if_index0;
/* speculatively enqueue b0 to the current next frame */
bi0 = from[0];
goto trace0;
}
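+ /* Packets addressed to a local interface address bypass translation. */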
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ if (nat64_not_translate (sw_if_index0, ip60->dst_address))
+ {
+ next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
+ goto trace0;
+ }
+
proto0 = ip_proto_to_snat_proto (l4_protocol0);
if (is_slow_path)
{
if (PREDICT_TRUE (proto0 == ~0))
{
+ other_packets++;
if (is_hairpinning (&ip60->dst_address))
{
next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
(ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION))
{
next0 = NAT64_IN2OUT_NEXT_REASS;
+ fragments++;
goto trace0;
}
if (proto0 == SNAT_PROTOCOL_ICMP)
{
+ icmp_packets++;
if (is_hairpinning (&ip60->dst_address))
{
next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
}
else if (proto0 == SNAT_PROTOCOL_TCP || proto0 == SNAT_PROTOCOL_UDP)
{
+ if (proto0 == SNAT_PROTOCOL_TCP)
+ tcp_packets++;
+ else
+ udp_packets++;
+
if (is_hairpinning (&ip60->dst_address))
{
next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
t->is_slow_path = is_slow_path;
}
- pkts_processed += next0 != NAT64_IN2OUT_NEXT_DROP;
+ pkts_processed += next0 == NAT64_IN2OUT_NEXT_IP4_LOOKUP;
/* verify speculative enqueue, maybe switch current next frame */
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
vlib_node_increment_counter (vm, stats_node_index,
NAT64_IN2OUT_ERROR_IN2OUT_PACKETS,
pkts_processed);
+ vlib_node_increment_counter (vm, stats_node_index,
+ NAT64_IN2OUT_ERROR_TCP_PACKETS, tcp_packets);
+ vlib_node_increment_counter (vm, stats_node_index,
+ NAT64_IN2OUT_ERROR_UDP_PACKETS, udp_packets);
+ vlib_node_increment_counter (vm, stats_node_index,
+ NAT64_IN2OUT_ERROR_ICMP_PACKETS, icmp_packets);
+ vlib_node_increment_counter (vm, stats_node_index,
+ NAT64_IN2OUT_ERROR_OTHER_PACKETS,
+ other_packets);
+ vlib_node_increment_counter (vm, stats_node_index,
+ NAT64_IN2OUT_ERROR_FRAGMENTS, fragments);
+
return frame->n_vectors;
}
ip_csum_t csum;
tcp_header_t *tcp = (tcp_header_t *) udp;
+ nat64_tcp_session_set_state (ste, tcp, 1);
checksum = &tcp->checksum;
csum = ip_csum_sub_even (*checksum, tcp->src_port);
csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[0]);
if (!bibe)
return -1;
+ if (ctx->proto == IP_PROTOCOL_TCP)
+ nat64_tcp_session_set_state (ste, tcp, 1);
+
nat64_session_reset_timeout (ste, ctx->vm);
sport = bibe->out_port;
nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, bibe->fib_index);
- memset (&daddr, 0, sizeof (daddr));
+ clib_memset (&daddr, 0, sizeof (daddr));
daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
bibe = 0;
{
u32 n_left_from, *from, *to_next;
nat64_in2out_next_t next_index;
- u32 pkts_processed = 0;
+ u32 pkts_processed = 0, cached_fragments = 0;
u32 *fragments_to_drop = 0;
u32 *fragments_to_loopback = 0;
nat64_main_t *nm = &nat64_main;
- u32 thread_index = vlib_get_thread_index ();
+ u32 thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
ctx0.first_frag = 0;
if (PREDICT_FALSE (reass0->sess_index == (u32) ~ 0))
{
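+ /* Cache this fragment until a session exists; fragments that cannot
+ * be cached are collected in fragments_to_drop. */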
- if (nat_ip6_reass_add_fragment (reass0, bi0))
+ if (nat_ip6_reass_add_fragment
+ (reass0, bi0, &fragments_to_drop))
{
b0->error = node->errors[NAT64_IN2OUT_ERROR_MAX_FRAG];
next0 = NAT64_IN2OUT_NEXT_DROP;
nat64_db_bib_entry_create (db,
&ip60->src_address,
&out_addr0, udp0->src_port,
- clib_host_to_net_u16
- (out_port0), fib_index0,
+ out_port0, fib_index0,
l4_protocol0, 0);
if (!bibe0)
{
node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
goto trace0;
}
+ vlib_set_simple_counter (&nm->total_bibs, thread_index,
+ 0, db->bib.bib_entries_num);
}
nat64_extract_ip4 (&ip60->dst_address, &daddr0.ip4,
fib_index0);
node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
goto trace0;
}
+
+ vlib_set_simple_counter (&nm->total_sessions, thread_index,
+ 0, db->st.st_entries_num);
}
reass0->sess_index = nat64_db_st_entry_get_index (db, ste0);
{
n_left_to_next++;
to_next--;
+ cached_fragments++;
}
else
{
u32 len = vec_len (fragments_to_loopback);
if (len <= VLIB_FRAME_SIZE)
{
- clib_memcpy (from, fragments_to_loopback,
- sizeof (u32) * len);
+ clib_memcpy_fast (from, fragments_to_loopback,
+ sizeof (u32) * len);
n_left_from = len;
vec_reset_length (fragments_to_loopback);
}
else
{
- clib_memcpy (from,
- fragments_to_loopback + (len -
- VLIB_FRAME_SIZE),
- sizeof (u32) * VLIB_FRAME_SIZE);
+ clib_memcpy_fast (from, fragments_to_loopback +
+ (len - VLIB_FRAME_SIZE),
+ sizeof (u32) * VLIB_FRAME_SIZE);
n_left_from = VLIB_FRAME_SIZE;
_vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
}
}
vlib_node_increment_counter (vm, nat64_in2out_reass_node.index,
- NAT64_IN2OUT_ERROR_IN2OUT_PACKETS,
+ NAT64_IN2OUT_ERROR_PROCESSED_FRAGMENTS,
pkts_processed);
+ vlib_node_increment_counter (vm, nat64_in2out_reass_node.index,
+ NAT64_IN2OUT_ERROR_CACHED_FRAGMENTS,
+ cached_fragments);
nat_send_all_to_node (vm, fragments_to_drop, node,
&node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT],
VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_reass_node,
nat64_in2out_reass_node_fn);
+#define foreach_nat64_in2out_handoff_error \
+_(CONGESTION_DROP, "congestion drop") \
+_(SAME_WORKER, "same worker") \
+_(DO_HANDOFF, "do handoff")
+
+typedef enum
+{
+#define _(sym,str) NAT64_IN2OUT_HANDOFF_ERROR_##sym,
+ foreach_nat64_in2out_handoff_error
+#undef _
+ NAT64_IN2OUT_HANDOFF_N_ERROR,
+} nat64_in2out_handoff_error_t;
+
+static char *nat64_in2out_handoff_error_strings[] = {
+#define _(sym,string) string,
+ foreach_nat64_in2out_handoff_error
+#undef _
+};
+
typedef struct
{
u32 next_worker_index;
- u8 do_handoff;
} nat64_in2out_handoff_trace_t;
static u8 *
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
nat64_in2out_handoff_trace_t *t =
va_arg (*args, nat64_in2out_handoff_trace_t *);
- char *m;
- m = t->do_handoff ? "next worker" : "same worker";
- s = format (s, "NAT64-IN2OUT-HANDOFF: %s %d", m, t->next_worker_index);
+ s =
+ format (s, "NAT64-IN2OUT-HANDOFF: next-worker %d", t->next_worker_index);
return s;
}
vlib_frame_t * frame)
{
nat64_main_t *nm = &nat64_main;
- vlib_thread_main_t *tm = vlib_get_thread_main ();
- u32 n_left_from, *from, *to_next = 0, *to_next_drop = 0;
- static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index;
- static __thread vlib_frame_queue_t **congested_handoff_queue_by_worker_index
- = 0;
- vlib_frame_queue_elt_t *hf = 0;
- vlib_frame_queue_t *fq;
- vlib_frame_t *f = 0, *d = 0;
- int i;
- u32 n_left_to_next_worker = 0, *to_next_worker = 0;
- u32 next_worker_index = 0;
- u32 current_worker_index = ~0;
- u32 thread_index = vlib_get_thread_index ();
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+ u32 n_enq, n_left_from, *from;
+ u16 thread_indices[VLIB_FRAME_SIZE], *ti;
u32 fq_index;
- u32 to_node_index;
-
- fq_index = nm->fq_in2out_index;
- to_node_index = nat64_in2out_node.index;
-
- if (PREDICT_FALSE (handoff_queue_elt_by_worker_index == 0))
- {
- vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);
-
- vec_validate_init_empty (congested_handoff_queue_by_worker_index,
- tm->n_vlib_mains - 1,
- (vlib_frame_queue_t *) (~0));
- }
+ u32 thread_index = vm->thread_index;
+ u32 do_handoff = 0, same_worker = 0;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+
+ b = bufs;
+ ti = thread_indices;
+
+ fq_index = nm->fq_in2out_index;
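+ /* First pass: compute a destination thread for every packet; the
+ * buffers are then handed off in a single bulk enqueue below. */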
while (n_left_from > 0)
{
- u32 bi0;
- vlib_buffer_t *b0;
ip6_header_t *ip0;
- u8 do_handoff;
-
- bi0 = from[0];
- from += 1;
- n_left_from -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- ip0 = vlib_buffer_get_current (b0);
-
- next_worker_index = nat64_get_worker_in2out (&ip0->src_address);
-
- if (PREDICT_FALSE (next_worker_index != thread_index))
- {
- do_handoff = 1;
-
- if (next_worker_index != current_worker_index)
- {
- fq =
- is_vlib_frame_queue_congested (fq_index, next_worker_index,
- 30,
- congested_handoff_queue_by_worker_index);
-
- if (fq)
- {
- /* if this is 1st frame */
- if (!d)
- {
- d = vlib_get_frame_to_node (vm, nm->error_node_index);
- to_next_drop = vlib_frame_vector_args (d);
- }
-
- to_next_drop[0] = bi0;
- to_next_drop += 1;
- d->n_vectors++;
- goto trace0;
- }
-
- if (hf)
- hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
-
- hf =
- vlib_get_worker_handoff_queue_elt (fq_index,
- next_worker_index,
- handoff_queue_elt_by_worker_index);
- n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
- to_next_worker = &hf->buffer_index[hf->n_vectors];
- current_worker_index = next_worker_index;
- }
- ASSERT (to_next_worker != 0);
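+ /* The owning worker is derived from the inside IPv6 source address. */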
+ ip0 = vlib_buffer_get_current (b[0]);
+ ti[0] = nat64_get_worker_in2out (&ip0->src_address);
- /* enqueue to correct worker thread */
- to_next_worker[0] = bi0;
- to_next_worker++;
- n_left_to_next_worker--;
-
- if (n_left_to_next_worker == 0)
- {
- hf->n_vectors = VLIB_FRAME_SIZE;
- vlib_put_frame_queue_elt (hf);
- current_worker_index = ~0;
- handoff_queue_elt_by_worker_index[next_worker_index] = 0;
- hf = 0;
- }
- }
+ if (ti[0] != thread_index)
+ do_handoff++;
else
- {
- do_handoff = 0;
- /* if this is 1st frame */
- if (!f)
- {
- f = vlib_get_frame_to_node (vm, to_node_index);
- to_next = vlib_frame_vector_args (f);
- }
-
- to_next[0] = bi0;
- to_next += 1;
- f->n_vectors++;
- }
+ same_worker++;
- trace0:
if (PREDICT_FALSE
((node->flags & VLIB_NODE_FLAG_TRACE)
- && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
{
nat64_in2out_handoff_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->next_worker_index = next_worker_index;
- t->do_handoff = do_handoff;
+ vlib_add_trace (vm, node, b[0], sizeof (*t));
+ t->next_worker_index = ti[0];
}
- }
-
- if (f)
- vlib_put_frame_to_node (vm, to_node_index, f);
- if (d)
- vlib_put_frame_to_node (vm, nm->error_node_index, d);
+ n_left_from -= 1;
+ ti += 1;
+ b += 1;
+ }
- if (hf)
- hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
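+ /* Bulk-enqueue to the destination threads; the last argument requests
+ * drop on congestion, and drops are counted below from n_enq. */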
+ n_enq =
+ vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
+ frame->n_vectors, 1);
+
+ if (n_enq < frame->n_vectors)
+ vlib_node_increment_counter (vm, node->node_index,
+ NAT64_IN2OUT_HANDOFF_ERROR_CONGESTION_DROP,
+ frame->n_vectors - n_enq);
+ vlib_node_increment_counter (vm, node->node_index,
+ NAT64_IN2OUT_HANDOFF_ERROR_SAME_WORKER,
+ same_worker);
+ vlib_node_increment_counter (vm, node->node_index,
+ NAT64_IN2OUT_HANDOFF_ERROR_DO_HANDOFF,
+ do_handoff);
- /* Ship frames to the worker nodes */
- for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
- {
- if (handoff_queue_elt_by_worker_index[i])
- {
- hf = handoff_queue_elt_by_worker_index[i];
- /*
- * It works better to let the handoff node
- * rate-adapt, always ship the handoff queue element.
- */
- if (1 || hf->n_vectors == hf->last_n_vectors)
- {
- vlib_put_frame_queue_elt (hf);
- handoff_queue_elt_by_worker_index[i] = 0;
- }
- else
- hf->last_n_vectors = hf->n_vectors;
- }
- congested_handoff_queue_by_worker_index[i] =
- (vlib_frame_queue_t *) (~0);
- }
- hf = 0;
- current_worker_index = ~0;
return frame->n_vectors;
}
.vector_size = sizeof (u32),
.format_trace = format_nat64_in2out_handoff_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(nat64_in2out_handoff_error_strings),
+ .error_strings = nat64_in2out_handoff_error_strings,
.n_next_nodes = 1,