NAT: VPP-1552 code migration from old multiarch scheme
diff --git a/src/plugins/nat/nat64_in2out.c b/src/plugins/nat/nat64_in2out.c
index d30a9cb..11551f4 100644
--- a/src/plugins/nat/nat64_in2out.c
+++ b/src/plugins/nat/nat64_in2out.c
@@ -70,10 +70,6 @@ format_nat64_in2out_reass_trace (u8 * s, va_list * args)
   return s;
 }
 
-vlib_node_registration_t nat64_in2out_node;
-vlib_node_registration_t nat64_in2out_slowpath_node;
-vlib_node_registration_t nat64_in2out_reass_node;
-vlib_node_registration_t nat64_in2out_handoff_node;
 
 #define foreach_nat64_in2out_error                       \
 _(UNSUPPORTED_PROTOCOL, "unsupported protocol")          \
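
The four vlib_node_registration_t forward declarations are dropped: under the
new multiarch scheme the per-architecture copies of this file no longer touch
the registration symbols directly, and the node indices they need are cached
in the plugin's main structure instead. A minimal sketch of that bookkeeping,
using the field names that appear later in this patch (the surrounding layout
of nat64_main_t is elided and purely illustrative):

/* Sketch only: node indices cached in the NAT64 plugin main struct so that
 * multiarch-compiled node functions need not reference the registration
 * symbols. The field names match the nm->... accesses below; the remaining
 * layout of nat64_main_t is omitted here. */
typedef struct
{
  u32 in2out_node_index;
  u32 in2out_slowpath_node_index;
  u32 in2out_reass_node_index;
  /* ... other NAT64 state ... */
} nat64_main_t;

extern nat64_main_t nat64_main;
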
@@ -970,11 +966,13 @@ nat64_in2out_node_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   u32 pkts_processed = 0;
   u32 stats_node_index;
   u32 thread_index = vm->thread_index;
+  nat64_main_t *nm = &nat64_main;
+
   u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0, other_packets =
     0, fragments = 0;
 
   stats_node_index =
-    is_slow_path ? nat64_in2out_slowpath_node.index : nat64_in2out_node.index;
+    is_slow_path ? nm->in2out_slowpath_node_index : nm->in2out_node_index;
 
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
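
With the registration symbols no longer referenced here, the stats node index
is read from nat64_main. Those cached indices have to be filled in once by
code that is built only for the default variant (for example the plugin's
init path). A hedged sketch of one way to do that, using
vlib_get_node_by_name() and the node names registered in this file; the
helper name is hypothetical and the actual patch may assign the indices
differently:

#include <vlib/vlib.h>

/* Hypothetical helper: cache the in2out node indices in nat64_main once,
 * from non-multiarch code, so the node functions above can use them. */
static void
nat64_cache_in2out_node_indices (vlib_main_t * vm, nat64_main_t * nm)
{
  nm->in2out_node_index =
    vlib_get_node_by_name (vm, (u8 *) "nat64-in2out")->index;
  nm->in2out_slowpath_node_index =
    vlib_get_node_by_name (vm, (u8 *) "nat64-in2out-slowpath")->index;
  nm->in2out_reass_node_index =
    vlib_get_node_by_name (vm, (u8 *) "nat64-in2out-reass")->index;
}
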
@@ -1171,16 +1169,15 @@ nat64_in2out_node_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   return frame->n_vectors;
 }
 
-static uword
-nat64_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
-                     vlib_frame_t * frame)
+VLIB_NODE_FN (nat64_in2out_node) (vlib_main_t * vm,
+                                 vlib_node_runtime_t * node,
+                                 vlib_frame_t * frame)
 {
   return nat64_in2out_node_fn_inline (vm, node, frame, 0);
 }
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (nat64_in2out_node) = {
-  .function = nat64_in2out_node_fn,
   .name = "nat64-in2out",
   .vector_size = sizeof (u32),
   .format_trace = format_nat64_in2out_trace,
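
The function conversion follows the standard pattern of the new scheme: the
hand-written static dispatch wrapper, the .function member of the node
registration, and the VLIB_NODE_FUNCTION_MULTIARCH invocation are all
replaced by a single VLIB_NODE_FN definition, from which the build produces
per-architecture variants and vlib selects the best one at startup.
Schematically (names are placeholders, not taken from this file):

/* Old multiarch scheme, as removed by this patch: */
static uword
example_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  return example_inline (vm, node, frame, 0 /* is_slow_path */);
}

VLIB_REGISTER_NODE (example_node) = {
  .function = example_node_fn,
  .name = "example",
  .vector_size = sizeof (u32),
};

VLIB_NODE_FUNCTION_MULTIARCH (example_node, example_node_fn);

/* New scheme: one macro; no .function member, no MULTIARCH line. */
VLIB_NODE_FN (example_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return example_inline (vm, node, frame, 0 /* is_slow_path */);
}
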
@@ -1199,18 +1196,15 @@ VLIB_REGISTER_NODE (nat64_in2out_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_node, nat64_in2out_node_fn);
-
-static uword
-nat64_in2out_slowpath_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
-                              vlib_frame_t * frame)
+VLIB_NODE_FN (nat64_in2out_slowpath_node) (vlib_main_t * vm,
+                                          vlib_node_runtime_t * node,
+                                          vlib_frame_t * frame)
 {
   return nat64_in2out_node_fn_inline (vm, node, frame, 1);
 }
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (nat64_in2out_slowpath_node) = {
-  .function = nat64_in2out_slowpath_node_fn,
   .name = "nat64-in2out-slowpath",
   .vector_size = sizeof (u32),
   .format_trace = format_nat64_in2out_trace,
@@ -1229,9 +1223,6 @@ VLIB_REGISTER_NODE (nat64_in2out_slowpath_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_slowpath_node,
-                             nat64_in2out_slowpath_node_fn);
-
 typedef struct nat64_in2out_frag_set_ctx_t_
 {
   vlib_main_t *vm;
@@ -1379,9 +1370,9 @@ nat64_in2out_frag_hairpinning (vlib_buffer_t * b, ip6_header_t * ip6,
   return 0;
 }
 
-static uword
-nat64_in2out_reass_node_fn (vlib_main_t * vm,
-                           vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (nat64_in2out_reass_node) (vlib_main_t * vm,
+                                       vlib_node_runtime_t * node,
+                                       vlib_frame_t * frame)
 {
   u32 n_left_from, *from, *to_next;
   nat64_in2out_next_t next_index;
@@ -1648,10 +1639,10 @@ nat64_in2out_reass_node_fn (vlib_main_t * vm,
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
 
-  vlib_node_increment_counter (vm, nat64_in2out_reass_node.index,
+  vlib_node_increment_counter (vm, nm->in2out_reass_node_index,
                               NAT64_IN2OUT_ERROR_PROCESSED_FRAGMENTS,
                               pkts_processed);
-  vlib_node_increment_counter (vm, nat64_in2out_reass_node.index,
+  vlib_node_increment_counter (vm, nm->in2out_reass_node_index,
                               NAT64_IN2OUT_ERROR_CACHED_FRAGMENTS,
                               cached_fragments);
 
@@ -1666,7 +1657,6 @@ nat64_in2out_reass_node_fn (vlib_main_t * vm,
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (nat64_in2out_reass_node) = {
-  .function = nat64_in2out_reass_node_fn,
   .name = "nat64-in2out-reass",
   .vector_size = sizeof (u32),
   .format_trace = format_nat64_in2out_reass_trace,
@@ -1685,9 +1675,6 @@ VLIB_REGISTER_NODE (nat64_in2out_reass_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_reass_node,
-                             nat64_in2out_reass_node_fn);
-
 #define foreach_nat64_in2out_handoff_error                       \
 _(CONGESTION_DROP, "congestion drop")                            \
 _(SAME_WORKER, "same worker")                                    \
@@ -1726,9 +1713,9 @@ format_nat64_in2out_handoff_trace (u8 * s, va_list * args)
   return s;
 }
 
-static inline uword
-nat64_in2out_handoff_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
-                             vlib_frame_t * frame)
+VLIB_NODE_FN (nat64_in2out_handoff_node) (vlib_main_t * vm,
+                                         vlib_node_runtime_t * node,
+                                         vlib_frame_t * frame)
 {
   nat64_main_t *nm = &nat64_main;
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
@@ -1793,7 +1780,6 @@ nat64_in2out_handoff_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (nat64_in2out_handoff_node) = {
-  .function = nat64_in2out_handoff_node_fn,
   .name = "nat64-in2out-handoff",
   .vector_size = sizeof (u32),
   .format_trace = format_nat64_in2out_handoff_trace,
@@ -1809,9 +1795,6 @@ VLIB_REGISTER_NODE (nat64_in2out_handoff_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_handoff_node,
-                             nat64_in2out_handoff_node_fn);
-
 /*
  * fd.io coding-style-patch-verification: ON
  *