/*
 * nat: respect arc features (multi worker)
 * [vpp.git] / src / plugins / nat / nat44_classify.c
 */
1 /*
2  * Copyright (c) 2018 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief Classify for one armed NAT44 (in+out interface)
18  */
19
20 #include <vlib/vlib.h>
21 #include <vnet/vnet.h>
22 #include <vnet/fib/ip4_fib.h>
23 #include <nat/nat.h>
24 #include <nat/nat_reass.h>
25 #include <nat/nat_inlines.h>
26
/* X-macro of (symbol, description) pairs; expands into the error enum and
 * the counter-string table below */
#define foreach_nat44_classify_error                      \
_(MAX_REASS, "Maximum reassemblies exceeded")             \
_(MAX_FRAG, "Maximum fragments per reassembly exceeded")  \
_(NEXT_IN2OUT, "next in2out")                             \
_(NEXT_OUT2IN, "next out2in")                             \
_(FRAG_CACHED, "fragment cached")
33
/* Error/counter indices generated from foreach_nat44_classify_error */
typedef enum
{
#define _(sym,str) NAT44_CLASSIFY_ERROR_##sym,
  foreach_nat44_classify_error
#undef _
    NAT44_CLASSIFY_N_ERROR,	/* number of error counters */
} nat44_classify_error_t;
41
/* Human-readable counter strings, indexed by nat44_classify_error_t */
static char *nat44_classify_error_strings[] = {
#define _(sym,string) string,
  foreach_nat44_classify_error
#undef _
};
47
/* Next-node indices used by the non-ED classify nodes; the registration
 * blocks in this file map each index to a concrete node name */
typedef enum
{
  NAT44_CLASSIFY_NEXT_IN2OUT,
  NAT44_CLASSIFY_NEXT_OUT2IN,
  NAT44_CLASSIFY_NEXT_DROP,
  NAT44_CLASSIFY_N_NEXT,
} nat44_classify_next_t;
55
/* Per-packet trace record for this node */
typedef struct
{
  u8 next_in2out;		/* 1 = classified in2out, 0 = out2in */
  u8 cached;			/* 1 = fragment was cached for later replay */
} nat44_classify_trace_t;
61
62 static u8 *
63 format_nat44_classify_trace (u8 * s, va_list * args)
64 {
65   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
66   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
67   nat44_classify_trace_t *t = va_arg (*args, nat44_classify_trace_t *);
68   char *next;
69
70   if (t->cached)
71     s = format (s, "nat44-classify: fragment cached");
72   else
73     {
74       next = t->next_in2out ? "nat44-in2out" : "nat44-out2in";
75       s = format (s, "nat44-classify: next %s", next);
76     }
77
78   return s;
79 }
80
/**
 * @brief Classify packets as in2out or out2in for plain/det NAT44.
 *
 * A packet goes out2in when its destination matches a NAT pool address or a
 * non-identity static mapping; otherwise it stays on the default in2out
 * path.  For IP fragments the decision made on the first fragment is stored
 * in the reassembly record; non-first fragments arriving early are cached
 * (pulled off the frame) and looped back through this node once the first
 * fragment has been classified.
 *
 * @param vm     vlib main
 * @param node   this node's runtime
 * @param frame  frame of buffer indices to classify
 * @return frame->n_vectors
 */
static inline uword
nat44_classify_node_fn_inline (vlib_main_t * vm,
                               vlib_node_runtime_t * node,
                               vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  nat44_classify_next_t next_index;
  snat_main_t *sm = &snat_main;
  snat_static_mapping_t *m;
  u32 thread_index = vm->thread_index;
  u32 *fragments_to_drop = 0;	/* buffer indices to send to drop next */
  u32 *fragments_to_loopback = 0;	/* cached fragments to replay here */
  u32 next_in2out = 0, next_out2in = 0, frag_cached = 0;	/* counters */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0 = NAT44_CLASSIFY_NEXT_IN2OUT;	/* default: in2out */
          ip4_header_t *ip0;
          snat_address_t *ap;
          snat_session_key_t m_key0;
          clib_bihash_kv_8_8_t kv0, value0;
          udp_header_t *udp0;
          nat_reass_ip4_t *reass0;
          u8 cached0 = 0;	/* set when this fragment gets cached */

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (b0);
          udp0 = ip4_next_header (ip0);

          /* destination is a NAT pool address => out2in */
          /* *INDENT-OFF* */
          vec_foreach (ap, sm->addresses)
            {
              if (ip0->dst_address.as_u32 == ap->addr.as_u32)
                {
                  next0 = NAT44_CLASSIFY_NEXT_OUT2IN;
                  goto enqueue0;
                }
            }
          /* *INDENT-ON* */

          if (PREDICT_FALSE (pool_elts (sm->static_mappings)))
            {
              /* address-only key (port/proto/fib zeroed) works even for
               * non-first fragments, which carry no L4 header */
              m_key0.addr = ip0->dst_address;
              m_key0.port = 0;
              m_key0.protocol = 0;
              m_key0.fib_index = 0;
              kv0.key = m_key0.as_u64;
              /* try to classify the fragment based on IP header alone */
              if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external,
                                           &kv0, &value0))
                {
                  m = pool_elt_at_index (sm->static_mappings, value0.value);
                  /* identity mapping (local == external) stays in2out */
                  if (m->local_addr.as_u32 != m->external_addr.as_u32)
                    next0 = NAT44_CLASSIFY_NEXT_OUT2IN;
                  goto enqueue0;
                }
              if (!ip4_is_fragment (ip0) || ip4_is_first_fragment (ip0))
                {
                  /* process leading fragment/whole packet (with L4 header) */
                  m_key0.port = clib_net_to_host_u16 (udp0->dst_port);
                  m_key0.protocol = ip_proto_to_snat_proto (ip0->protocol);
                  kv0.key = m_key0.as_u64;
                  if (!clib_bihash_search_8_8
                      (&sm->static_mapping_by_external, &kv0, &value0))
                    {
                      m =
                        pool_elt_at_index (sm->static_mappings, value0.value);
                      if (m->local_addr.as_u32 != m->external_addr.as_u32)
                        next0 = NAT44_CLASSIFY_NEXT_OUT2IN;
                    }
                  if (ip4_is_fragment (ip0))
                    {
                      reass0 = nat_ip4_reass_find_or_create (ip0->src_address,
                                                             ip0->dst_address,
                                                             ip0->fragment_id,
                                                             ip0->protocol,
                                                             1,
                                                             &fragments_to_drop);
                      if (PREDICT_FALSE (!reass0))
                        {
                          next0 = NAT44_CLASSIFY_NEXT_DROP;
                          b0->error =
                            node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
                          nat_elog_notice ("maximum reassemblies exceeded");
                          goto enqueue0;
                        }
                      /* save classification for future fragments and set past
                       * fragments to be looped over and reprocessed */
                      if (next0 == NAT44_CLASSIFY_NEXT_OUT2IN)
                        reass0->classify_next =
                          NAT_REASS_IP4_CLASSIFY_NEXT_OUT2IN;
                      else
                        reass0->classify_next =
                          NAT_REASS_IP4_CLASSIFY_NEXT_IN2OUT;
                      nat_ip4_reass_get_frags (reass0,
                                               &fragments_to_loopback);
                    }
                }
              else
                {
                  /* process non-first fragment */
                  reass0 = nat_ip4_reass_find_or_create (ip0->src_address,
                                                         ip0->dst_address,
                                                         ip0->fragment_id,
                                                         ip0->protocol,
                                                         1,
                                                         &fragments_to_drop);
                  if (PREDICT_FALSE (!reass0))
                    {
                      next0 = NAT44_CLASSIFY_NEXT_DROP;
                      b0->error =
                        node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
                      nat_elog_notice ("maximum reassemblies exceeded");
                      goto enqueue0;
                    }
                  if (reass0->classify_next == NAT_REASS_IP4_CLASSIFY_NONE)
                    /* first fragment still hasn't arrived */
                    {
                      /* cache this fragment; it is pulled off the frame below
                       * and replayed once the first fragment is classified */
                      if (nat_ip4_reass_add_fragment
                          (thread_index, reass0, bi0, &fragments_to_drop))
                        {
                          b0->error =
                            node->errors[NAT44_CLASSIFY_ERROR_MAX_FRAG];
                          nat_elog_notice
                            ("maximum fragments per reassembly exceeded");
                          next0 = NAT44_CLASSIFY_NEXT_DROP;
                          goto enqueue0;
                        }
                      cached0 = 1;
                      goto enqueue0;
                    }
                  else if (reass0->classify_next ==
                           NAT_REASS_IP4_CLASSIFY_NEXT_OUT2IN)
                    next0 = NAT44_CLASSIFY_NEXT_OUT2IN;
                  else if (reass0->classify_next ==
                           NAT_REASS_IP4_CLASSIFY_NEXT_IN2OUT)
                    next0 = NAT44_CLASSIFY_NEXT_IN2OUT;
                }
            }

        enqueue0:
          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                             && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              nat44_classify_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->cached = cached0;
              if (!cached0)
                t->next_in2out = next0 == NAT44_CLASSIFY_NEXT_IN2OUT ? 1 : 0;
            }

          if (cached0)
            {
              /* fragment was cached: undo the speculative enqueue */
              n_left_to_next++;
              to_next--;
              frag_cached++;
            }
          else
            {
              next_in2out += next0 == NAT44_CLASSIFY_NEXT_IN2OUT;
              next_out2in += next0 == NAT44_CLASSIFY_NEXT_OUT2IN;

              /* verify speculative enqueue, maybe switch current next frame */
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               bi0, next0);
            }

          /* input drained: refill with cached fragments awaiting replay,
           * at most one frame's worth at a time */
          if (n_left_from == 0 && vec_len (fragments_to_loopback))
            {
              from = vlib_frame_vector_args (frame);
              u32 len = vec_len (fragments_to_loopback);
              if (len <= VLIB_FRAME_SIZE)
                {
                  clib_memcpy_fast (from, fragments_to_loopback,
                                    sizeof (u32) * len);
                  n_left_from = len;
                  vec_reset_length (fragments_to_loopback);
                }
              else
                {
                  /* take the tail; the head stays queued for the next pass */
                  clib_memcpy_fast (from, fragments_to_loopback +
                                    (len - VLIB_FRAME_SIZE),
                                    sizeof (u32) * VLIB_FRAME_SIZE);
                  n_left_from = VLIB_FRAME_SIZE;
                  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
                }
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  nat_send_all_to_node (vm, fragments_to_drop, node, 0,
                        NAT44_CLASSIFY_NEXT_DROP);

  /* NOTE(review): fragments_to_loopback is never vec_free'd here, only
   * length-reset inside the loop — looks like a per-call heap leak when
   * fragments were looped; confirm and free alongside fragments_to_drop */
  vec_free (fragments_to_drop);

  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_CLASSIFY_ERROR_NEXT_IN2OUT, next_in2out);
  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_CLASSIFY_ERROR_NEXT_OUT2IN, next_out2in);
  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_CLASSIFY_ERROR_FRAG_CACHED, frag_cached);

  return frame->n_vectors;
}
308
/**
 * @brief Classify packets for endpoint-dependent (ED) NAT44.
 *
 * First consults the per-thread in2out ED session table: a hit keeps the
 * packet on the in2out fast path.  On a miss, falls back to the same
 * pool-address / static-mapping checks as the plain classifier, selecting
 * the out2in fast path when they match.  Fragment handling mirrors
 * nat44_classify_node_fn_inline: early non-first fragments are cached and
 * replayed once the first fragment decides.  On the first pass (not for
 * looped-back fragments) the feature-arc next index is stashed in the
 * buffer opaque so downstream NAT nodes can continue on the arc.
 *
 * @param vm     vlib main
 * @param node   this node's runtime
 * @param frame  frame of buffer indices to classify
 * @return frame->n_vectors
 */
static inline uword
nat44_ed_classify_node_fn_inline (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  nat44_classify_next_t next_index;
  snat_main_t *sm = &snat_main;
  snat_static_mapping_t *m;
  u32 thread_index = vm->thread_index;
  snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
  u32 *fragments_to_drop = 0;	/* buffer indices to send to drop next */
  u32 *fragments_to_loopback = 0;	/* cached fragments to replay here */
  u32 next_in2out = 0, next_out2in = 0, frag_cached = 0;	/* counters */
  u8 in_loopback = 0;	/* set once we start replaying cached fragments */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0 =
            NAT_NEXT_IN2OUT_ED_FAST_PATH, sw_if_index0, rx_fib_index0;
          ip4_header_t *ip0;
          snat_address_t *ap;
          snat_session_key_t m_key0;
          clib_bihash_kv_8_8_t kv0, value0;
          clib_bihash_kv_16_8_t ed_kv0, ed_value0;
          udp_header_t *udp0;
          nat_reass_ip4_t *reass0;
          u8 cached0 = 0;	/* set when this fragment gets cached */

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (b0);
          udp0 = ip4_next_header (ip0);

          if (!in_loopback)
            {
              u32 arc_next = 0;

              /* remember the feature-arc next; looped-back fragments
               * already did this on their first pass */
              vnet_feature_next (&arc_next, b0);
              nat_buffer_opaque (b0)->arc_next = arc_next;
            }

          /* ED session lookup is skipped for ICMP (the 5-tuple key built
           * from udp0 ports does not apply); presumably ICMP is matched
           * downstream — confirm against the in2out/out2in ED nodes */
          if (ip0->protocol != IP_PROTOCOL_ICMP)
            {
              if (!ip4_is_fragment (ip0) || ip4_is_first_fragment (ip0))
                {
                  /* process leading fragment/whole packet (with L4 header) */
                  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
                  rx_fib_index0 =
                    fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
                                                         sw_if_index0);
                  make_ed_kv (&ed_kv0, &ip0->src_address,
                              &ip0->dst_address, ip0->protocol,
                              rx_fib_index0, udp0->src_port, udp0->dst_port);
                  if (ip4_is_fragment (ip0))
                    {
                      reass0 =
                        nat_ip4_reass_find_or_create (ip0->src_address,
                                                      ip0->dst_address,
                                                      ip0->fragment_id,
                                                      ip0->protocol, 1,
                                                      &fragments_to_drop);
                      if (PREDICT_FALSE (!reass0))
                        {
                          next0 = NAT_NEXT_DROP;
                          b0->error =
                            node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
                          nat_elog_notice ("maximum reassemblies exceeded");
                          goto enqueue0;
                        }
                      if (!clib_bihash_search_16_8
                          (&tsm->in2out_ed, &ed_kv0, &ed_value0))
                        {
                          /* session exists so classify as IN2OUT,
                           * save this information for future fragments and set
                           * past fragments to be looped over and reprocessed */
                          reass0->sess_index = ed_value0.value;
                          reass0->classify_next =
                            NAT_REASS_IP4_CLASSIFY_NEXT_IN2OUT;
                          nat_ip4_reass_get_frags (reass0,
                                                   &fragments_to_loopback);
                          goto enqueue0;
                        }
                      else
                        {
                          /* session doesn't exist so continue in the code,
                           * save this information for future fragments and set
                           * past fragments to be looped over and reprocessed */
                          reass0->flags |=
                            NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE;
                          nat_ip4_reass_get_frags (reass0,
                                                   &fragments_to_loopback);
                        }
                    }
                  else
                    {
                      /* process whole packet: an in2out ED session hit keeps
                       * the default in2out fast-path next */
                      if (!clib_bihash_search_16_8
                          (&tsm->in2out_ed, &ed_kv0, &ed_value0))
                        goto enqueue0;
                      /* session doesn't exist so continue in code */
                    }
                }
              else
                {
                  /* process non-first fragment */
                  reass0 = nat_ip4_reass_find_or_create (ip0->src_address,
                                                         ip0->dst_address,
                                                         ip0->fragment_id,
                                                         ip0->protocol,
                                                         1,
                                                         &fragments_to_drop);
                  if (PREDICT_FALSE (!reass0))
                    {
                      next0 = NAT_NEXT_DROP;
                      b0->error =
                        node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
                      nat_elog_notice ("maximum reassemblies exceeded");
                      goto enqueue0;
                    }
                  /* check if first fragment has arrived */
                  if (reass0->classify_next == NAT_REASS_IP4_CLASSIFY_NONE
                      && !(reass0->flags &
                           NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE))
                    {
                      /* first fragment still hasn't arrived, cache this fragment */
                      if (nat_ip4_reass_add_fragment
                          (thread_index, reass0, bi0, &fragments_to_drop))
                        {
                          b0->error =
                            node->errors[NAT44_CLASSIFY_ERROR_MAX_FRAG];
                          nat_elog_notice
                            ("maximum fragments per reassembly exceeded");
                          next0 = NAT_NEXT_DROP;
                          goto enqueue0;
                        }
                      cached0 = 1;
                      goto enqueue0;
                    }
                  if (reass0->classify_next ==
                      NAT_REASS_IP4_CLASSIFY_NEXT_IN2OUT)
                    goto enqueue0;
                  /* flag NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE is set
                   * so keep the default next0 and continue in code to
                   * potentially find other classification for this packet */
                }
            }

          /* destination is a NAT pool address => out2in */
          /* *INDENT-OFF* */
          vec_foreach (ap, sm->addresses)
            {
              if (ip0->dst_address.as_u32 == ap->addr.as_u32)
                {
                  next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
                  goto enqueue0;
                }
            }
          /* *INDENT-ON* */

          if (PREDICT_FALSE (pool_elts (sm->static_mappings)))
            {
              /* address-only key (port/proto/fib zeroed) works even for
               * non-first fragments, which carry no L4 header */
              m_key0.addr = ip0->dst_address;
              m_key0.port = 0;
              m_key0.protocol = 0;
              m_key0.fib_index = 0;
              kv0.key = m_key0.as_u64;
              /* try to classify the fragment based on IP header alone */
              if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external,
                                           &kv0, &value0))
                {
                  m = pool_elt_at_index (sm->static_mappings, value0.value);
                  /* identity mapping (local == external) stays in2out */
                  if (m->local_addr.as_u32 != m->external_addr.as_u32)
                    next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
                  goto enqueue0;
                }
              if (!ip4_is_fragment (ip0) || ip4_is_first_fragment (ip0))
                {
                  /* process leading fragment/whole packet (with L4 header) */
                  m_key0.port = clib_net_to_host_u16 (udp0->dst_port);
                  m_key0.protocol = ip_proto_to_snat_proto (ip0->protocol);
                  kv0.key = m_key0.as_u64;
                  if (!clib_bihash_search_8_8
                      (&sm->static_mapping_by_external, &kv0, &value0))
                    {
                      m =
                        pool_elt_at_index (sm->static_mappings, value0.value);
                      if (m->local_addr.as_u32 != m->external_addr.as_u32)
                        next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
                    }
                  if (ip4_is_fragment (ip0))
                    {
                      reass0 = nat_ip4_reass_find_or_create (ip0->src_address,
                                                             ip0->dst_address,
                                                             ip0->fragment_id,
                                                             ip0->protocol,
                                                             1,
                                                             &fragments_to_drop);
                      if (PREDICT_FALSE (!reass0))
                        {
                          next0 = NAT_NEXT_DROP;
                          b0->error =
                            node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
                          nat_elog_notice ("maximum reassemblies exceeded");
                          goto enqueue0;
                        }
                      /* save classification for future fragments and set past
                       * fragments to be looped over and reprocessed */
                      /* NOTE(review): classify_next is assigned
                       * NAT_NEXT_*_ED_REASS here but compared against
                       * NAT_REASS_IP4_CLASSIFY_NEXT_* in the non-first
                       * fragment branch below — confirm these enum values
                       * actually correspond */
                      if (next0 == NAT_NEXT_OUT2IN_ED_FAST_PATH)
                        reass0->classify_next = NAT_NEXT_OUT2IN_ED_REASS;
                      else
                        reass0->classify_next = NAT_NEXT_IN2OUT_ED_REASS;
                      nat_ip4_reass_get_frags (reass0,
                                               &fragments_to_loopback);
                    }
                }
              else
                {
                  /* process non-first fragment */
                  reass0 = nat_ip4_reass_find_or_create (ip0->src_address,
                                                         ip0->dst_address,
                                                         ip0->fragment_id,
                                                         ip0->protocol,
                                                         1,
                                                         &fragments_to_drop);
                  if (PREDICT_FALSE (!reass0))
                    {
                      next0 = NAT_NEXT_DROP;
                      b0->error =
                        node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
                      nat_elog_notice ("maximum reassemblies exceeded");
                      goto enqueue0;
                    }
                  if (reass0->classify_next == NAT_REASS_IP4_CLASSIFY_NONE)
                    /* first fragment still hasn't arrived */
                    {
                      /* cache this fragment; it is pulled off the frame below
                       * and replayed once the first fragment is classified */
                      if (nat_ip4_reass_add_fragment
                          (thread_index, reass0, bi0, &fragments_to_drop))
                        {
                          b0->error =
                            node->errors[NAT44_CLASSIFY_ERROR_MAX_FRAG];
                          nat_elog_notice
                            ("maximum fragments per reassembly exceeded");
                          next0 = NAT_NEXT_DROP;
                          goto enqueue0;
                        }
                      cached0 = 1;
                      goto enqueue0;
                    }
                  else if (reass0->classify_next ==
                           NAT_REASS_IP4_CLASSIFY_NEXT_OUT2IN)
                    next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
                  else if (reass0->classify_next ==
                           NAT_REASS_IP4_CLASSIFY_NEXT_IN2OUT)
                    next0 = NAT_NEXT_IN2OUT_ED_FAST_PATH;
                }
            }

        enqueue0:
          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                             && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              nat44_classify_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->cached = cached0;
              if (!cached0)
                t->next_in2out =
                  next0 == NAT_NEXT_IN2OUT_ED_FAST_PATH ? 1 : 0;
            }

          if (cached0)
            {
              /* fragment was cached: undo the speculative enqueue */
              n_left_to_next++;
              to_next--;
              frag_cached++;
            }
          else
            {
              next_in2out += next0 == NAT_NEXT_IN2OUT_ED_FAST_PATH;
              next_out2in += next0 == NAT_NEXT_OUT2IN_ED_FAST_PATH;

              /* verify speculative enqueue, maybe switch current next frame */
              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                               to_next, n_left_to_next,
                                               bi0, next0);
            }

          /* input drained: refill with cached fragments awaiting replay,
           * at most one frame's worth at a time */
          if (n_left_from == 0 && vec_len (fragments_to_loopback))
            {
              in_loopback = 1;	/* replayed buffers keep their saved arc_next */
              from = vlib_frame_vector_args (frame);
              u32 len = vec_len (fragments_to_loopback);
              if (len <= VLIB_FRAME_SIZE)
                {
                  clib_memcpy_fast (from, fragments_to_loopback,
                                    sizeof (u32) * len);
                  n_left_from = len;
                  vec_reset_length (fragments_to_loopback);
                }
              else
                {
                  /* take the tail; the head stays queued for the next pass */
                  clib_memcpy_fast (from, fragments_to_loopback +
                                    (len - VLIB_FRAME_SIZE),
                                    sizeof (u32) * VLIB_FRAME_SIZE);
                  n_left_from = VLIB_FRAME_SIZE;
                  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
                }
            }
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* NOTE(review): this node is a sibling of "nat-default", whose next
   * indices are NAT_NEXT_* (in-node drops above use NAT_NEXT_DROP), yet
   * NAT44_CLASSIFY_NEXT_DROP is passed here — confirm this index maps to a
   * drop next in nat-default's next-node table */
  nat_send_all_to_node (vm, fragments_to_drop, node, 0,
                        NAT44_CLASSIFY_NEXT_DROP);

  /* NOTE(review): fragments_to_loopback is never vec_free'd here, only
   * length-reset inside the loop — looks like a per-call heap leak when
   * fragments were looped; confirm and free alongside fragments_to_drop */
  vec_free (fragments_to_drop);

  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_CLASSIFY_ERROR_NEXT_IN2OUT, next_in2out);
  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_CLASSIFY_ERROR_NEXT_OUT2IN, next_out2in);
  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_CLASSIFY_ERROR_FRAG_CACHED, frag_cached);

  return frame->n_vectors;
}
654
/* Entry point for the plain NAT44 classify node */
VLIB_NODE_FN (nat44_classify_node) (vlib_main_t * vm,
                                    vlib_node_runtime_t * node,
                                    vlib_frame_t * frame)
{
  return nat44_classify_node_fn_inline (vm, node, frame);
}
661
/* *INDENT-OFF* */
/* Plain NAT44 classifier: feeds nat44-in2out / nat44-out2in */
VLIB_REGISTER_NODE (nat44_classify_node) = {
  .name = "nat44-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_nat44_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(nat44_classify_error_strings),
  .error_strings = nat44_classify_error_strings,
  .n_next_nodes = NAT44_CLASSIFY_N_NEXT,
  .next_nodes = {
    [NAT44_CLASSIFY_NEXT_IN2OUT] = "nat44-in2out",
    [NAT44_CLASSIFY_NEXT_OUT2IN] = "nat44-out2in",
    [NAT44_CLASSIFY_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
678
/* Entry point for the endpoint-dependent NAT44 classify node */
VLIB_NODE_FN (nat44_ed_classify_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * frame)
{
  return nat44_ed_classify_node_fn_inline (vm, node, frame);
}
685
/* *INDENT-OFF* */
/* ED classifier shares nat-default's next nodes via sibling_of, which is
 * why the ED inline function uses NAT_NEXT_* indices */
VLIB_REGISTER_NODE (nat44_ed_classify_node) = {
  .name = "nat44-ed-classify",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
};
/* *INDENT-ON* */
695
/* Entry point for the deterministic NAT44 classify node (same inline
 * classifier, different next nodes per its registration) */
VLIB_NODE_FN (nat44_det_classify_node) (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        vlib_frame_t * frame)
{
  return nat44_classify_node_fn_inline (vm, node, frame);
}
702
/* *INDENT-OFF* */
/* Deterministic NAT44 classifier: feeds nat44-det-in2out / nat44-det-out2in */
VLIB_REGISTER_NODE (nat44_det_classify_node) = {
  .name = "nat44-det-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_nat44_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_next_nodes = NAT44_CLASSIFY_N_NEXT,
  .next_nodes = {
    [NAT44_CLASSIFY_NEXT_IN2OUT] = "nat44-det-in2out",
    [NAT44_CLASSIFY_NEXT_OUT2IN] = "nat44-det-out2in",
    [NAT44_CLASSIFY_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
717
/* Entry point for the multi-worker handoff classify node (same inline
 * classifier, routed to worker-handoff nodes per its registration) */
VLIB_NODE_FN (nat44_handoff_classify_node) (vlib_main_t * vm,
                                            vlib_node_runtime_t * node,
                                            vlib_frame_t * frame)
{
  return nat44_classify_node_fn_inline (vm, node, frame);
}
724
/* *INDENT-OFF* */
/* Multi-worker classifier: hands packets off to per-worker NAT nodes */
VLIB_REGISTER_NODE (nat44_handoff_classify_node) = {
  .name = "nat44-handoff-classify",
  .vector_size = sizeof (u32),
  .format_trace = format_nat44_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_next_nodes = NAT44_CLASSIFY_N_NEXT,
  .next_nodes = {
    [NAT44_CLASSIFY_NEXT_IN2OUT] = "nat44-in2out-worker-handoff",
    [NAT44_CLASSIFY_NEXT_OUT2IN] = "nat44-out2in-worker-handoff",
    [NAT44_CLASSIFY_NEXT_DROP] = "error-drop",
  },
};

/* *INDENT-ON* */
740
741 /*
742  * fd.io coding-style-patch-verification: ON
743  *
744  * Local Variables:
745  * eval: (c-set-style "gnu")
746  * End:
747  */