/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ip/lookup.h>
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/dpo/receive_dpo.h>
#include <vnet/adj/adj.h>
#include <vnet/mpls/mpls_types.h>

/**
 * the logger
 */
vlib_log_class_t replicate_logger;

#define REP_DBG(_rep, _fmt, _args...)                          \
{                                                              \
    vlib_log_debug(replicate_logger,                           \
                   "rep:[%U]:" _fmt,                           \
                   format_replicate,                           \
                   replicate_get_index(_rep),                  \
                   REPLICATE_FORMAT_NONE,                      \
                   ##_args);                                   \
}

#define foreach_replicate_dpo_error                       \
_(BUFFER_ALLOCATION_FAILURE, "Buffer Allocation Failure")

typedef enum {
#define _(sym,str) REPLICATE_DPO_ERROR_##sym,
  foreach_replicate_dpo_error
#undef _
  REPLICATE_DPO_N_ERROR,
} replicate_dpo_error_t;

static char * replicate_dpo_error_strings[] = {
#define _(sym,string) string,
  foreach_replicate_dpo_error
#undef _
};

/**
 * Pool of all replicate DPOs. It's not static so the data plane (DP)
 * has fast access.
 */
replicate_t *replicate_pool;

/**
 * The one instance of replicate main
 */
replicate_main_t replicate_main = {
    .repm_counters = {
        .name = "mroutes",
        .stat_segment_name = "/net/mroute",
    },
};

static inline index_t
replicate_get_index (const replicate_t *rep)
{
    return (rep - replicate_pool);
}

static inline dpo_id_t*
replicate_get_buckets (replicate_t *rep)
{
    if (REP_HAS_INLINE_BUCKETS(rep))
    {
        return (rep->rep_buckets_inline);
    }
    else
    {
        return (rep->rep_buckets);
    }
}

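/**
 * Allocate a replicate object from the pool and ensure its combined
 * packet/byte counter exists and is zeroed.
 */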
static replicate_t *
replicate_alloc_i (void)
{
    replicate_t *rep;

    pool_get_aligned(replicate_pool, rep, CLIB_CACHE_LINE_BYTES);
    clib_memset(rep, 0, sizeof(*rep));

    vlib_validate_combined_counter(&(replicate_main.repm_counters),
                                   replicate_get_index(rep));
    vlib_zero_combined_counter(&(replicate_main.repm_counters),
                               replicate_get_index(rep));

    return (rep);
}

static u8*
format_replicate_flags (u8 *s, va_list *args)
{
    int flags = va_arg (*args, int);

    if (flags == REPLICATE_FLAGS_NONE)
    {
        s = format (s, "none");
    }
    else if (flags & REPLICATE_FLAGS_HAS_LOCAL)
    {
        s = format (s, "has-local ");
    }

    return (s);
}

static u8*
replicate_format (index_t repi,
                  replicate_format_flags_t flags,
                  u32 indent,
                  u8 *s)
{
    vlib_counter_t to;
    replicate_t *rep;
    dpo_id_t *buckets;
    u32 i;

    repi &= ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    vlib_get_combined_counter(&(replicate_main.repm_counters), repi, &to);
    buckets = replicate_get_buckets(rep);

    s = format(s, "%U: ", format_dpo_type, DPO_REPLICATE);
    s = format(s, "[index:%d buckets:%d ", repi, rep->rep_n_buckets);
    s = format(s, "flags:[%U] ", format_replicate_flags, rep->rep_flags);
    s = format(s, "to:[%Ld:%Ld]]", to.packets, to.bytes);

    for (i = 0; i < rep->rep_n_buckets; i++)
    {
        s = format(s, "\n%U", format_white_space, indent+2);
        s = format(s, "[%d]", i);
        s = format(s, " %U", format_dpo_id, &buckets[i], indent+6);
    }
    return (s);
}

u8*
format_replicate (u8 * s, va_list * args)
{
    index_t repi = va_arg(*args, index_t);
    replicate_format_flags_t flags = va_arg(*args, replicate_format_flags_t);

    return (replicate_format(repi, flags, 0, s));
}

static u8*
format_replicate_dpo (u8 * s, va_list * args)
{
    index_t repi = va_arg(*args, index_t);
    u32 indent = va_arg(*args, u32);

    return (replicate_format(repi, REPLICATE_FORMAT_DETAIL, indent, s));
}

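/**
 * Create a replicate object with the requested number of buckets.
 * Buckets are stored inline in the object unless the count exceeds
 * the inline limit, in which case a separate vector is allocated.
 */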
static replicate_t *
replicate_create_i (u32 num_buckets,
                    dpo_proto_t rep_proto)
{
    replicate_t *rep;

    ASSERT (num_buckets <= REP_MAX_BUCKETS);

    rep = replicate_alloc_i();
    rep->rep_n_buckets = num_buckets;
    rep->rep_proto = rep_proto;

    if (!REP_HAS_INLINE_BUCKETS(rep))
    {
        vec_validate_aligned(rep->rep_buckets,
                             rep->rep_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    REP_DBG(rep, "create");

    return (rep);
}

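/*
 * Typical usage (illustrative sketch only, not taken from this file):
 * create the object, point each bucket at a child DPO, then parent it
 * from the owning entry, e.g.
 *
 *   index_t repi = replicate_create (2, DPO_PROTO_IP4);
 *   replicate_set_bucket (repi, 0, &child_dpo_0);
 *   replicate_set_bucket (repi, 1, &child_dpo_1);
 *
 * where child_dpo_0/1 are hypothetical, already-resolved dpo_id_t's.
 */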
index_t
replicate_create (u32 n_buckets,
                  dpo_proto_t rep_proto)
{
    return (replicate_get_index(replicate_create_i(n_buckets, rep_proto)));
}

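/**
 * Write one bucket: clear the HAS_LOCAL flag if the bucket being
 * replaced held a receive DPO, set it if the new one does, then stack
 * the new child on the bucket.
 */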
static inline void
replicate_set_bucket_i (replicate_t *rep,
                        u32 bucket,
                        dpo_id_t *buckets,
                        const dpo_id_t *next)
{
    if (dpo_is_receive(&buckets[bucket]))
    {
        rep->rep_flags &= ~REPLICATE_FLAGS_HAS_LOCAL;
    }
    if (dpo_is_receive(next))
    {
        rep->rep_flags |= REPLICATE_FLAGS_HAS_LOCAL;
    }
    dpo_stack(DPO_REPLICATE, rep->rep_proto, &buckets[bucket], next);
}

void
replicate_set_bucket (index_t repi,
                      u32 bucket,
                      const dpo_id_t *next)
{
    replicate_t *rep;
    dpo_id_t *buckets;

    repi &= ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    buckets = replicate_get_buckets(rep);

    ASSERT(bucket < rep->rep_n_buckets);

    replicate_set_bucket_i(rep, bucket, buckets, next);
}

int
replicate_is_drop (const dpo_id_t *dpo)
{
    replicate_t *rep;
    index_t repi;

    if (DPO_REPLICATE != dpo->dpoi_type)
        return (0);

    repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);

    if (1 == rep->rep_n_buckets)
    {
        return (dpo_is_drop(replicate_get_bucket_i(rep, 0)));
    }
    return (0);
}

const dpo_id_t *
replicate_get_bucket (index_t repi,
                      u32 bucket)
{
    replicate_t *rep;

    repi &= ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);

    return (replicate_get_bucket_i(rep, bucket));
}

static load_balance_path_t *
replicate_multipath_next_hop_fixup (load_balance_path_t *nhs,
                                    dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *nh;

        /*
         * we need something for the replicate, so use the drop
         */
        vec_add2(nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
    }

    return (nhs);
}

/*
 * Fill in the buckets from the corresponding next-hop paths.
 */
static void
replicate_fill_buckets (replicate_t *rep,
                        load_balance_path_t *nhs,
                        dpo_id_t *buckets,
                        u32 n_buckets)
{
    load_balance_path_t * nh;
    u16 bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        ASSERT(bucket < n_buckets);
        replicate_set_bucket_i(rep, bucket++, buckets, &nh->path_dpo);
    }
}

static inline void
replicate_set_n_buckets (replicate_t *rep,
                         u32 n_buckets)
{
  ASSERT (n_buckets <= REP_MAX_BUCKETS);
  rep->rep_n_buckets = n_buckets;
}

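/**
 * Update an existing replicate object with a new set of paths. The
 * bucket array and the advertised bucket count are written in an order
 * (separated by memory barriers) that keeps the object consistent for
 * packets already in flight; see the per-case comments below.
 */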
void
replicate_multipath_update (const dpo_id_t *dpo,
                            load_balance_path_t * next_hops)
{
    load_balance_path_t * nh, * nhs;
    dpo_id_t *tmp_dpo;
    u32 ii, n_buckets;
    replicate_t *rep;
    index_t repi;

    ASSERT(DPO_REPLICATE == dpo->dpoi_type);
    repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    nhs = replicate_multipath_next_hop_fixup(next_hops,
                                             rep->rep_proto);
    n_buckets = vec_len(nhs);

    if (n_buckets > REP_MAX_BUCKETS)
      {
        vlib_log_err (replicate_logger,
                      "Too many paths for replicate, truncating %d -> %d",
                      n_buckets, REP_MAX_BUCKETS);
        for (int i = REP_MAX_BUCKETS; i < n_buckets; i++)
          dpo_reset (&vec_elt (nhs, i).path_dpo);
        vec_set_len (nhs, REP_MAX_BUCKETS);
        n_buckets = REP_MAX_BUCKETS;
      }

    if (0 == rep->rep_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        replicate_set_n_buckets(rep, n_buckets);

        if (!REP_HAS_INLINE_BUCKETS(rep))
            vec_validate_aligned(rep->rep_buckets,
                                 rep->rep_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        replicate_fill_buckets(rep, nhs,
                               replicate_get_buckets(rep),
                               n_buckets);
    }
    else
    {
        /*
         * This is a modification of an existing replicate.
         * We need to ensure that packets in flight see a consistent state:
         * the number of buckets reported must never exceed the number the
         * REP actually has. So if the number of buckets is increasing, we
         * must update the bucket array first, then the reported number;
         * vice-versa if the number of buckets goes down.
         */
        if (n_buckets == rep->rep_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            replicate_fill_buckets(rep, nhs,
                                   replicate_get_buckets(rep),
                                   n_buckets);
        }
        else if (n_buckets > rep->rep_n_buckets)
        {
            /*
             * we have more buckets. the old replicate map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > REP_NUM_INLINE_BUCKETS &&
                rep->rep_n_buckets <= REP_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from inline storage to out-of-line. Alloc the out-of-line
                 * buckets first, then fix up the count, then reset the inlines.
                 */
                ASSERT(NULL == rep->rep_buckets);
                vec_validate_aligned(rep->rep_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                replicate_fill_buckets(rep, nhs,
                                       rep->rep_buckets,
                                       n_buckets);
                CLIB_MEMORY_BARRIER();
                replicate_set_n_buckets(rep, n_buckets);

                CLIB_MEMORY_BARRIER();

                for (ii = 0; ii < REP_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&rep->rep_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= REP_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and it's still inline
                     * buckets. we can write the new over the old.
                     */
                    replicate_fill_buckets(rep, nhs,
                                           replicate_get_buckets(rep),
                                           n_buckets);
                    CLIB_MEMORY_BARRIER();
                    replicate_set_n_buckets(rep, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new bucket
                     * array to hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = replicate_get_buckets(rep);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    replicate_fill_buckets(rep, nhs, new_buckets, n_buckets);
                    CLIB_MEMORY_BARRIER();
                    rep->rep_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    replicate_set_n_buckets(rep, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }
        }
        else
        {
            /*
             * bucket size shrinkage.
             */
            if (n_buckets <= REP_NUM_INLINE_BUCKETS &&
                rep->rep_n_buckets > REP_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-of-line storage to inline:
                 *   1 - Fill the inline buckets,
                 *   2 - fix up the number (at this point the inline buckets are
                 *       used).
                 *   3 - free the out-of-line buckets
                 */
                replicate_fill_buckets(rep, nhs,
                                       rep->rep_buckets_inline,
                                       n_buckets);
                CLIB_MEMORY_BARRIER();
                replicate_set_n_buckets(rep, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, rep->rep_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(rep->rep_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *  1 - update the number to the smaller size
                 *  2 - write the new buckets
                 *  3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = rep->rep_n_buckets;
                buckets = replicate_get_buckets(rep);

                replicate_set_n_buckets(rep, n_buckets);
                CLIB_MEMORY_BARRIER();

                replicate_fill_buckets(rep, nhs,
                                       buckets,
                                       n_buckets);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
}

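/*
 * Reference counting: the object is destroyed when the last lock is
 * released (see replicate_unlock() below).
 */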
static void
replicate_lock (dpo_id_t *dpo)
{
    replicate_t *rep;

    rep = replicate_get(dpo->dpoi_index);

    rep->rep_locks++;
}

index_t
replicate_dup (replicate_flags_t flags,
               index_t repi)
{
    replicate_t *rep, *copy;

    rep = replicate_get(repi);

    if (rep->rep_flags == flags ||
        flags & REPLICATE_FLAGS_HAS_LOCAL)
    {
        /*
         * we can include all the buckets from the original in the copy
         */
        return (repi);
    }
    else
    {
        /*
         * caller doesn't want the local paths that the original has
         */
        if (rep->rep_n_buckets == 1)
        {
            /*
             * original has only one bucket that is the local, so create
             * a new one with only the drop
             */
            copy = replicate_create_i (1, rep->rep_proto);

            replicate_set_bucket_i(copy, 0,
                                   replicate_get_buckets(copy),
                                   drop_dpo_get(rep->rep_proto));
        }
        else
        {
            dpo_id_t *old_buckets, *copy_buckets;
            u16 bucket, pos;

            copy = replicate_create_i(rep->rep_n_buckets - 1,
                                      rep->rep_proto);

            rep = replicate_get(repi);
            old_buckets = replicate_get_buckets(rep);
            copy_buckets = replicate_get_buckets(copy);
            pos = 0;

            for (bucket = 0; bucket < rep->rep_n_buckets; bucket++)
            {
                if (!dpo_is_receive(&old_buckets[bucket]))
                {
                    replicate_set_bucket_i(copy, pos, copy_buckets,
                                           (&old_buckets[bucket]));
                    pos++;
                }
            }
        }
    }

    return (replicate_get_index(copy));
}

static void
replicate_destroy (replicate_t *rep)
{
    dpo_id_t *buckets;
    int i;

    buckets = replicate_get_buckets(rep);

    for (i = 0; i < rep->rep_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    REP_DBG(rep, "destroy");
    if (!REP_HAS_INLINE_BUCKETS(rep))
    {
        vec_free(rep->rep_buckets);
    }

    pool_put(replicate_pool, rep);
}

static void
replicate_unlock (dpo_id_t *dpo)
{
    replicate_t *rep;

    rep = replicate_get(dpo->dpoi_index);

    rep->rep_locks--;

    if (0 == rep->rep_locks)
    {
        replicate_destroy(rep);
    }
}

static void
replicate_mem_show (void)
{
    fib_show_memory_usage("replicate",
                          pool_elts(replicate_pool),
                          pool_len(replicate_pool),
                          sizeof(replicate_t));
}

const static dpo_vft_t rep_vft = {
    .dv_lock = replicate_lock,
    .dv_unlock = replicate_unlock,
    .dv_format = format_replicate_dpo,
    .dv_mem_show = replicate_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a replicate
 *        object.
 *
 * This means that these graph nodes are ones for which a replicate is the
 * parent object in the DPO-graph.
 */
const static char* const replicate_ip4_nodes[] =
{
    "ip4-replicate",
    NULL,
};
const static char* const replicate_ip6_nodes[] =
{
    "ip6-replicate",
    NULL,
};
const static char* const replicate_mpls_nodes[] =
{
    "mpls-replicate",
    NULL,
};

const static char* const * const replicate_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = replicate_ip4_nodes,
    [DPO_PROTO_IP6]  = replicate_ip6_nodes,
    [DPO_PROTO_MPLS] = replicate_mpls_nodes,
};

void
replicate_module_init (void)
{
    dpo_register(DPO_REPLICATE, &rep_vft, replicate_nodes);
    replicate_logger = vlib_log_register_class("dpo", "replicate");
}

static clib_error_t *
replicate_show (vlib_main_t * vm,
                unformat_input_t * input,
                vlib_cli_command_t * cmd)
{
    index_t repi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &repi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != repi)
    {
        if (pool_is_free_index (replicate_pool, repi))
            vlib_cli_output (vm, "no such index %d", repi);
        else
            vlib_cli_output (vm, "%U", format_replicate, repi,
                             REPLICATE_FORMAT_DETAIL);
    }
    else
    {
        replicate_t *rep;

        pool_foreach (rep, replicate_pool)
        {
            vlib_cli_output (vm, "%U", format_replicate,
                             replicate_get_index(rep),
                             REPLICATE_FORMAT_NONE);
        }
    }

    return 0;
}

VLIB_CLI_COMMAND (replicate_show_command, static) = {
    .path = "show replicate",
    .short_help = "show replicate [<index>]",
    .function = replicate_show,
};

typedef struct replicate_trace_t_
{
    index_t rep_index;
    dpo_id_t dpo;
} replicate_trace_t;

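/**
 * Per-packet replication: clone the incoming buffer once per bucket and
 * enqueue each clone to the graph node of that bucket's DPO. If fewer
 * clones than buckets could be made (buffer exhaustion), a
 * buffer-allocation error is counted.
 */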
static uword
replicate_inline (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
    vlib_combined_counter_main_t * cm = &replicate_main.repm_counters;
    replicate_main_t * rm = &replicate_main;
    u32 n_left_from, * from, * to_next, next_index;
    u32 thread_index = vlib_get_thread_index();

    from = vlib_frame_vector_args (frame);
    n_left_from = frame->n_vectors;
    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index,
                             to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            u32 next0, ci0, bi0, bucket, repi0;
            const replicate_t *rep0;
            vlib_buffer_t * b0, *c0;
            const dpo_id_t *dpo0;
            u8 num_cloned;

            bi0 = from[0];
            from += 1;
            n_left_from -= 1;

            b0 = vlib_get_buffer (vm, bi0);
            repi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
            rep0 = replicate_get(repi0);

            vlib_increment_combined_counter(
                cm, thread_index, repi0, 1,
                vlib_buffer_length_in_chain(vm, b0));

            vec_validate (rm->clones[thread_index], rep0->rep_n_buckets - 1);

            num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[thread_index],
                                            rep0->rep_n_buckets,
                                            VLIB_BUFFER_CLONE_HEAD_SIZE);

            if (num_cloned != rep0->rep_n_buckets)
              {
                vlib_node_increment_counter
                  (vm, node->node_index,
                   REPLICATE_DPO_ERROR_BUFFER_ALLOCATION_FAILURE, 1);
              }

            for (bucket = 0; bucket < num_cloned; bucket++)
            {
                ci0 = rm->clones[thread_index][bucket];
                c0 = vlib_get_buffer(vm, ci0);

                to_next[0] = ci0;
                to_next += 1;
                n_left_to_next -= 1;

                dpo0 = replicate_get_bucket_i(rep0, bucket);
                next0 = dpo0->dpoi_next_node;
                vnet_buffer (c0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

                if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                    replicate_trace_t *t;

                    t = vlib_add_trace (vm, node, c0, sizeof (*t));
                    t->rep_index = repi0;
                    t->dpo = *dpo0;
                }

                vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                 to_next, n_left_to_next,
                                                 ci0, next0);
                if (PREDICT_FALSE (n_left_to_next == 0))
                  {
                    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
                    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
                  }
            }
            vec_reset_length (rm->clones[thread_index]);
        }

        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}

static u8 *
format_replicate_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  replicate_trace_t *t = va_arg (*args, replicate_trace_t *);

  s = format (s, "replicate: %d via %U",
              t->rep_index,
              format_dpo_id, &t->dpo, 0);
  return s;
}

static uword
ip4_replicate (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_frame_t * frame)
{
    return (replicate_inline (vm, node, frame));
}

/**
 * @brief IP4 replication node
 */
VLIB_REGISTER_NODE (ip4_replicate_node) = {
  .function = ip4_replicate,
  .name = "ip4-replicate",
  .vector_size = sizeof (u32),

  .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
  .error_strings = replicate_dpo_error_strings,

  .format_trace = format_replicate_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "ip4-drop",
  },
};

static uword
ip6_replicate (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_frame_t * frame)
{
    return (replicate_inline (vm, node, frame));
}

/**
 * @brief IPv6 replication node
 */
VLIB_REGISTER_NODE (ip6_replicate_node) = {
  .function = ip6_replicate,
  .name = "ip6-replicate",
  .vector_size = sizeof (u32),

  .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
  .error_strings = replicate_dpo_error_strings,

  .format_trace = format_replicate_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "ip6-drop",
  },
};

static uword
mpls_replicate (vlib_main_t * vm,
                vlib_node_runtime_t * node,
                vlib_frame_t * frame)
{
    return (replicate_inline (vm, node, frame));
}

/**
 * @brief MPLS replication node
 */
VLIB_REGISTER_NODE (mpls_replicate_node) = {
  .function = mpls_replicate,
  .name = "mpls-replicate",
  .vector_size = sizeof (u32),

  .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
  .error_strings = replicate_dpo_error_strings,

  .format_trace = format_replicate_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "mpls-drop",
  },
};

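/*
 * Module init: allocate one clone-index vector per thread
 * (main thread + workers) for use by the replicate nodes above.
 */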
clib_error_t *
replicate_dpo_init (vlib_main_t * vm)
{
  replicate_main_t * rm = &replicate_main;

  vec_validate (rm->clones, vlib_num_workers());

  return 0;
}

VLIB_INIT_FUNCTION (replicate_dpo_init);