/* src/vnet/dpo/replicate_dpo.c — replicate DPO: packet replication for
 * multicast routes; counters are exported in the stats segment. */
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vnet/ip/lookup.h>
17 #include <vnet/dpo/replicate_dpo.h>
18 #include <vnet/dpo/drop_dpo.h>
19 #include <vnet/adj/adj.h>
20 #include <vnet/mpls/mpls_types.h>
21
#undef REP_DEBUG

#ifdef REP_DEBUG
/*
 * Debug logging for replicate objects. Formats the replicate into a
 * temporary vector and emits it via clib_warning; compiled out unless
 * REP_DEBUG is defined above.
 */
#define REP_DBG(_rep, _fmt, _args...)                                   \
{                                                                       \
    u8* _tmp =NULL;                                                     \
    /* replicate_format() takes (index, flags, indent, vector);        \
     * the original invocation omitted the flags argument and so       \
     * failed to compile under REP_DEBUG */                             \
    clib_warning("rep:[%s]:" _fmt,                                      \
                 replicate_format(replicate_get_index((_rep)),          \
                                  REPLICATE_FORMAT_NONE,                \
                                  0, _tmp),                             \
                 ##_args);                                              \
    vec_free(_tmp);                                                     \
}
#else
#define REP_DBG(_p, _fmt, _args...)
#endif
37
/*
 * Per-node error counters for the replicate graph nodes. The X-macro
 * expands once into enum symbols and once into the matching strings.
 */
#define foreach_replicate_dpo_error                       \
_(BUFFER_ALLOCATION_FAILURE, "Buffer Allocation Failure")

typedef enum {
#define _(sym,str) REPLICATE_DPO_ERROR_##sym,
  foreach_replicate_dpo_error
#undef _
  REPLICATE_DPO_N_ERROR,
} replicate_dpo_error_t;

/* strings indexed by replicate_dpo_error_t, used in node registrations */
static char * replicate_dpo_error_strings[] = {
#define _(sym,string) string,
  foreach_replicate_dpo_error
#undef _
};
53
/**
 * Pool of all DPOs. It's not static so the DP can have fast access
 */
replicate_t *replicate_pool;

/**
 * The one instance of replicate main. The combined packet/byte counters
 * are exported into the stats segment under "/net/mroute".
 */
replicate_main_t replicate_main = {
    .repm_counters = {
        .name = "mroutes",
        .stat_segment_name = "/net/mroute",
    },
};
68
69 static inline index_t
70 replicate_get_index (const replicate_t *rep)
71 {
72     return (rep - replicate_pool);
73 }
74
75 static inline dpo_id_t*
76 replicate_get_buckets (replicate_t *rep)
77 {
78     if (REP_HAS_INLINE_BUCKETS(rep))
79     {
80         return (rep->rep_buckets_inline);
81     }
82     else
83     {
84         return (rep->rep_buckets);
85     }
86 }
87
/**
 * Allocate and zero a new replicate object from the pool, and make sure
 * a zeroed combined counter exists at its index.
 */
static replicate_t *
replicate_alloc_i (void)
{
    replicate_t *rep;

    pool_get_aligned(replicate_pool, rep, CLIB_CACHE_LINE_BYTES);
    memset(rep, 0, sizeof(*rep));

    /* validate grows the counter vector if needed; zero clears any
     * stale stats left by a previous occupant of this pool slot */
    vlib_validate_combined_counter(&(replicate_main.repm_counters),
                                   replicate_get_index(rep));
    vlib_zero_combined_counter(&(replicate_main.repm_counters),
                               replicate_get_index(rep));

    return (rep);
}
103
/**
 * Format a replicate object: a header line (type, index, bucket count,
 * combined counters) followed by one line per bucket.
 *
 * NOTE(review): the 'flags' parameter is currently unused; both callers
 * pass a value but the output is identical regardless — confirm intent.
 */
static u8*
replicate_format (index_t repi,
                  replicate_format_flags_t flags,
                  u32 indent,
                  u8 *s)
{
    vlib_counter_t to;
    replicate_t *rep;
    dpo_id_t *buckets;
    u32 i;

    /* the index may carry the MPLS replicate marker bit; strip it */
    repi &= ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    vlib_get_combined_counter(&(replicate_main.repm_counters), repi, &to);
    buckets = replicate_get_buckets(rep);

    s = format(s, "%U: ", format_dpo_type, DPO_REPLICATE);
    s = format(s, "[index:%d buckets:%d ", repi, rep->rep_n_buckets);
    s = format(s, "to:[%Ld:%Ld]]", to.packets, to.bytes);

    for (i = 0; i < rep->rep_n_buckets; i++)
    {
        s = format(s, "\n%U", format_white_space, indent+2);
        s = format(s, "[%d]", i);
        s = format(s, " %U", format_dpo_id, &buckets[i], indent+6);
    }
    return (s);
}
132
133 u8*
134 format_replicate (u8 * s, va_list * args)
135 {
136     index_t repi = va_arg(*args, index_t);
137     replicate_format_flags_t flags = va_arg(*args, replicate_format_flags_t);
138
139     return (replicate_format(repi, flags, 0, s));
140 }
141 static u8*
142 format_replicate_dpo (u8 * s, va_list * args)
143 {
144     index_t repi = va_arg(*args, index_t);
145     u32 indent = va_arg(*args, u32);
146
147     return (replicate_format(repi, REPLICATE_FORMAT_DETAIL, indent, s));
148 }
149
150
/**
 * Allocate a replicate and size its bucket storage.
 * Buckets beyond the inline capacity are held in a cache-line aligned
 * heap vector.
 */
static replicate_t *
replicate_create_i (u32 num_buckets,
                    dpo_proto_t rep_proto)
{
    replicate_t *rep;

    rep = replicate_alloc_i();
    rep->rep_n_buckets = num_buckets;
    rep->rep_proto = rep_proto;

    if (!REP_HAS_INLINE_BUCKETS(rep))
    {
        vec_validate_aligned(rep->rep_buckets,
                             rep->rep_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    REP_DBG(rep, "create");

    return (rep);
}
172
173 index_t
174 replicate_create (u32 n_buckets,
175                   dpo_proto_t rep_proto)
176 {
177     return (replicate_get_index(replicate_create_i(n_buckets, rep_proto)));
178 }
179
/**
 * Stack the 'next' DPO into the given bucket slot, making this replicate
 * a parent of 'next' in the DPO graph.
 */
static inline void
replicate_set_bucket_i (replicate_t *rep,
                        u32 bucket,
                        dpo_id_t *buckets,
                        const dpo_id_t *next)
{
    dpo_stack(DPO_REPLICATE, rep->rep_proto, &buckets[bucket], next);
}
188
189 void
190 replicate_set_bucket (index_t repi,
191                       u32 bucket,
192                       const dpo_id_t *next)
193 {
194     replicate_t *rep;
195     dpo_id_t *buckets;
196
197     repi &= ~MPLS_IS_REPLICATE;
198     rep = replicate_get(repi);
199     buckets = replicate_get_buckets(rep);
200
201     ASSERT(bucket < rep->rep_n_buckets);
202
203     replicate_set_bucket_i(rep, bucket, buckets, next);
204 }
205
206 int
207 replicate_is_drop (const dpo_id_t *dpo)
208 {
209     replicate_t *rep;
210     index_t repi;
211
212     if (DPO_REPLICATE != dpo->dpoi_type)
213         return (0);
214
215     repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
216     rep = replicate_get(repi);
217
218     if (1 == rep->rep_n_buckets)
219     {
220         return (dpo_is_drop(replicate_get_bucket_i(rep, 0)));
221     }
222     return (0);
223 }
224
225 const dpo_id_t *
226 replicate_get_bucket (index_t repi,
227                       u32 bucket)
228 {
229     replicate_t *rep;
230
231     repi &= ~MPLS_IS_REPLICATE;
232     rep = replicate_get(repi);
233
234     return (replicate_get_bucket_i(rep, bucket));
235 }
236
237
/**
 * Ensure the next-hop vector is non-empty: an empty set is replaced with
 * a single drop path so the replicate always has at least one bucket.
 * May reallocate the vector, so the (possibly moved) vector is returned.
 */
static load_balance_path_t *
replicate_multipath_next_hop_fixup (load_balance_path_t *nhs,
                                    dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *nh;

        /*
         * we need something for the replicate. so use the drop
         */
        vec_add2(nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
    }

    return (nhs);
}
257
/*
 * Fill in adjacencies in block based on corresponding
 * next hop adjacencies.
 */
static void
replicate_fill_buckets (replicate_t *rep,
                        load_balance_path_t *nhs,
                        dpo_id_t *buckets,
                        u32 n_buckets)
{
    load_balance_path_t * nh;
    /* NOTE(review): bucket is u16 while n_buckets is u32 — assumes
     * fewer than 64K buckets; confirm callers never exceed this */
    u16 bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        ASSERT(bucket < n_buckets);
        replicate_set_bucket_i(rep, bucket++, buckets, &nh->path_dpo);
    }
}
283
/**
 * Publish the bucket count. A single store: data-plane readers observe
 * either the old or the new value, never a torn one.
 */
static inline void
replicate_set_n_buckets (replicate_t *rep,
                         u32 n_buckets)
{
    rep->rep_n_buckets = n_buckets;
}
290
/**
 * Update (or first-populate) the bucket set of a replicate from a vector
 * of next-hop paths.
 *
 * @param dpo       must be a DPO_REPLICATE; its index may carry the
 *                  MPLS_IS_REPLICATE marker bit.
 * @param next_hops vector of paths. Ownership passes to this function:
 *                  each path's DPO is reset and the vector freed on exit.
 *
 * Writes to the bucket array and to rep_n_buckets are ordered with
 * memory barriers so that packets in flight never observe a bucket
 * count larger than the array they can read.
 */
void
replicate_multipath_update (const dpo_id_t *dpo,
                            load_balance_path_t * next_hops)
{
    load_balance_path_t * nh, * nhs;
    dpo_id_t *tmp_dpo;
    u32 ii, n_buckets;
    replicate_t *rep;
    index_t repi;

    ASSERT(DPO_REPLICATE == dpo->dpoi_type);
    repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    /* guarantee at least one (possibly drop) path */
    nhs = replicate_multipath_next_hop_fixup(next_hops,
                                             rep->rep_proto);
    n_buckets = vec_len(nhs);

    if (0 == rep->rep_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        replicate_set_n_buckets(rep, n_buckets);

        if (!REP_HAS_INLINE_BUCKETS(rep))
            vec_validate_aligned(rep->rep_buckets,
                                 rep->rep_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        replicate_fill_buckets(rep, nhs,
                               replicate_get_buckets(rep),
                               n_buckets);
    }
    else
    {
        /*
         * This is a modification of an existing replicate.
         * We need to ensure that packets in flight see a consistent state, that
         * is the number of reported buckets the REP has
         * is not more than it actually has. So if the
         * number of buckets is increasing, we must update the bucket array first,
         * then the reported number. vice-versa if the number of buckets goes down.
         */
        if (n_buckets == rep->rep_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            replicate_fill_buckets(rep, nhs,
                                   replicate_get_buckets(rep),
                                   n_buckets);
        }
        else if (n_buckets > rep->rep_n_buckets)
        {
            /*
             * we have more buckets. the old replicate map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > REP_NUM_INLINE_BUCKETS &&
                rep->rep_n_buckets <= REP_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from the inline storage to out-line. Alloc the outline buckets
                 * first, then fixup the number. then reset the inlines.
                 */
                ASSERT(NULL == rep->rep_buckets);
                vec_validate_aligned(rep->rep_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                replicate_fill_buckets(rep, nhs,
                                       rep->rep_buckets,
                                       n_buckets);
                CLIB_MEMORY_BARRIER();
                replicate_set_n_buckets(rep, n_buckets);

                CLIB_MEMORY_BARRIER();

                /* in-flight readers now use the out-of-line array; the
                 * inline slots can be released */
                for (ii = 0; ii < REP_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&rep->rep_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= REP_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and it's still inline buckets.
                     * we can write the new on the old..
                     */
                    replicate_fill_buckets(rep, nhs,
                                           replicate_get_buckets(rep),
                                           n_buckets);
                    CLIB_MEMORY_BARRIER();
                    replicate_set_n_buckets(rep, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new bucket array to
                     * hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = replicate_get_buckets(rep);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    replicate_fill_buckets(rep, nhs, new_buckets, n_buckets);
                    CLIB_MEMORY_BARRIER();
                    /* swap the array pointer before publishing the larger
                     * count, so readers never index past their array */
                    rep->rep_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    replicate_set_n_buckets(rep, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }
        }
        else
        {
            /*
             * bucket size shrinkage.
             */
            if (n_buckets <= REP_NUM_INLINE_BUCKETS &&
                rep->rep_n_buckets > REP_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-line storage to inline:
                 *   1 - Fill the inline buckets,
                 *   2 - fixup the number (and this point the inline buckets are
                 *       used).
                 *   3 - free the outline buckets
                 */
                replicate_fill_buckets(rep, nhs,
                                       rep->rep_buckets_inline,
                                       n_buckets);
                CLIB_MEMORY_BARRIER();
                replicate_set_n_buckets(rep, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, rep->rep_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(rep->rep_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *  1 - update the number to the smaller size
                 *  2 - write the new buckets
                 *  3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = rep->rep_n_buckets;
                buckets = replicate_get_buckets(rep);

                replicate_set_n_buckets(rep, n_buckets);
                CLIB_MEMORY_BARRIER();

                replicate_fill_buckets(rep, nhs,
                                       buckets,
                                       n_buckets);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    /* release the caller's path vector (ownership was transferred) */
    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
}
485
486 static void
487 replicate_lock (dpo_id_t *dpo)
488 {
489     replicate_t *rep;
490
491     rep = replicate_get(dpo->dpoi_index);
492
493     rep->rep_locks++;
494 }
495
/**
 * Tear down a replicate once its last lock is released: unstack each
 * bucket DPO, free any out-of-line bucket vector, then return the
 * object to the pool (order matters — buckets must be reset before
 * the storage holding them is freed).
 */
static void
replicate_destroy (replicate_t *rep)
{
    dpo_id_t *buckets;
    int i;

    buckets = replicate_get_buckets(rep);

    for (i = 0; i < rep->rep_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    REP_DBG(rep, "destroy");
    if (!REP_HAS_INLINE_BUCKETS(rep))
    {
        vec_free(rep->rep_buckets);
    }

    pool_put(replicate_pool, rep);
}
517
518 static void
519 replicate_unlock (dpo_id_t *dpo)
520 {
521     replicate_t *rep;
522
523     rep = replicate_get(dpo->dpoi_index);
524
525     rep->rep_locks--;
526
527     if (0 == rep->rep_locks)
528     {
529         replicate_destroy(rep);
530     }
531 }
532
/**
 * DPO VFT memory-show callback: report pool usage for the replicate type.
 */
static void
replicate_mem_show (void)
{
    fib_show_memory_usage("replicate",
                          pool_elts(replicate_pool),
                          pool_len(replicate_pool),
                          sizeof(replicate_t));
}
541
/**
 * Virtual function table registered with the DPO infrastructure for
 * DPO_REPLICATE objects.
 */
const static dpo_vft_t rep_vft = {
    .dv_lock = replicate_lock,
    .dv_unlock = replicate_unlock,
    .dv_format = format_replicate_dpo,
    .dv_mem_show = replicate_mem_show,
};
548
/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a replicate
 *        object.
 *
 * this means that these graph nodes are ones from which a replicate is the
 * parent object in the DPO-graph.
 */
const static char* const replicate_ip4_nodes[] =
{
    "ip4-replicate",
    NULL,
};
const static char* const replicate_ip6_nodes[] =
{
    "ip6-replicate",
    NULL,
};
const static char* const replicate_mpls_nodes[] =
{
    "mpls-replicate",
    NULL,
};

/* NULL-terminated node lists indexed by dpo_proto_t; protocols with no
 * entry (e.g. ethernet) have no replicate node */
const static char* const * const replicate_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = replicate_ip4_nodes,
    [DPO_PROTO_IP6]  = replicate_ip6_nodes,
    [DPO_PROTO_MPLS] = replicate_mpls_nodes,
};
578
/**
 * Register the replicate DPO type, its VFT and per-protocol graph nodes
 * with the DPO infrastructure.
 */
void
replicate_module_init (void)
{
    dpo_register(DPO_REPLICATE, &rep_vft, replicate_nodes);
}
584
/**
 * CLI handler for "show replicate [<index>]": with an index, show that
 * replicate in detail; without, list every replicate in the pool.
 */
static clib_error_t *
replicate_show (vlib_main_t * vm,
                unformat_input_t * input,
                vlib_cli_command_t * cmd)
{
    index_t repi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &repi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != repi)
    {
        vlib_cli_output (vm, "%U", format_replicate, repi,
                         REPLICATE_FORMAT_DETAIL);
    }
    else
    {
        replicate_t *rep;

        pool_foreach(rep, replicate_pool,
        ({
            vlib_cli_output (vm, "%U", format_replicate,
                             replicate_get_index(rep),
                             REPLICATE_FORMAT_NONE);
        }));
    }

    return 0;
}

VLIB_CLI_COMMAND (replicate_show_command, static) = {
    .path = "show replicate",
    .short_help = "show replicate [<index>]",
    .function = replicate_show,
};
625
/**
 * Per-buffer trace record captured by the replicate nodes.
 */
typedef struct replicate_trace_t_
{
    index_t rep_index;  /* pool index of the replicate applied */
    dpo_id_t dpo;       /* the bucket DPO this clone was forwarded to */
} replicate_trace_t;
631
/**
 * Data-plane worker shared by the ip4/ip6/mpls replicate nodes.
 *
 * For each input buffer: look up the replicate by the adj_index stashed
 * in the buffer, bump its combined counter, clone the buffer once per
 * bucket, and enqueue each clone to its bucket DPO's next node.
 */
static uword
replicate_inline (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
    vlib_combined_counter_main_t * cm = &replicate_main.repm_counters;
    replicate_main_t * rm = &replicate_main;
    u32 n_left_from, * from, * to_next, next_index;
    u32 thread_index = vlib_get_thread_index();

    from = vlib_frame_vector_args (frame);
    n_left_from = frame->n_vectors;
    next_index = node->cached_next_index;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index,
                             to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            u32 next0, ci0, bi0, bucket, repi0;
            const replicate_t *rep0;
            vlib_buffer_t * b0, *c0;
            const dpo_id_t *dpo0;
            /* NOTE(review): u8 truncates if rep_n_buckets > 255 —
             * confirm the clone count can never exceed that */
            u8 num_cloned;

            bi0 = from[0];
            from += 1;
            n_left_from -= 1;

            b0 = vlib_get_buffer (vm, bi0);
            /* the parent DPO wrote the replicate's index here */
            repi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
            rep0 = replicate_get(repi0);

            vlib_increment_combined_counter(
                cm, thread_index, repi0, 1,
                vlib_buffer_length_in_chain(vm, b0));

            /* per-thread scratch vector for the clone indices */
            vec_validate (rm->clones[thread_index], rep0->rep_n_buckets - 1);

            num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[thread_index],
                                            rep0->rep_n_buckets, 128);

            if (num_cloned != rep0->rep_n_buckets)
              {
                /* allocation pressure: fewer clones than buckets */
                vlib_node_increment_counter
                  (vm, node->node_index,
                   REPLICATE_DPO_ERROR_BUFFER_ALLOCATION_FAILURE, 1);
              }

            for (bucket = 0; bucket < num_cloned; bucket++)
            {
                ci0 = rm->clones[thread_index][bucket];
                c0 = vlib_get_buffer(vm, ci0);

                to_next[0] = ci0;
                to_next += 1;
                n_left_to_next -= 1;

                /* steer this clone to its bucket's DPO */
                dpo0 = replicate_get_bucket_i(rep0, bucket);
                next0 = dpo0->dpoi_next_node;
                vnet_buffer (c0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

                if (PREDICT_FALSE(c0->flags & VLIB_BUFFER_IS_TRACED))
                {
                    replicate_trace_t *t;

                    vlib_trace_buffer (vm, node, next0, c0, 0);
                    t = vlib_add_trace (vm, node, c0, sizeof (*t));
                    t->rep_index = repi0;
                    t->dpo = *dpo0;
                }

                vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                 to_next, n_left_to_next,
                                                 ci0, next0);
                /* a single input can emit many clones; refill the next
                 * frame mid-buffer when it runs out */
                if (PREDICT_FALSE (n_left_to_next == 0))
                  {
                    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
                    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
                  }
            }
            vec_reset_length (rm->clones[thread_index]);
        }

        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}
725
726 static u8 *
727 format_replicate_trace (u8 * s, va_list * args)
728 {
729   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
730   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
731   replicate_trace_t *t = va_arg (*args, replicate_trace_t *);
732
733   s = format (s, "replicate: %d via %U",
734               t->rep_index,
735               format_dpo_id, &t->dpo, 0);
736   return s;
737 }
738
739 static uword
740 ip4_replicate (vlib_main_t * vm,
741                vlib_node_runtime_t * node,
742                vlib_frame_t * frame)
743 {
744     return (replicate_inline (vm, node, frame));
745 }
746
/**
 * @brief IP4 replication node
 */
VLIB_REGISTER_NODE (ip4_replicate_node) = {
  .function = ip4_replicate,
  .name = "ip4-replicate",
  .vector_size = sizeof (u32),

  .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
  .error_strings = replicate_dpo_error_strings,

  .format_trace = format_replicate_trace,
  /* next[0] is only the error-drop path; real nexts are added as
   * bucket DPOs are stacked */
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "ip4-drop",
  },
};
764
765 static uword
766 ip6_replicate (vlib_main_t * vm,
767                vlib_node_runtime_t * node,
768                vlib_frame_t * frame)
769 {
770     return (replicate_inline (vm, node, frame));
771 }
772
/**
 * @brief IPv6 replication node
 */
VLIB_REGISTER_NODE (ip6_replicate_node) = {
  .function = ip6_replicate,
  .name = "ip6-replicate",
  .vector_size = sizeof (u32),

  .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
  .error_strings = replicate_dpo_error_strings,

  .format_trace = format_replicate_trace,
  /* next[0] is only the error-drop path; real nexts are added as
   * bucket DPOs are stacked */
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "ip6-drop",
  },
};
790
791 static uword
792 mpls_replicate (vlib_main_t * vm,
793                 vlib_node_runtime_t * node,
794                 vlib_frame_t * frame)
795 {
796     return (replicate_inline (vm, node, frame));
797 }
798
/**
 * @brief MPLS replication node
 */
VLIB_REGISTER_NODE (mpls_replicate_node) = {
  .function = mpls_replicate,
  .name = "mpls-replicate",
  .vector_size = sizeof (u32),

  .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
  .error_strings = replicate_dpo_error_strings,

  .format_trace = format_replicate_trace,
  /* next[0] is only the error-drop path; real nexts are added as
   * bucket DPOs are stacked */
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "mpls-drop",
  },
};
816
/**
 * One-time init: size the per-thread clone scratch vectors — one entry
 * per worker plus the main thread.
 */
clib_error_t *
replicate_dpo_init (vlib_main_t * vm)
{
  replicate_main_t * rm = &replicate_main;

  vec_validate (rm->clones, vlib_num_workers());

  return 0;
}

VLIB_INIT_FUNCTION (replicate_dpo_init);