c11 safe string handling support
[vpp.git] / src / vnet / dpo / replicate_dpo.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vnet/ip/lookup.h>
17 #include <vnet/dpo/replicate_dpo.h>
18 #include <vnet/dpo/drop_dpo.h>
19 #include <vnet/adj/adj.h>
20 #include <vnet/mpls/mpls_types.h>
21
22 /**
23  * the logger
24  */
25 vlib_log_class_t replicate_logger;
26
27 #define REP_DBG(_rep, _fmt, _args...)                                   \
28 {                                                                       \
29     vlib_log_debug(replicate_logger,                                    \
30                    "rep:[%U]:" _fmt,                                    \
31                    format_replicate,                                    \
32                    replicate_get_index(_rep),                           \
33                    REPLICATE_FORMAT_NONE,                               \
34                    ##_args);                                            \
35 }
36
/* X-macro list of node error counters; currently only clone-allocation
 * failure (see replicate_inline) */
#define foreach_replicate_dpo_error                       \
_(BUFFER_ALLOCATION_FAILURE, "Buffer Allocation Failure")

/**
 * Error counter indices for the replicate nodes, generated from
 * foreach_replicate_dpo_error; REPLICATE_DPO_N_ERROR is the count.
 */
typedef enum {
#define _(sym,str) REPLICATE_DPO_ERROR_##sym,
  foreach_replicate_dpo_error
#undef _
  REPLICATE_DPO_N_ERROR,
} replicate_dpo_error_t;
46
/**
 * Human-readable strings for replicate_dpo_error_t, registered as the
 * .error_strings of each replicate node below.
 */
static char * replicate_dpo_error_strings[] = {
#define _(sym,string) string,
  foreach_replicate_dpo_error
#undef _
};
52
/**
 * Pool of all DPOs. It's not static so the DP can have fast access
 */
replicate_t *replicate_pool;

/**
 * The one instance of replicate main
 */
replicate_main_t replicate_main = {
    .repm_counters = {
        .name = "mroutes",
        /* per-replicate packet/byte counters exported in the stats
         * segment under this path */
        .stat_segment_name = "/net/mroute",
    },
};
67
68 static inline index_t
69 replicate_get_index (const replicate_t *rep)
70 {
71     return (rep - replicate_pool);
72 }
73
74 static inline dpo_id_t*
75 replicate_get_buckets (replicate_t *rep)
76 {
77     if (REP_HAS_INLINE_BUCKETS(rep))
78     {
79         return (rep->rep_buckets_inline);
80     }
81     else
82     {
83         return (rep->rep_buckets);
84     }
85 }
86
/**
 * Allocate and zero a new replicate from the pool, and prepare the
 * combined counter for its index.
 *
 * @return pointer to the new, zeroed pool element.
 */
static replicate_t *
replicate_alloc_i (void)
{
    replicate_t *rep;

    /* cache-line aligned: the object is read in the data-path */
    pool_get_aligned(replicate_pool, rep, CLIB_CACHE_LINE_BYTES);
    clib_memset(rep, 0, sizeof(*rep));

    /* ensure a counter slot exists for this index and start it at zero
     * (the pool may be recycling an index with stale counts) */
    vlib_validate_combined_counter(&(replicate_main.repm_counters),
                                   replicate_get_index(rep));
    vlib_zero_combined_counter(&(replicate_main.repm_counters),
                               replicate_get_index(rep));

    return (rep);
}
102
/**
 * Render a replicate object and its buckets into a string.
 *
 * @param repi   replicate index; the MPLS_IS_REPLICATE marker bit,
 *               if set, is stripped here
 * @param flags  format flags — NOTE(review): currently unused; the
 *               same detail is printed regardless
 * @param indent indentation applied to the per-bucket lines
 * @param s      string being built (appended to and returned)
 */
static u8*
replicate_format (index_t repi,
                  replicate_format_flags_t flags,
                  u32 indent,
                  u8 *s)
{
    vlib_counter_t to;
    replicate_t *rep;
    dpo_id_t *buckets;
    u32 i;

    repi &= ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    vlib_get_combined_counter(&(replicate_main.repm_counters), repi, &to);
    buckets = replicate_get_buckets(rep);

    /* summary line: type, index, bucket count, packet/byte counters */
    s = format(s, "%U: ", format_dpo_type, DPO_REPLICATE);
    s = format(s, "[index:%d buckets:%d ", repi, rep->rep_n_buckets);
    s = format(s, "to:[%Ld:%Ld]]", to.packets, to.bytes);

    /* one indented line per bucket showing the stacked DPO */
    for (i = 0; i < rep->rep_n_buckets; i++)
    {
        s = format(s, "\n%U", format_white_space, indent+2);
        s = format(s, "[%d]", i);
        s = format(s, " %U", format_dpo_id, &buckets[i], indent+6);
    }
    return (s);
}
131
132 u8*
133 format_replicate (u8 * s, va_list * args)
134 {
135     index_t repi = va_arg(*args, index_t);
136     replicate_format_flags_t flags = va_arg(*args, replicate_format_flags_t);
137
138     return (replicate_format(repi, flags, 0, s));
139 }
140 static u8*
141 format_replicate_dpo (u8 * s, va_list * args)
142 {
143     index_t repi = va_arg(*args, index_t);
144     u32 indent = va_arg(*args, u32);
145
146     return (replicate_format(repi, REPLICATE_FORMAT_DETAIL, indent, s));
147 }
148
149
/**
 * Allocate a replicate and size its bucket storage for num_buckets.
 * Buckets are stored inline in the object when the count is small
 * enough (REP_HAS_INLINE_BUCKETS), otherwise in a cache-line aligned
 * vector.
 */
static replicate_t *
replicate_create_i (u32 num_buckets,
                    dpo_proto_t rep_proto)
{
    replicate_t *rep;

    rep = replicate_alloc_i();
    rep->rep_n_buckets = num_buckets;
    rep->rep_proto = rep_proto;

    if (!REP_HAS_INLINE_BUCKETS(rep))
    {
        vec_validate_aligned(rep->rep_buckets,
                             rep->rep_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    REP_DBG(rep, "create");

    return (rep);
}
171
172 index_t
173 replicate_create (u32 n_buckets,
174                   dpo_proto_t rep_proto)
175 {
176     return (replicate_get_index(replicate_create_i(n_buckets, rep_proto)));
177 }
178
/**
 * Write one bucket: stack the replicate on 'next' so the data-path
 * can jump from the replicate node to next's node.
 */
static inline void
replicate_set_bucket_i (replicate_t *rep,
                        u32 bucket,
                        dpo_id_t *buckets,
                        const dpo_id_t *next)
{
    dpo_stack(DPO_REPLICATE, rep->rep_proto, &buckets[bucket], next);
}
187
188 void
189 replicate_set_bucket (index_t repi,
190                       u32 bucket,
191                       const dpo_id_t *next)
192 {
193     replicate_t *rep;
194     dpo_id_t *buckets;
195
196     repi &= ~MPLS_IS_REPLICATE;
197     rep = replicate_get(repi);
198     buckets = replicate_get_buckets(rep);
199
200     ASSERT(bucket < rep->rep_n_buckets);
201
202     replicate_set_bucket_i(rep, bucket, buckets, next);
203 }
204
205 int
206 replicate_is_drop (const dpo_id_t *dpo)
207 {
208     replicate_t *rep;
209     index_t repi;
210
211     if (DPO_REPLICATE != dpo->dpoi_type)
212         return (0);
213
214     repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
215     rep = replicate_get(repi);
216
217     if (1 == rep->rep_n_buckets)
218     {
219         return (dpo_is_drop(replicate_get_bucket_i(rep, 0)));
220     }
221     return (0);
222 }
223
224 const dpo_id_t *
225 replicate_get_bucket (index_t repi,
226                       u32 bucket)
227 {
228     replicate_t *rep;
229
230     repi &= ~MPLS_IS_REPLICATE;
231     rep = replicate_get(repi);
232
233     return (replicate_get_bucket_i(rep, bucket));
234 }
235
236
/**
 * Ensure the next-hop set is non-empty: an empty set gets a single
 * drop DPO of the given protocol, so the replicate always has at
 * least one bucket.
 *
 * @return the (possibly re-allocated) next-hop vector.
 */
static load_balance_path_t *
replicate_multipath_next_hop_fixup (load_balance_path_t *nhs,
                                    dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *nh;

        /*
         * we need something for the replicate. so use the drop
         */
        vec_add2(nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
    }

    return (nhs);
}
256
257 /*
258  * Fill in adjacencies in block based on corresponding
259  * next hop adjacencies.
260  */
261 static void
262 replicate_fill_buckets (replicate_t *rep,
263                         load_balance_path_t *nhs,
264                         dpo_id_t *buckets,
265                         u32 n_buckets)
266 {
267     load_balance_path_t * nh;
268     u16 bucket;
269
270     bucket = 0;
271
272     /*
273      * the next-hops have normalised weights. that means their sum is the number
274      * of buckets we need to fill.
275      */
276     vec_foreach (nh, nhs)
277     {
278         ASSERT(bucket < n_buckets);
279         replicate_set_bucket_i(rep, bucket++, buckets, &nh->path_dpo);
280     }
281 }
282
/**
 * Store the advertised bucket count. Callers order this store with
 * CLIB_MEMORY_BARRIER() relative to the bucket-array writes so that
 * in-flight packets never see a count larger than the valid array.
 */
static inline void
replicate_set_n_buckets (replicate_t *rep,
                         u32 n_buckets)
{
    rep->rep_n_buckets = n_buckets;
}
289
/**
 * (Re)populate a replicate's buckets from the given next-hop set.
 *
 * Handles first-time initialisation and in-service resize in both
 * directions, including migration between inline and out-of-line
 * bucket storage. The ordering of bucket writes, memory barriers and
 * the count update is what keeps in-flight packets safe; do not
 * reorder the statements in this function.
 *
 * The next_hops vector is consumed: each entry's DPO is reset and the
 * vector freed before returning.
 */
void
replicate_multipath_update (const dpo_id_t *dpo,
                            load_balance_path_t * next_hops)
{
    load_balance_path_t * nh, * nhs;
    dpo_id_t *tmp_dpo;
    u32 ii, n_buckets;
    replicate_t *rep;
    index_t repi;

    ASSERT(DPO_REPLICATE == dpo->dpoi_type);
    repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
    rep = replicate_get(repi);
    /* an empty path-set is replaced by a single drop path */
    nhs = replicate_multipath_next_hop_fixup(next_hops,
                                             rep->rep_proto);
    n_buckets = vec_len(nhs);

    if (0 == rep->rep_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        replicate_set_n_buckets(rep, n_buckets);

        if (!REP_HAS_INLINE_BUCKETS(rep))
            vec_validate_aligned(rep->rep_buckets,
                                 rep->rep_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        replicate_fill_buckets(rep, nhs,
                               replicate_get_buckets(rep),
                               n_buckets);
    }
    else
    {
        /*
         * This is a modification of an existing replicate.
         * We need to ensure that packets in flight see a consistent state, that
         * is the number of reported buckets the REP has
         * is not more than it actually has. So if the
         * number of buckets is increasing, we must update the bucket array first,
         * then the reported number. vice-versa if the number of buckets goes down.
         */
        if (n_buckets == rep->rep_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            replicate_fill_buckets(rep, nhs,
                                   replicate_get_buckets(rep),
                                   n_buckets);
        }
        else if (n_buckets > rep->rep_n_buckets)
        {
            /*
             * we have more buckets. the old replicate map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > REP_NUM_INLINE_BUCKETS &&
                rep->rep_n_buckets <= REP_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from the inline storage to out-line. Alloc the outline buckets
                 * first, then fixup the number. then reset the inlines.
                 */
                ASSERT(NULL == rep->rep_buckets);
                vec_validate_aligned(rep->rep_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                replicate_fill_buckets(rep, nhs,
                                       rep->rep_buckets,
                                       n_buckets);
                CLIB_MEMORY_BARRIER();
                replicate_set_n_buckets(rep, n_buckets);

                CLIB_MEMORY_BARRIER();

                /* count now points at the out-of-line array; the inline
                 * copies can be released */
                for (ii = 0; ii < REP_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&rep->rep_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= REP_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and it's still inline buckets.
                     * we can write the new on the old..
                     */
                    replicate_fill_buckets(rep, nhs,
                                           replicate_get_buckets(rep),
                                           n_buckets);
                    CLIB_MEMORY_BARRIER();
                    replicate_set_n_buckets(rep, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new bucket array to
                     * hold the increased number of choices.
                     */
                    /* NOTE(review): shadows the outer tmp_dpo declaration */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = replicate_get_buckets(rep);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    replicate_fill_buckets(rep, nhs, new_buckets, n_buckets);
                    CLIB_MEMORY_BARRIER();
                    /* swap the array pointer before growing the count */
                    rep->rep_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    replicate_set_n_buckets(rep, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }
        }
        else
        {
            /*
             * bucket size shrinkage.
             */
            if (n_buckets <= REP_NUM_INLINE_BUCKETS &&
                rep->rep_n_buckets > REP_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-line storage to inline:
                 *   1 - Fill the inline buckets,
                 *   2 - fixup the number (and this point the inline buckets are
                 *       used).
                 *   3 - free the outline buckets
                 */
                replicate_fill_buckets(rep, nhs,
                                       rep->rep_buckets_inline,
                                       n_buckets);
                CLIB_MEMORY_BARRIER();
                replicate_set_n_buckets(rep, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, rep->rep_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(rep->rep_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *  1 - update the number to the smaller size
                 *  2 - write the new buckets
                 *  3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = rep->rep_n_buckets;
                buckets = replicate_get_buckets(rep);

                replicate_set_n_buckets(rep, n_buckets);
                CLIB_MEMORY_BARRIER();

                replicate_fill_buckets(rep, nhs,
                                       buckets,
                                       n_buckets);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    /* the path-set vector is consumed: reset each entry and free it */
    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
}
484
485 static void
486 replicate_lock (dpo_id_t *dpo)
487 {
488     replicate_t *rep;
489
490     rep = replicate_get(dpo->dpoi_index);
491
492     rep->rep_locks++;
493 }
494
/**
 * Final tear-down once the last lock is released: reset every bucket's
 * DPO, free out-of-line bucket storage if used, and return the object
 * to the pool.
 */
static void
replicate_destroy (replicate_t *rep)
{
    dpo_id_t *buckets;
    int i;

    buckets = replicate_get_buckets(rep);

    for (i = 0; i < rep->rep_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    REP_DBG(rep, "destroy");
    if (!REP_HAS_INLINE_BUCKETS(rep))
    {
        vec_free(rep->rep_buckets);
    }

    pool_put(replicate_pool, rep);
}
516
517 static void
518 replicate_unlock (dpo_id_t *dpo)
519 {
520     replicate_t *rep;
521
522     rep = replicate_get(dpo->dpoi_index);
523
524     rep->rep_locks--;
525
526     if (0 == rep->rep_locks)
527     {
528         replicate_destroy(rep);
529     }
530 }
531
/**
 * Report pool occupancy to the FIB memory-usage CLI.
 */
static void
replicate_mem_show (void)
{
    fib_show_memory_usage("replicate",
                          pool_elts(replicate_pool),
                          pool_len(replicate_pool),
                          sizeof(replicate_t));
}
540
/**
 * Virtual function table through which the generic DPO layer drives
 * replicate objects (lock/unlock, format, memory-usage show).
 */
const static dpo_vft_t rep_vft = {
    .dv_lock = replicate_lock,
    .dv_unlock = replicate_unlock,
    .dv_format = format_replicate_dpo,
    .dv_mem_show = replicate_mem_show,
};
547
/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a replicate
 *        object.
 *
 * this means that these graph nodes are ones from which a replicate is the
 * parent object in the DPO-graph.
 */
const static char* const replicate_ip4_nodes[] =
{
    "ip4-replicate",
    NULL,
};
const static char* const replicate_ip6_nodes[] =
{
    "ip6-replicate",
    NULL,
};
const static char* const replicate_mpls_nodes[] =
{
    "mpls-replicate",
    NULL,
};

/* per-protocol node lists, indexed by dpo_proto_t; protocols without a
 * designated entry are implicitly NULL */
const static char* const * const replicate_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = replicate_ip4_nodes,
    [DPO_PROTO_IP6]  = replicate_ip6_nodes,
    [DPO_PROTO_MPLS] = replicate_mpls_nodes,
};
577
578 void
579 replicate_module_init (void)
580 {
581     dpo_register(DPO_REPLICATE, &rep_vft, replicate_nodes);
582     replicate_logger = vlib_log_register_class("dpo", "replicate");
583 }
584
585 static clib_error_t *
586 replicate_show (vlib_main_t * vm,
587                 unformat_input_t * input,
588                 vlib_cli_command_t * cmd)
589 {
590     index_t repi = INDEX_INVALID;
591
592     while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
593     {
594         if (unformat (input, "%d", &repi))
595             ;
596         else
597             break;
598     }
599
600     if (INDEX_INVALID != repi)
601     {
602         vlib_cli_output (vm, "%U", format_replicate, repi,
603                          REPLICATE_FORMAT_DETAIL);
604     }
605     else
606     {
607         replicate_t *rep;
608
609         pool_foreach(rep, replicate_pool,
610         ({
611             vlib_cli_output (vm, "%U", format_replicate,
612                              replicate_get_index(rep),
613                              REPLICATE_FORMAT_NONE);
614         }));
615     }
616
617     return 0;
618 }
619
/**
 * CLI registration for "show replicate [<index>]".
 */
VLIB_CLI_COMMAND (replicate_show_command, static) = {
    .path = "show replicate",
    .short_help = "show replicate [<index>]",
    .function = replicate_show,
};
625
/**
 * Per-clone packet trace: the owning replicate's index and the bucket
 * DPO the clone was handed to.
 */
typedef struct replicate_trace_t_
{
    index_t rep_index;
    dpo_id_t dpo;
} replicate_trace_t;
631
/**
 * Data-plane function shared by the ip4/ip6/mpls replicate nodes.
 *
 * For each input buffer: look up the replicate object indexed by
 * vnet_buffer()->ip.adj_index[VLIB_TX], clone the buffer once per
 * bucket, point each clone at its bucket's DPO and enqueue it to that
 * DPO's next node.
 */
static uword
replicate_inline (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
    vlib_combined_counter_main_t * cm = &replicate_main.repm_counters;
    replicate_main_t * rm = &replicate_main;
    u32 n_left_from, * from, * to_next, next_index;
    u32 thread_index = vlib_get_thread_index();

    from = vlib_frame_vector_args (frame);
    n_left_from = frame->n_vectors;
    next_index = node->cached_next_index;
  
    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index,
                             to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            u32 next0, ci0, bi0, bucket, repi0;
            const replicate_t *rep0;
            vlib_buffer_t * b0, *c0;
            const dpo_id_t *dpo0;
            u8 num_cloned;

            bi0 = from[0];
            from += 1;
            n_left_from -= 1;

            b0 = vlib_get_buffer (vm, bi0);
            /* the previous DPO in the chain wrote the replicate index here */
            repi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
            rep0 = replicate_get(repi0);

            /* account the original packet once, not per clone */
            vlib_increment_combined_counter(
                cm, thread_index, repi0, 1,
                vlib_buffer_length_in_chain(vm, b0));

            /* per-thread scratch vector that receives the clone indices */
            vec_validate (rm->clones[thread_index], rep0->rep_n_buckets - 1);

            /* NOTE(review): 128 appears to be the head-space argument to
             * vlib_buffer_clone — confirm against the vlib buffer API */
            num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[thread_index],
                                            rep0->rep_n_buckets, 128);

            if (num_cloned != rep0->rep_n_buckets)
              {
                /* buffer exhaustion: replicate to fewer destinations,
                 * but record the failure */
                vlib_node_increment_counter
                  (vm, node->node_index,
                   REPLICATE_DPO_ERROR_BUFFER_ALLOCATION_FAILURE, 1);
              }

            for (bucket = 0; bucket < num_cloned; bucket++)
            {
                ci0 = rm->clones[thread_index][bucket];
                c0 = vlib_get_buffer(vm, ci0);

                to_next[0] = ci0;
                to_next += 1;
                n_left_to_next -= 1;

                /* hand this clone to its bucket's DPO */
                dpo0 = replicate_get_bucket_i(rep0, bucket);
                next0 = dpo0->dpoi_next_node;
                vnet_buffer (c0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

                if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                    replicate_trace_t *t;

                    if (c0 != b0)
                    {
                        /* clones do not inherit the trace flag; propagate it */
                        vlib_buffer_copy_trace_flag (vm, b0, ci0);
                        VLIB_BUFFER_TRACE_TRAJECTORY_INIT (c0);
                    }
                    t = vlib_add_trace (vm, node, c0, sizeof (*t));
                    t->rep_index = repi0;
                    t->dpo = *dpo0;
                }

                vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                 to_next, n_left_to_next,
                                                 ci0, next0);
                /* the next-frame can fill mid-replication; swap in a fresh one */
                if (PREDICT_FALSE (n_left_to_next == 0))
                  {
                    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
                    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
                  }
            }
            vec_reset_length (rm->clones[thread_index]);
        }

        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

    return frame->n_vectors;
}
729
730 static u8 *
731 format_replicate_trace (u8 * s, va_list * args)
732 {
733   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
734   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
735   replicate_trace_t *t = va_arg (*args, replicate_trace_t *);
736
737   s = format (s, "replicate: %d via %U",
738               t->rep_index,
739               format_dpo_id, &t->dpo, 0);
740   return s;
741 }
742
743 static uword
744 ip4_replicate (vlib_main_t * vm,
745                vlib_node_runtime_t * node,
746                vlib_frame_t * frame)
747 {
748     return (replicate_inline (vm, node, frame));
749 }
750
/**
 * @brief IP4 replication node
 *
 * Registered with a single "ip4-drop" next; at runtime each clone's
 * actual next node comes from its bucket DPO (dpoi_next_node).
 */
VLIB_REGISTER_NODE (ip4_replicate_node) = {
  .function = ip4_replicate,
  .name = "ip4-replicate",
  .vector_size = sizeof (u32),

  .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
  .error_strings = replicate_dpo_error_strings,

  .format_trace = format_replicate_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "ip4-drop",
  },
};
768
769 static uword
770 ip6_replicate (vlib_main_t * vm,
771                vlib_node_runtime_t * node,
772                vlib_frame_t * frame)
773 {
774     return (replicate_inline (vm, node, frame));
775 }
776
/**
 * @brief IPv6 replication node
 *
 * Registered with a single "ip6-drop" next; at runtime each clone's
 * actual next node comes from its bucket DPO (dpoi_next_node).
 */
VLIB_REGISTER_NODE (ip6_replicate_node) = {
  .function = ip6_replicate,
  .name = "ip6-replicate",
  .vector_size = sizeof (u32),

  .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
  .error_strings = replicate_dpo_error_strings,

  .format_trace = format_replicate_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "ip6-drop",
  },
};
794
795 static uword
796 mpls_replicate (vlib_main_t * vm,
797                 vlib_node_runtime_t * node,
798                 vlib_frame_t * frame)
799 {
800     return (replicate_inline (vm, node, frame));
801 }
802
/**
 * @brief MPLS replication node
 *
 * Registered with a single "mpls-drop" next; at runtime each clone's
 * actual next node comes from its bucket DPO (dpoi_next_node).
 */
VLIB_REGISTER_NODE (mpls_replicate_node) = {
  .function = mpls_replicate,
  .name = "mpls-replicate",
  .vector_size = sizeof (u32),

  .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
  .error_strings = replicate_dpo_error_strings,

  .format_trace = format_replicate_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "mpls-drop",
  },
};
820
821 clib_error_t *
822 replicate_dpo_init (vlib_main_t * vm)
823 {
824   replicate_main_t * rm = &replicate_main;
825
826   vec_validate (rm->clones, vlib_num_workers());
827
828   return 0;
829 }
830
831 VLIB_INIT_FUNCTION (replicate_dpo_init);