gbp: migrate old MULTIARCH macros to VLIB_NODE_FN
[vpp.git] / src / plugins / gbp / gbp_learn.c
1 /*
2  * Copyright (c) 2018 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <plugins/gbp/gbp.h>
17 #include <plugins/gbp/gbp_learn.h>
18 #include <plugins/gbp/gbp_bridge_domain.h>
19 #include <vlibmemory/api.h>
20
21 #include <vnet/util/throttle.h>
22 #include <vnet/l2/l2_input.h>
23 #include <vnet/fib/fib_table.h>
24 #include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
25
/**
 * Grouping of global data for the GBP source EPG classification feature
 */
typedef struct gbp_learn_main_t_
{
  /**
   * Next nodes for L2 output features
   * (feature-bitmap next-node table, populated by
   *  feat_bitmap_init_next_nodes in gbp_learn_init)
   */
  u32 gl_l2_input_feat_next[32];

  /**
   * logger - VLIB log class
   */
  vlib_log_class_t gl_logger;

  /**
   * throttles for the DP learning
   * (sized for all vlib mains at init; seeded per-thread each frame)
   */
  throttle_t gl_l2_throttle;
  throttle_t gl_l3_throttle;
} gbp_learn_main_t;
47
/**
 * The maximum learning rate per-hashed EP
 * (passed as the rate argument to throttle_init for both throttles)
 */
#define GBP_ENDPOINT_HASH_LEARN_RATE (1e-2)

/* single module-wide instance of this feature's global state */
static gbp_learn_main_t gbp_learn_main;
54
/**
 * Debug-level logging helper for this module.
 *
 * The expansion deliberately has NO trailing semicolon: the original
 * ended with one, so every call site's own ';' produced an empty
 * statement (";;"), which breaks un-braced `if (x) GBP_LEARN_DBG(...);
 * else ...` constructs. The macro now behaves like a single statement.
 */
#define GBP_LEARN_DBG(...)                                      \
    vlib_log_debug (gbp_learn_main.gl_logger, __VA_ARGS__)
57
/* X-macro list of error counters / next-node arcs for the learn nodes */
#define foreach_gbp_learn                      \
  _(DROP,    "drop")

/* Error codes: one GBP_LEARN_ERROR_<sym> per foreach_gbp_learn entry */
typedef enum
{
#define _(sym,str) GBP_LEARN_ERROR_##sym,
  foreach_gbp_learn
#undef _
    GBP_LEARN_N_ERROR,
} gbp_learn_error_t;

/* Human-readable counter names, indexed by gbp_learn_error_t */
static char *gbp_learn_error_strings[] = {
#define _(sym,string) string,
  foreach_gbp_learn
#undef _
};

/* Next-node indices; DROP is the only explicitly registered arc */
typedef enum
{
#define _(sym,str) GBP_LEARN_NEXT_##sym,
  foreach_gbp_learn
#undef _
    GBP_LEARN_N_NEXT,
} gbp_learn_next_t;
82
/**
 * Details of an L2 endpoint learnt in the data-plane, marshalled to the
 * main thread via RPC (see vl_api_rpc_call_main_thread callers below)
 * so gbp_learn_l2_cp can create/update the endpoint.
 */
typedef struct gbp_learn_l2_t_
{
  /* learnt inner IP (v4 or v6); left zero when only the MAC was seen */
  ip46_address_t ip;
  /* learnt inner source MAC */
  mac_address_t mac;
  /* RX interface the packet arrived on */
  u32 sw_if_index;
  /* bridge-domain the packet was bridged in */
  u32 bd_index;
  /* source EPG carried in the VXLAN-GBP header */
  epg_id_t epg;
  /* outer (underlay) addresses as RECEIVED; flipped when programming
   * the EP since the API takes the send direction */
  ip46_address_t outer_src;
  ip46_address_t outer_dst;
} gbp_learn_l2_t;
93
94
95 static void
96 gbp_learn_l2_cp (const gbp_learn_l2_t * gl2)
97 {
98   ip46_address_t *ips = NULL;
99
100   GBP_LEARN_DBG ("L2 EP: %U %U, %d",
101                  format_mac_address_t, &gl2->mac,
102                  format_ip46_address, &gl2->ip, IP46_TYPE_ANY, gl2->epg);
103
104   vec_add1 (ips, gl2->ip);
105
106   ASSERT (!ip46_address_is_zero (&gl2->outer_src));
107   ASSERT (!ip46_address_is_zero (&gl2->outer_dst));
108
109   /*
110    * flip the source and dst, since that's how it was received, this API
111    * takes how it's sent
112    */
113   gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
114                                 gl2->sw_if_index, ips,
115                                 &gl2->mac, INDEX_INVALID,
116                                 INDEX_INVALID, gl2->epg,
117                                 (GBP_ENDPOINT_FLAG_LEARNT |
118                                  GBP_ENDPOINT_FLAG_REMOTE),
119                                 &gl2->outer_dst, &gl2->outer_src, NULL);
120   vec_free (ips);
121 }
122
123 static void
124 gbp_learn_l2_ip4_dp (const u8 * mac, const ip4_address_t * ip,
125                      u32 bd_index, u32 sw_if_index, epg_id_t epg,
126                      const ip4_address_t * outer_src,
127                      const ip4_address_t * outer_dst)
128 {
129   gbp_learn_l2_t gl2 = {
130     .sw_if_index = sw_if_index,
131     .bd_index = bd_index,
132     .epg = epg,
133     .ip.ip4 = *ip,
134     .outer_src.ip4 = *outer_src,
135     .outer_dst.ip4 = *outer_dst,
136   };
137   mac_address_from_bytes (&gl2.mac, mac);
138
139   ASSERT (!ip46_address_is_zero (&gl2.outer_src));
140   ASSERT (!ip46_address_is_zero (&gl2.outer_dst));
141
142   vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
143 }
144
145 static void
146 gbp_learn_l2_ip6_dp (const u8 * mac, const ip6_address_t * ip,
147                      u32 bd_index, u32 sw_if_index, epg_id_t epg,
148                      const ip4_address_t * outer_src,
149                      const ip4_address_t * outer_dst)
150 {
151   gbp_learn_l2_t gl2 = {
152     .sw_if_index = sw_if_index,
153     .bd_index = bd_index,
154     .epg = epg,
155     .ip.ip6 = *ip,
156     .outer_src.ip4 = *outer_src,
157     .outer_dst.ip4 = *outer_dst,
158   };
159   mac_address_from_bytes (&gl2.mac, mac);
160
161   vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
162 }
163
/**
 * Data-plane learning of an L2 endpoint for which only the MAC is known
 * (non-IP ethertype); the ip field of the RPC payload stays zero.
 * Punts to the main thread where gbp_learn_l2_cp creates the endpoint.
 */
static void
gbp_learn_l2_dp (const u8 * mac, u32 bd_index, u32 sw_if_index,
                 epg_id_t epg,
                 const ip4_address_t * outer_src,
                 const ip4_address_t * outer_dst)
{
  gbp_learn_l2_t gl2 = {
    .sw_if_index = sw_if_index,
    .bd_index = bd_index,
    .epg = epg,
    .outer_src.ip4 = *outer_src,
    .outer_dst.ip4 = *outer_dst,
  };
  mac_address_from_bytes (&gl2.mac, mac);

  /* NOTE(review): unlike gbp_learn_l2_ip4_dp, this path does not ASSERT
   * the outer addresses are non-zero before the RPC; gbp_learn_l2_cp
   * will assert on the main thread instead — consider adding the checks
   * here for symmetry */
  vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
}
181
/**
 * per-packet trace data
 */
typedef struct gbp_learn_l2_trace_t_
{
  /* per-pkt trace data */
  mac_address_t mac;		/* inner source MAC copied from the packet */
  u32 sw_if_index;		/* RX interface */
  u32 new;			/* 1 if no endpoint existed for this MAC/BD */
  u32 throttled;		/* 1 if learning was skipped (throttle or D-bit) */
  u32 epg;			/* source EPG from the VXLAN-GBP header */
  u32 d_bit;			/* VXLAN-GBP 'D' (do-not-learn) flag was set */
} gbp_learn_l2_trace_t;
195
196 always_inline void
197 gbp_learn_get_outer (const ethernet_header_t * eh0,
198                      ip4_address_t * outer_src, ip4_address_t * outer_dst)
199 {
200   ip4_header_t *ip0;
201   u8 *buff;
202
203   /* rewind back to the ivxlan header */
204   buff = (u8 *) eh0;
205   buff -= (sizeof (vxlan_gbp_header_t) +
206            sizeof (udp_header_t) + sizeof (ip4_header_t));
207
208   ip0 = (ip4_header_t *) buff;
209
210   *outer_src = ip0->src_address;
211   *outer_dst = ip0->dst_address;
212 }
213
/**
 * L2 learning node: for each bridged packet, learn (or refresh) the
 * source MAC/IP endpoint unless the VXLAN-GBP D-bit or the BD's
 * do-not-learn flag suppresses it. Learning is rate-limited per
 * MAC-hash by the L2 throttle; actual EP creation happens on the
 * main thread via RPC.
 */
VLIB_NODE_FN (gbp_learn_l2_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
  gbp_learn_main_t *glm;
  f64 time_now;

  glm = &gbp_learn_main;
  next_index = 0;
  n_left_from = frame->n_vectors;
  from = vlib_frame_vector_args (frame);
  time_now = vlib_time_now (vm);
  thread_index = vm->thread_index;

  /* refresh this thread's throttle seed for the current time bucket */
  seed = throttle_seed (&glm->gl_l2_throttle, thread_index, time_now);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          ip4_address_t outer_src, outer_dst;
          u32 bi0, sw_if_index0, t0, epg0;
          const ethernet_header_t *eh0;
          gbp_bridge_domain_t *gb0;
          gbp_learn_next_t next0;
          gbp_endpoint_t *ge0;
          vlib_buffer_t *b0;

          next0 = GBP_LEARN_NEXT_DROP;
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];

          eh0 = vlib_buffer_get_current (b0);
          epg0 = vnet_buffer2 (b0)->gbp.src_epg;

          /* pick the next L2 input feature; this node never drops */
          next0 = vnet_l2_feature_next (b0, glm->gl_l2_input_feat_next,
                                        L2INPUT_FEAT_GBP_LEARN);

          ge0 = gbp_endpoint_find_mac (eh0->src_address,
                                       vnet_buffer (b0)->l2.bd_index);
          gb0 =
            gbp_bridge_domain_get_by_bd_index (vnet_buffer (b0)->l2.bd_index);

          /* D-bit set or BD configured not to learn: skip learning.
           * t0 doubles as the 'throttled/skipped' flag in the trace. */
          if ((vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D) ||
              (gb0->gb_flags & GBP_BD_FLAG_DO_NOT_LEARN))
            {
              t0 = 1;
              goto trace;
            }

          /*
           * check for new EP or a moved EP
           */
          if (NULL == ge0 || ge0->ge_fwd.gef_itf != sw_if_index0)

            {
              /*
               * use the last 4 bytes of the mac address as the hash for the EP
               */
              t0 = throttle_check (&glm->gl_l2_throttle, thread_index,
                                   *((u32 *) (eh0->src_address + 2)), seed);
              if (!t0)
                {
                  gbp_learn_get_outer (eh0, &outer_src, &outer_dst);

                  /* learn MAC+IP when the payload is IP, else MAC only */
                  switch (clib_net_to_host_u16 (eh0->type))
                    {
                    case ETHERNET_TYPE_IP4:
                      {
                        const ip4_header_t *ip0;

                        ip0 = (ip4_header_t *) (eh0 + 1);

                        gbp_learn_l2_ip4_dp (eh0->src_address,
                                             &ip0->src_address,
                                             vnet_buffer (b0)->l2.bd_index,
                                             sw_if_index0, epg0,
                                             &outer_src, &outer_dst);

                        break;
                      }
                    case ETHERNET_TYPE_IP6:
                      {
                        const ip6_header_t *ip0;

                        ip0 = (ip6_header_t *) (eh0 + 1);

                        gbp_learn_l2_ip6_dp (eh0->src_address,
                                             &ip0->src_address,
                                             vnet_buffer (b0)->l2.bd_index,
                                             sw_if_index0, epg0,
                                             &outer_src, &outer_dst);

                        break;
                      }
                    default:
                      gbp_learn_l2_dp (eh0->src_address,
                                       vnet_buffer (b0)->l2.bd_index,
                                       sw_if_index0, epg0,
                                       &outer_src, &outer_dst);
                      break;
                    }
                }
            }
          else
            {
              /*
               * this update could happen simultaneously from multiple workers
               * but that's ok we are not interested in being very accurate.
               */
              t0 = 0;
              ge0->ge_last_time = time_now;
            }
        trace:
          if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              gbp_learn_l2_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              clib_memcpy_fast (t->mac.bytes, eh0->src_address, 6);
              t->new = (NULL == ge0);
              t->throttled = t0;
              t->sw_if_index = sw_if_index0;
              t->epg = epg0;
              t->d_bit = ! !(vnet_buffer2 (b0)->gbp.flags &
                             VXLAN_GBP_GPFLAGS_D);
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
364
365 /* packet trace format function */
366 static u8 *
367 format_gbp_learn_l2_trace (u8 * s, va_list * args)
368 {
369   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
370   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
371   gbp_learn_l2_trace_t *t = va_arg (*args, gbp_learn_l2_trace_t *);
372
373   s = format (s, "new:%d throttled:%d d-bit:%d mac:%U itf:%d epg:%d",
374               t->new, t->throttled, t->d_bit,
375               format_mac_address_t, &t->mac, t->sw_if_index, t->epg);
376
377   return s;
378 }
379
/* *INDENT-OFF* */
/* graph-node registration for the L2 learning feature */
VLIB_REGISTER_NODE (gbp_learn_l2_node) = {
  .name = "gbp-learn-l2",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_learn_l2_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(gbp_learn_error_strings),
  .error_strings = gbp_learn_error_strings,

  .n_next_nodes = GBP_LEARN_N_NEXT,

  /* only the drop arc is static; forwarding arcs come from the
   * L2 feature bitmap at runtime */
  .next_nodes = {
    [GBP_LEARN_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
397
/**
 * Details of an L3 endpoint learnt in the data-plane, marshalled to the
 * main thread via RPC so gbp_learn_l3_cp can create/update the endpoint.
 */
typedef struct gbp_learn_l3_t_
{
  /* learnt inner IP source (v4 or v6) */
  ip46_address_t ip;
  /* FIB the packet was received in */
  u32 fib_index;
  /* RX interface */
  u32 sw_if_index;
  /* source EPG from the VXLAN-GBP header */
  epg_id_t epg;
  /* outer (underlay) addresses as received; flipped when programming */
  ip46_address_t outer_src;
  ip46_address_t outer_dst;
} gbp_learn_l3_t;
407
408 static void
409 gbp_learn_l3_cp (const gbp_learn_l3_t * gl3)
410 {
411   ip46_address_t *ips = NULL;
412
413   GBP_LEARN_DBG ("L3 EP: %U, %d", format_ip46_address, &gl3->ip,
414                  IP46_TYPE_ANY, gl3->epg);
415
416   vec_add1 (ips, gl3->ip);
417
418   gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
419                                 gl3->sw_if_index, ips, NULL,
420                                 INDEX_INVALID, INDEX_INVALID, gl3->epg,
421                                 (GBP_ENDPOINT_FLAG_REMOTE |
422                                  GBP_ENDPOINT_FLAG_LEARNT),
423                                 &gl3->outer_dst, &gl3->outer_src, NULL);
424   vec_free (ips);
425 }
426
427 static void
428 gbp_learn_ip4_dp (const ip4_address_t * ip,
429                   u32 fib_index, u32 sw_if_index, epg_id_t epg,
430                   const ip4_address_t * outer_src,
431                   const ip4_address_t * outer_dst)
432 {
433   /* *INDENT-OFF* */
434   gbp_learn_l3_t gl3 = {
435     .ip = {
436       .ip4 = *ip,
437     },
438     .sw_if_index = sw_if_index,
439     .fib_index = fib_index,
440     .epg = epg,
441     .outer_src.ip4 = *outer_src,
442     .outer_dst.ip4 = *outer_dst,
443   };
444   /* *INDENT-ON* */
445
446   vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
447 }
448
449 static void
450 gbp_learn_ip6_dp (const ip6_address_t * ip,
451                   u32 fib_index, u32 sw_if_index, epg_id_t epg,
452                   const ip4_address_t * outer_src,
453                   const ip4_address_t * outer_dst)
454 {
455   /* *INDENT-OFF* */
456   gbp_learn_l3_t gl3 = {
457     .ip = {
458       .ip6 = *ip,
459     },
460     .sw_if_index = sw_if_index,
461     .fib_index = fib_index,
462     .epg = epg,
463     .outer_src.ip4 = *outer_src,
464     .outer_dst.ip4 = *outer_dst,
465   };
466   /* *INDENT-ON* */
467
468   vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
469 }
470
/**
 * per-packet trace data
 */
typedef struct gbp_learn_l3_trace_t_
{
  /* per-pkt trace data */
  ip46_address_t ip;		/* inner IP source that was (or would be) learnt */
  u32 sw_if_index;		/* RX interface */
  u32 new;			/* 1 if no endpoint existed for this IP/FIB */
  u32 throttled;		/* 1 if learning was skipped (throttle or D-bit) */
  u32 epg;			/* source EPG from the VXLAN-GBP header */
} gbp_learn_l3_trace_t;
483
/**
 * Shared L3 learning worker for the ip4/ip6 nodes: learn (or refresh)
 * the source-IP endpoint of each routed packet unless the VXLAN-GBP
 * D-bit suppresses it. Learning is rate-limited per IP-hash by the L3
 * throttle; actual EP creation happens on the main thread via RPC.
 *
 * @param fproto  selects the IPv4 or IPv6 handling path
 */
static uword
gbp_learn_l3 (vlib_main_t * vm,
              vlib_node_runtime_t * node, vlib_frame_t * frame,
              fib_protocol_t fproto)
{
  u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
  gbp_learn_main_t *glm;
  f64 time_now;

  glm = &gbp_learn_main;
  next_index = 0;
  n_left_from = frame->n_vectors;
  from = vlib_frame_vector_args (frame);
  time_now = vlib_time_now (vm);
  thread_index = vm->thread_index;

  /* refresh this thread's throttle seed for the current time bucket */
  seed = throttle_seed (&glm->gl_l3_throttle, thread_index, time_now);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, sw_if_index0, t0, epg0, fib_index0;
          CLIB_UNUSED (const ip4_header_t *) ip4_0;
          CLIB_UNUSED (const ip6_header_t *) ip6_0;
          ip4_address_t outer_src, outer_dst;
          ethernet_header_t *eth0;
          gbp_learn_next_t next0;
          gbp_endpoint_t *ge0;
          vlib_buffer_t *b0;

          next0 = GBP_LEARN_NEXT_DROP;
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
          epg0 = vnet_buffer2 (b0)->gbp.src_epg;
          ip6_0 = NULL;
          ip4_0 = NULL;

          /* continue along the feature arc; this node never drops */
          vnet_feature_next (&next0, b0);

          /* D-bit set: skip learning. t0 doubles as the
           * 'throttled/skipped' flag in the trace. */
          if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D)
            {
              t0 = 1;
              ge0 = NULL;
              goto trace;
            }

          fib_index0 = fib_table_get_index_for_sw_if_index (fproto,
                                                            sw_if_index0);

          if (FIB_PROTOCOL_IP6 == fproto)
            {
              /* buffer current points at the inner IP header; the inner
               * ethernet header immediately precedes it */
              ip6_0 = vlib_buffer_get_current (b0);
              eth0 = (ethernet_header_t *) (((u8 *) ip6_0) - sizeof (*eth0));

              gbp_learn_get_outer (eth0, &outer_src, &outer_dst);

              ge0 = gbp_endpoint_find_ip6 (&ip6_0->src_address, fib_index0);

              if (NULL == ge0)
                {
                  t0 = throttle_check (&glm->gl_l3_throttle,
                                       thread_index,
                                       ip6_address_hash_to_u32
                                       (&ip6_0->src_address), seed);

                  if (!t0)
                    {
                      gbp_learn_ip6_dp (&ip6_0->src_address,
                                        fib_index0, sw_if_index0, epg0,
                                        &outer_src, &outer_dst);
                    }
                }
              else
                {
                  /*
                   * this update could happen simultaneously from multiple
                   * workers but that's ok we are not interested in being
                   * very accurate.
                   */
                  t0 = 0;
                  ge0->ge_last_time = time_now;
                }
            }
          else
            {
              ip4_0 = vlib_buffer_get_current (b0);
              eth0 = (ethernet_header_t *) (((u8 *) ip4_0) - sizeof (*eth0));

              gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
              ge0 = gbp_endpoint_find_ip4 (&ip4_0->src_address, fib_index0);

              if (NULL == ge0)
                {
                  t0 = throttle_check (&glm->gl_l3_throttle, thread_index,
                                       ip4_0->src_address.as_u32, seed);

                  if (!t0)
                    {
                      gbp_learn_ip4_dp (&ip4_0->src_address,
                                        fib_index0, sw_if_index0, epg0,
                                        &outer_src, &outer_dst);
                    }
                }
              else
                {
                  /*
                   * this update could happen simultaneously from multiple
                   * workers but that's ok we are not interested in being
                   * very accurate.
                   */
                  t0 = 0;
                  ge0->ge_last_time = time_now;
                }
            }
        trace:
          if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              gbp_learn_l3_trace_t *t;

              t = vlib_add_trace (vm, node, b0, sizeof (*t));
              if (FIB_PROTOCOL_IP6 == fproto && ip6_0)
                ip46_address_set_ip6 (&t->ip, &ip6_0->src_address);
              if (FIB_PROTOCOL_IP4 == fproto && ip4_0)
                ip46_address_set_ip4 (&t->ip, &ip4_0->src_address);
              t->new = (NULL == ge0);
              t->throttled = t0;
              t->sw_if_index = sw_if_index0;
              t->epg = epg0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
636
637 /* packet trace format function */
638 static u8 *
639 format_gbp_learn_l3_trace (u8 * s, va_list * args)
640 {
641   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
642   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
643   gbp_learn_l3_trace_t *t = va_arg (*args, gbp_learn_l3_trace_t *);
644
645   s = format (s, "new:%d throttled:%d ip:%U itf:%d epg:%d",
646               t->new, t->throttled,
647               format_ip46_address, &t->ip, IP46_TYPE_ANY, t->sw_if_index,
648               t->epg);
649
650   return s;
651 }
652
/* IPv4 entry point: delegate to the shared L3 learning worker */
VLIB_NODE_FN (gbp_learn_ip4_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP4));
}
659
/* IPv6 entry point: delegate to the shared L3 learning worker */
VLIB_NODE_FN (gbp_learn_ip6_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP6));
}
666
/* *INDENT-OFF* */
/* node registrations and unicast feature-arc hookup for L3 learning */
VLIB_REGISTER_NODE (gbp_learn_ip4_node) = {
  .name = "gbp-learn-ip4",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_learn_l3_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
};

VNET_FEATURE_INIT (gbp_learn_ip4, static) =
{
  .arc_name = "ip4-unicast",
  .node_name = "gbp-learn-ip4",
};

VLIB_REGISTER_NODE (gbp_learn_ip6_node) = {
  .name = "gbp-learn-ip6",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_learn_l3_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
};

VNET_FEATURE_INIT (gbp_learn_ip6, static) =
{
  .arc_name = "ip6-unicast",
  .node_name = "gbp-learn-ip6",
};

/* *INDENT-ON* */
695
/**
 * Plugin init: wire up the L2 feature next-node table, size the
 * per-thread learning throttles, and register the log class.
 */
static clib_error_t *
gbp_learn_init (vlib_main_t * vm)
{
  gbp_learn_main_t *glm = &gbp_learn_main;
  vlib_thread_main_t *tm = &vlib_thread_main;

  vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-learn-l2");

  /* Initialize the feature next-node indices */
  feat_bitmap_init_next_nodes (vm,
                               node->index,
                               L2INPUT_N_FEAT,
                               l2input_get_feat_names (),
                               glm->gl_l2_input_feat_next);

  /* one throttle slot per vlib main (worker), at the per-EP learn rate */
  throttle_init (&glm->gl_l2_throttle,
                 tm->n_vlib_mains, GBP_ENDPOINT_HASH_LEARN_RATE);

  throttle_init (&glm->gl_l3_throttle,
                 tm->n_vlib_mains, GBP_ENDPOINT_HASH_LEARN_RATE);

  glm->gl_logger = vlib_log_register_class ("gbp", "learn");

  return 0;
}

VLIB_INIT_FUNCTION (gbp_learn_init);
723
724 /*
725  * fd.io coding-style-patch-verification: ON
726  *
727  * Local Variables:
728  * eval: (c-set-style "gnu")
729  * End:
730  */