GBP: redirect contracts (commit subject)
vpp.git — src/plugins/gbp/gbp_learn.c
1 /*
2  * Copyright (c) 2018 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <plugins/gbp/gbp.h>
17 #include <plugins/gbp/gbp_learn.h>
18 #include <plugins/gbp/gbp_bridge_domain.h>
19 #include <vlibmemory/api.h>
20
21 #include <vnet/util/throttle.h>
22 #include <vnet/l2/l2_input.h>
23 #include <vnet/fib/fib_table.h>
24 #include <vnet/vxlan-gbp/vxlan_gbp_packet.h>
25
/**
 * Grouping of global data for the GBP source EPG classification feature
 */
typedef struct gbp_learn_main_t_
{
  /**
   * Next nodes for L2 output features
   */
  u32 gl_l2_input_feat_next[32];

  /**
   * logger - VLIB log class
   */
  vlib_log_class_t gl_logger;

  /**
   * throttles for the DP learning; one for the L2 path, one for the L3 path
   */
  throttle_t gl_l2_throttle;
  throttle_t gl_l3_throttle;
} gbp_learn_main_t;
47
48 /**
49  * The maximum learning rate per-hashed EP
50  */
51 #define GBP_ENDPOINT_HASH_LEARN_RATE (1e-2)
52
53 static gbp_learn_main_t gbp_learn_main;
54
55 #define GBP_LEARN_DBG(...)                                      \
56     vlib_log_debug (gbp_learn_main.gl_logger, __VA_ARGS__);
57
/* the learn nodes have a single error counter and a single next (drop) */
#define foreach_gbp_learn                      \
  _(DROP,    "drop")

/* error codes generated from the foreach list above */
typedef enum
{
#define _(sym,str) GBP_LEARN_ERROR_##sym,
  foreach_gbp_learn
#undef _
    GBP_LEARN_N_ERROR,
} gbp_learn_error_t;

/* human-readable strings for the error counters, same order as the enum */
static char *gbp_learn_error_strings[] = {
#define _(sym,string) string,
  foreach_gbp_learn
#undef _
};

/* next-node indices for the learn nodes */
typedef enum
{
#define _(sym,str) GBP_LEARN_NEXT_##sym,
  foreach_gbp_learn
#undef _
    GBP_LEARN_N_NEXT,
} gbp_learn_next_t;
82
/**
 * RPC payload: details of an L2 endpoint learnt on a worker thread,
 * installed on the main thread by gbp_learn_l2_cp().
 */
typedef struct gbp_learn_l2_t_
{
  ip46_address_t ip;		/* inner source IP (zero when payload is non-IP) */
  mac_address_t mac;		/* inner source MAC */
  u32 sw_if_index;		/* RX interface of the learnt EP */
  u32 bd_index;			/* bridge-domain in which the EP was seen */
  epg_id_t epg;			/* source EPG carried in the VXLAN-GBP header */
  ip46_address_t outer_src;	/* outer/tunnel source, as received */
  ip46_address_t outer_dst;	/* outer/tunnel destination, as received */
} gbp_learn_l2_t;
93
94
95 static void
96 gbp_learn_l2_cp (const gbp_learn_l2_t * gl2)
97 {
98   ip46_address_t *ips = NULL;
99
100   GBP_LEARN_DBG ("L2 EP: %U %U, %d",
101                  format_mac_address_t, &gl2->mac,
102                  format_ip46_address, &gl2->ip, IP46_TYPE_ANY, gl2->epg);
103
104   vec_add1 (ips, gl2->ip);
105
106   ASSERT (!ip46_address_is_zero (&gl2->outer_src));
107   ASSERT (!ip46_address_is_zero (&gl2->outer_dst));
108
109   /*
110    * flip the source and dst, since that's how it was received, this API
111    * takes how it's sent
112    */
113   gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
114                                 gl2->sw_if_index, ips,
115                                 &gl2->mac, INDEX_INVALID,
116                                 INDEX_INVALID, gl2->epg,
117                                 (GBP_ENDPOINT_FLAG_LEARNT |
118                                  GBP_ENDPOINT_FLAG_REMOTE),
119                                 &gl2->outer_dst, &gl2->outer_src, NULL);
120   vec_free (ips);
121 }
122
123 static void
124 gbp_learn_l2_ip4_dp (const u8 * mac, const ip4_address_t * ip,
125                      u32 bd_index, u32 sw_if_index, epg_id_t epg,
126                      const ip4_address_t * outer_src,
127                      const ip4_address_t * outer_dst)
128 {
129   gbp_learn_l2_t gl2 = {
130     .sw_if_index = sw_if_index,
131     .bd_index = bd_index,
132     .epg = epg,
133     .ip.ip4 = *ip,
134     .outer_src.ip4 = *outer_src,
135     .outer_dst.ip4 = *outer_dst,
136   };
137   mac_address_from_bytes (&gl2.mac, mac);
138
139   ASSERT (!ip46_address_is_zero (&gl2.outer_src));
140   ASSERT (!ip46_address_is_zero (&gl2.outer_dst));
141
142   vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
143 }
144
145 static void
146 gbp_learn_l2_ip6_dp (const u8 * mac, const ip6_address_t * ip,
147                      u32 bd_index, u32 sw_if_index, epg_id_t epg,
148                      const ip4_address_t * outer_src,
149                      const ip4_address_t * outer_dst)
150 {
151   gbp_learn_l2_t gl2 = {
152     .sw_if_index = sw_if_index,
153     .bd_index = bd_index,
154     .epg = epg,
155     .ip.ip6 = *ip,
156     .outer_src.ip4 = *outer_src,
157     .outer_dst.ip4 = *outer_dst,
158   };
159   mac_address_from_bytes (&gl2.mac, mac);
160
161   vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
162 }
163
164 static void
165 gbp_learn_l2_dp (const u8 * mac, u32 bd_index, u32 sw_if_index,
166                  epg_id_t epg,
167                  const ip4_address_t * outer_src,
168                  const ip4_address_t * outer_dst)
169 {
170   gbp_learn_l2_t gl2 = {
171     .sw_if_index = sw_if_index,
172     .bd_index = bd_index,
173     .epg = epg,
174     .outer_src.ip4 = *outer_src,
175     .outer_dst.ip4 = *outer_dst,
176   };
177   mac_address_from_bytes (&gl2.mac, mac);
178
179   vl_api_rpc_call_main_thread (gbp_learn_l2_cp, (u8 *) & gl2, sizeof (gl2));
180 }
181
/**
 * per-packet trace data
 */
typedef struct gbp_learn_l2_trace_t_
{
  /* per-pkt trace data */
  mac_address_t mac;		/* source MAC of the candidate EP */
  u32 sw_if_index;		/* RX interface */
  u32 new;			/* 1 if no endpoint existed for the MAC */
  u32 throttled;		/* 1 if learning was throttled or suppressed */
  u32 epg;			/* source EPG from the GBP header */
  u32 d_bit;			/* GBP D (do-not-learn) bit was set */
} gbp_learn_l2_trace_t;
195
196 always_inline void
197 gbp_learn_get_outer (const ethernet_header_t * eh0,
198                      ip4_address_t * outer_src, ip4_address_t * outer_dst)
199 {
200   ip4_header_t *ip0;
201   u8 *buff;
202
203   /* rewind back to the ivxlan header */
204   buff = (u8 *) eh0;
205   buff -= (sizeof (vxlan_gbp_header_t) +
206            sizeof (udp_header_t) + sizeof (ip4_header_t));
207
208   ip0 = (ip4_header_t *) buff;
209
210   *outer_src = ip0->src_address;
211   *outer_dst = ip0->dst_address;
212 }
213
/**
 * gbp-learn-l2 node function.
 *
 * Learns remote endpoints from VXLAN-GBP encapsulated packets in a
 * bridge-domain. If the source MAC (plus inner IP, when present) is
 * unknown, or the EP has moved interface, an RPC is posted to the main
 * thread to create/update the endpoint. Learning is rate-limited per-EP
 * via a throttle and suppressed when the GBP D-bit is set or the BD is
 * marked do-not-learn.
 */
static uword
gbp_learn_l2 (vlib_main_t * vm,
              vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
  gbp_learn_main_t *glm;
  f64 time_now;

  glm = &gbp_learn_main;
  next_index = 0;
  n_left_from = frame->n_vectors;
  from = vlib_frame_vector_args (frame);
  time_now = vlib_time_now (vm);
  thread_index = vm->thread_index;

  /* per-thread seed for this frame's throttle checks */
  seed = throttle_seed (&glm->gl_l2_throttle, thread_index, time_now);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          ip4_address_t outer_src, outer_dst;
          u32 bi0, sw_if_index0, t0, epg0;
          const ethernet_header_t *eh0;
          gbp_bridge_domain_t *gb0;
          gbp_learn_next_t next0;
          gbp_endpoint_t *ge0;
          vlib_buffer_t *b0;

          next0 = GBP_LEARN_NEXT_DROP;
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];

          eh0 = vlib_buffer_get_current (b0);
          epg0 = vnet_buffer2 (b0)->gbp.src_epg;

          /* this feature is transparent: hand on to the next L2 input
           * feature regardless of the learning outcome */
          next0 = vnet_l2_feature_next (b0, glm->gl_l2_input_feat_next,
                                        L2INPUT_FEAT_GBP_LEARN);

          ge0 = gbp_endpoint_find_mac (eh0->src_address,
                                       vnet_buffer (b0)->l2.bd_index);
          gb0 =
            gbp_bridge_domain_get_by_bd_index (vnet_buffer (b0)->l2.bd_index);

          /* no learning if the packet carries the D (do-not-learn) bit
           * or the bridge-domain has learning disabled */
          if ((vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D) ||
              (gb0->gb_flags & GBP_BD_FLAG_DO_NOT_LEARN))
            {
              t0 = 1;
              goto trace;
            }

          /*
           * check for new EP or a moved EP
           */
          if (NULL == ge0 || ge0->ge_fwd.gef_itf != sw_if_index0)

            {
              /*
               * use the last 4 bytes of the mac address as the hash for the EP
               */
              t0 = throttle_check (&glm->gl_l2_throttle, thread_index,
                                   *((u32 *) (eh0->src_address + 2)), seed);
              if (!t0)
                {
                  /* recover the outer (tunnel) addresses for the EP */
                  gbp_learn_get_outer (eh0, &outer_src, &outer_dst);

                  switch (clib_net_to_host_u16 (eh0->type))
                    {
                    case ETHERNET_TYPE_IP4:
                      {
                        const ip4_header_t *ip0;

                        ip0 = (ip4_header_t *) (eh0 + 1);

                        gbp_learn_l2_ip4_dp (eh0->src_address,
                                             &ip0->src_address,
                                             vnet_buffer (b0)->l2.bd_index,
                                             sw_if_index0, epg0,
                                             &outer_src, &outer_dst);

                        break;
                      }
                    case ETHERNET_TYPE_IP6:
                      {
                        const ip6_header_t *ip0;

                        ip0 = (ip6_header_t *) (eh0 + 1);

                        gbp_learn_l2_ip6_dp (eh0->src_address,
                                             &ip0->src_address,
                                             vnet_buffer (b0)->l2.bd_index,
                                             sw_if_index0, epg0,
                                             &outer_src, &outer_dst);

                        break;
                      }
                    default:
                      /* non-IP payload: learn the MAC only */
                      gbp_learn_l2_dp (eh0->src_address,
                                       vnet_buffer (b0)->l2.bd_index,
                                       sw_if_index0, epg0,
                                       &outer_src, &outer_dst);
                      break;
                    }
                }
            }
          else
            {
              /*
               * this update could happen simultaneously from multiple workers
               * but that's ok we are not interested in being very accurate.
               */
              t0 = 0;
              ge0->ge_last_time = time_now;
            }
        trace:
          if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              gbp_learn_l2_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              clib_memcpy_fast (t->mac.bytes, eh0->src_address, 6);
              t->new = (NULL == ge0);
              t->throttled = t0;
              t->sw_if_index = sw_if_index0;
              t->epg = epg0;
              t->d_bit = ! !(vnet_buffer2 (b0)->gbp.flags &
                             VXLAN_GBP_GPFLAGS_D);
            }

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
364
365 /* packet trace format function */
366 static u8 *
367 format_gbp_learn_l2_trace (u8 * s, va_list * args)
368 {
369   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
370   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
371   gbp_learn_l2_trace_t *t = va_arg (*args, gbp_learn_l2_trace_t *);
372
373   s = format (s, "new:%d throttled:%d d-bit:%d mac:%U itf:%d epg:%d",
374               t->new, t->throttled, t->d_bit,
375               format_mac_address_t, &t->mac, t->sw_if_index, t->epg);
376
377   return s;
378 }
379
/* *INDENT-OFF* */
/* L2 learn node: sits on the L2 input feature arc of GBP bridge-domains */
VLIB_REGISTER_NODE (gbp_learn_l2_node) = {
  .function = gbp_learn_l2,
  .name = "gbp-learn-l2",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_learn_l2_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(gbp_learn_error_strings),
  .error_strings = gbp_learn_error_strings,

  .n_next_nodes = GBP_LEARN_N_NEXT,

  .next_nodes = {
    [GBP_LEARN_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_l2_node, gbp_learn_l2);
/* *INDENT-ON* */
400
/**
 * RPC payload: details of an L3 endpoint learnt on a worker thread,
 * installed on the main thread by gbp_learn_l3_cp().
 */
typedef struct gbp_learn_l3_t_
{
  ip46_address_t ip;		/* learnt source IP */
  u32 fib_index;		/* FIB in which the EP was seen */
  u32 sw_if_index;		/* RX interface of the learnt EP */
  epg_id_t epg;			/* source EPG carried in the VXLAN-GBP header */
  ip46_address_t outer_src;	/* outer/tunnel source, as received */
  ip46_address_t outer_dst;	/* outer/tunnel destination, as received */
} gbp_learn_l3_t;
410
411 static void
412 gbp_learn_l3_cp (const gbp_learn_l3_t * gl3)
413 {
414   ip46_address_t *ips = NULL;
415
416   GBP_LEARN_DBG ("L3 EP: %U, %d", format_ip46_address, &gl3->ip,
417                  IP46_TYPE_ANY, gl3->epg);
418
419   vec_add1 (ips, gl3->ip);
420
421   gbp_endpoint_update_and_lock (GBP_ENDPOINT_SRC_DP,
422                                 gl3->sw_if_index, ips, NULL,
423                                 INDEX_INVALID, INDEX_INVALID, gl3->epg,
424                                 (GBP_ENDPOINT_FLAG_REMOTE |
425                                  GBP_ENDPOINT_FLAG_LEARNT),
426                                 &gl3->outer_dst, &gl3->outer_src, NULL);
427   vec_free (ips);
428 }
429
430 static void
431 gbp_learn_ip4_dp (const ip4_address_t * ip,
432                   u32 fib_index, u32 sw_if_index, epg_id_t epg,
433                   const ip4_address_t * outer_src,
434                   const ip4_address_t * outer_dst)
435 {
436   /* *INDENT-OFF* */
437   gbp_learn_l3_t gl3 = {
438     .ip = {
439       .ip4 = *ip,
440     },
441     .sw_if_index = sw_if_index,
442     .fib_index = fib_index,
443     .epg = epg,
444     .outer_src.ip4 = *outer_src,
445     .outer_dst.ip4 = *outer_dst,
446   };
447   /* *INDENT-ON* */
448
449   vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
450 }
451
452 static void
453 gbp_learn_ip6_dp (const ip6_address_t * ip,
454                   u32 fib_index, u32 sw_if_index, epg_id_t epg,
455                   const ip4_address_t * outer_src,
456                   const ip4_address_t * outer_dst)
457 {
458   /* *INDENT-OFF* */
459   gbp_learn_l3_t gl3 = {
460     .ip = {
461       .ip6 = *ip,
462     },
463     .sw_if_index = sw_if_index,
464     .fib_index = fib_index,
465     .epg = epg,
466     .outer_src.ip4 = *outer_src,
467     .outer_dst.ip4 = *outer_dst,
468   };
469   /* *INDENT-ON* */
470
471   vl_api_rpc_call_main_thread (gbp_learn_l3_cp, (u8 *) & gl3, sizeof (gl3));
472 }
473
/**
 * per-packet trace data
 */
typedef struct gbp_learn_l3_trace_t_
{
  /* per-pkt trace data */
  ip46_address_t ip;		/* source IP of the candidate EP */
  u32 sw_if_index;		/* RX interface */
  u32 new;			/* 1 if no endpoint existed for the IP */
  u32 throttled;		/* 1 if learning was throttled or suppressed */
  u32 epg;			/* source EPG from the GBP header */
} gbp_learn_l3_trace_t;
486
/**
 * Shared node function for gbp-learn-ip4/ip6.
 *
 * Learns remote L3 endpoints from VXLAN-GBP encapsulated packets on the
 * unicast feature arcs. A previously-unseen source IP triggers a
 * (throttled) RPC to the main thread to create the endpoint; a known EP
 * just has its last-seen time refreshed. The GBP D-bit suppresses
 * learning. @a fproto selects the IPv4 or IPv6 handling.
 */
static uword
gbp_learn_l3 (vlib_main_t * vm,
              vlib_node_runtime_t * node, vlib_frame_t * frame,
              fib_protocol_t fproto)
{
  u32 n_left_from, *from, *to_next, next_index, thread_index, seed;
  gbp_learn_main_t *glm;
  f64 time_now;

  glm = &gbp_learn_main;
  next_index = 0;
  n_left_from = frame->n_vectors;
  from = vlib_frame_vector_args (frame);
  time_now = vlib_time_now (vm);
  thread_index = vm->thread_index;

  /* per-thread seed for this frame's throttle checks */
  seed = throttle_seed (&glm->gl_l3_throttle, thread_index, time_now);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, sw_if_index0, t0, epg0, fib_index0;
          CLIB_UNUSED (const ip4_header_t *) ip4_0;
          CLIB_UNUSED (const ip6_header_t *) ip6_0;
          ip4_address_t outer_src, outer_dst;
          ethernet_header_t *eth0;
          gbp_learn_next_t next0;
          gbp_endpoint_t *ge0;
          vlib_buffer_t *b0;

          next0 = GBP_LEARN_NEXT_DROP;
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
          epg0 = vnet_buffer2 (b0)->gbp.src_epg;
          ip6_0 = NULL;
          ip4_0 = NULL;

          /* transparent feature: hand on to the next node on the arc */
          vnet_feature_next (&next0, b0);

          /* D-bit set => the sender forbids learning from this packet */
          if (vnet_buffer2 (b0)->gbp.flags & VXLAN_GBP_GPFLAGS_D)
            {
              t0 = 1;
              ge0 = NULL;
              goto trace;
            }

          fib_index0 = fib_table_get_index_for_sw_if_index (fproto,
                                                            sw_if_index0);

          if (FIB_PROTOCOL_IP6 == fproto)
            {
              ip6_0 = vlib_buffer_get_current (b0);
              /* the inner ethernet header immediately precedes the
               * current (L3) data */
              eth0 = (ethernet_header_t *) (((u8 *) ip6_0) - sizeof (*eth0));

              gbp_learn_get_outer (eth0, &outer_src, &outer_dst);

              ge0 = gbp_endpoint_find_ip6 (&ip6_0->src_address, fib_index0);

              if (NULL == ge0)
                {
                  t0 = throttle_check (&glm->gl_l3_throttle,
                                       thread_index,
                                       ip6_address_hash_to_u32
                                       (&ip6_0->src_address), seed);

                  if (!t0)
                    {
                      gbp_learn_ip6_dp (&ip6_0->src_address,
                                        fib_index0, sw_if_index0, epg0,
                                        &outer_src, &outer_dst);
                    }
                }
              else
                {
                  /*
                   * this update could happen simultaneously from multiple
                   * workers but that's ok we are not interested in being
                   * very accurate.
                   */
                  t0 = 0;
                  ge0->ge_last_time = time_now;
                }
            }
          else
            {
              ip4_0 = vlib_buffer_get_current (b0);
              eth0 = (ethernet_header_t *) (((u8 *) ip4_0) - sizeof (*eth0));

              gbp_learn_get_outer (eth0, &outer_src, &outer_dst);
              ge0 = gbp_endpoint_find_ip4 (&ip4_0->src_address, fib_index0);

              if (NULL == ge0)
                {
                  t0 = throttle_check (&glm->gl_l3_throttle, thread_index,
                                       ip4_0->src_address.as_u32, seed);

                  if (!t0)
                    {
                      gbp_learn_ip4_dp (&ip4_0->src_address,
                                        fib_index0, sw_if_index0, epg0,
                                        &outer_src, &outer_dst);
                    }
                }
              else
                {
                  /*
                   * this update could happen simultaneously from multiple
                   * workers but that's ok we are not interested in being
                   * very accurate.
                   */
                  t0 = 0;
                  ge0->ge_last_time = time_now;
                }
            }
        trace:
          if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              gbp_learn_l3_trace_t *t;

              t = vlib_add_trace (vm, node, b0, sizeof (*t));
              if (FIB_PROTOCOL_IP6 == fproto && ip6_0)
                ip46_address_set_ip6 (&t->ip, &ip6_0->src_address);
              if (FIB_PROTOCOL_IP4 == fproto && ip4_0)
                ip46_address_set_ip4 (&t->ip, &ip4_0->src_address);
              t->new = (NULL == ge0);
              t->throttled = t0;
              t->sw_if_index = sw_if_index0;
              t->epg = epg0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
639
640 /* packet trace format function */
641 static u8 *
642 format_gbp_learn_l3_trace (u8 * s, va_list * args)
643 {
644   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
645   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
646   gbp_learn_l3_trace_t *t = va_arg (*args, gbp_learn_l3_trace_t *);
647
648   s = format (s, "new:%d throttled:%d ip:%U itf:%d epg:%d",
649               t->new, t->throttled,
650               format_ip46_address, &t->ip, IP46_TYPE_ANY, t->sw_if_index,
651               t->epg);
652
653   return s;
654 }
655
656 static uword
657 gbp_learn_ip4 (vlib_main_t * vm,
658                vlib_node_runtime_t * node, vlib_frame_t * frame)
659 {
660   return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP4));
661 }
662
663 static uword
664 gbp_learn_ip6 (vlib_main_t * vm,
665                vlib_node_runtime_t * node, vlib_frame_t * frame)
666 {
667   return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP6));
668 }
669
/* *INDENT-OFF* */
/* L3 learn node registrations; each sits on its unicast feature arc */
VLIB_REGISTER_NODE (gbp_learn_ip4_node) = {
  .function = gbp_learn_ip4,
  .name = "gbp-learn-ip4",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_learn_l3_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
};

VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_ip4_node, gbp_learn_ip4);

VNET_FEATURE_INIT (gbp_learn_ip4, static) =
{
  .arc_name = "ip4-unicast",
  .node_name = "gbp-learn-ip4",
};

VLIB_REGISTER_NODE (gbp_learn_ip6_node) = {
  .function = gbp_learn_ip6,
  .name = "gbp-learn-ip6",
  .vector_size = sizeof (u32),
  .format_trace = format_gbp_learn_l3_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
};

VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_ip6_node, gbp_learn_ip6);

VNET_FEATURE_INIT (gbp_learn_ip6, static) =
{
  .arc_name = "ip6-unicast",
  .node_name = "gbp-learn-ip6",
};

/* *INDENT-ON* */
704
705 void
706 gbp_learn_enable (u32 sw_if_index, gbb_learn_mode_t mode)
707 {
708   if (GBP_LEARN_MODE_L2 == mode)
709     l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 1);
710   else
711     {
712       vnet_feature_enable_disable ("ip4-unicast",
713                                    "gbp-learn-ip4", sw_if_index, 1, 0, 0);
714       vnet_feature_enable_disable ("ip6-unicast",
715                                    "gbp-learn-ip6", sw_if_index, 1, 0, 0);
716     }
717 }
718
719 void
720 gbp_learn_disable (u32 sw_if_index, gbb_learn_mode_t mode)
721 {
722   if (GBP_LEARN_MODE_L2 == mode)
723     l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 0);
724   else
725     {
726       vnet_feature_enable_disable ("ip4-unicast",
727                                    "gbp-learn-ip4", sw_if_index, 0, 0, 0);
728       vnet_feature_enable_disable ("ip6-unicast",
729                                    "gbp-learn-ip6", sw_if_index, 0, 0, 0);
730     }
731 }
732
733 static clib_error_t *
734 gbp_learn_init (vlib_main_t * vm)
735 {
736   gbp_learn_main_t *glm = &gbp_learn_main;
737   vlib_thread_main_t *tm = &vlib_thread_main;
738
739   /* Initialize the feature next-node indices */
740   feat_bitmap_init_next_nodes (vm,
741                                gbp_learn_l2_node.index,
742                                L2INPUT_N_FEAT,
743                                l2input_get_feat_names (),
744                                glm->gl_l2_input_feat_next);
745
746   throttle_init (&glm->gl_l2_throttle,
747                  tm->n_vlib_mains, GBP_ENDPOINT_HASH_LEARN_RATE);
748
749   throttle_init (&glm->gl_l3_throttle,
750                  tm->n_vlib_mains, GBP_ENDPOINT_HASH_LEARN_RATE);
751
752   glm->gl_logger = vlib_log_register_class ("gbp", "learn");
753
754   return 0;
755 }
756
757 VLIB_INIT_FUNCTION (gbp_learn_init);
758
759 /*
760  * fd.io coding-style-patch-verification: ON
761  *
762  * Local Variables:
763  * eval: (c-set-style "gnu")
764  * End:
765  */