/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <plugins/gbp/gbp_vxlan.h>
#include <plugins/gbp/gbp_itf.h>
#include <plugins/gbp/gbp_learn.h>
#include <plugins/gbp/gbp_bridge_domain.h>
#include <plugins/gbp/gbp_route_domain.h>

#include <vnet/vxlan-gbp/vxlan_gbp.h>
#include <vlibmemory/api.h>
#include <vnet/fib/fib_table.h>
/**
 * A reference to a VXLAN-GBP tunnel created as a child/dependent tunnel
 * of the template GBP-VXLAN tunnel
 */
typedef struct vxlan_tunnel_ref_t_
{
  u32 vxr_sw_if_index;
  index_t vxr_itf;
  index_t vxr_parent;
  u32 vxr_locks;
  gbp_vxlan_tunnel_layer_t vxr_layer;
} vxlan_tunnel_ref_t;
/**
 * Logger
 */
vlib_log_class_t gt_logger;

/**
 * Pool of template tunnels
 */
gbp_vxlan_tunnel_t *gbp_vxlan_tunnel_pool;

/**
 * Pool of child tunnels
 */
vxlan_tunnel_ref_t *vxlan_tunnel_ref_pool;

/**
 * DB of template interfaces by SW interface index
 */
index_t *gbp_vxlan_tunnel_db;

/**
 * DB of child interfaces by SW interface index
 */
index_t *vxlan_tunnel_ref_db;

/**
 * DB of template tunnels by VNI
 */
uword *gv_db;
static char *gbp_vxlan_tunnel_layer_strings[] = {
#define _(n,s) [GBP_VXLAN_TUN_##n] = s,
  foreach_gbp_vxlan_tunnel_layer
#undef _
};
#define GBP_VXLAN_TUN_DBG(...)                 \
  vlib_log_debug (gt_logger, __VA_ARGS__);
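/**
 * Pool accessors. The returned pointers are only valid until the next
 * add/delete on the corresponding pool, so callers should not cache
 * them across such operations.
 */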
always_inline gbp_vxlan_tunnel_t *
gbp_vxlan_tunnel_get (index_t gti)
{
  return (pool_elt_at_index (gbp_vxlan_tunnel_pool, gti));
}

static vxlan_tunnel_ref_t *
vxlan_tunnel_ref_get (index_t vxri)
{
  return (pool_elt_at_index (vxlan_tunnel_ref_pool, vxri));
}
static u8 *
format_vxlan_tunnel_ref (u8 * s, va_list * args)
{
  index_t vxri = va_arg (*args, u32);
  vxlan_tunnel_ref_t *vxr;

  vxr = vxlan_tunnel_ref_get (vxri);

  s = format (s, "[%U locks:%d]", format_vnet_sw_if_index_name,
	      vnet_get_main (), vxr->vxr_sw_if_index, vxr->vxr_locks);

  return (s);
}
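/**
 * Add a child/dependent VXLAN-GBP tunnel under the template tunnel 'gt'.
 * If a tunnel to this src/dst pair already exists, the existing child is
 * found via the sw_if_index the underlay returns and its lock count is
 * incremented; otherwise a new child is created and plumbed into the
 * parent's bridge or route domain according to the template's layer.
 */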
static u32
gbp_vxlan_dep_add (gbp_vxlan_tunnel_t * gt,
		   u32 vni,
		   const ip46_address_t * src, const ip46_address_t * dst)
{
  vnet_vxlan_gbp_tunnel_add_del_args_t args = {
    .is_add = 1,
    .is_ip6 = !ip46_address_is_ip4 (src),
    .vni = vni,
    .src = *src,
    .dst = *dst,
    .instance = ~0,
    .mode = (GBP_VXLAN_TUN_L2 == gt->gt_layer ?
	     VXLAN_GBP_TUNNEL_MODE_L2 : VXLAN_GBP_TUNNEL_MODE_L3),
  };
  vxlan_tunnel_ref_t *vxr;
  u32 sw_if_index;
  index_t vxri;
  int rv;

  sw_if_index = ~0;
  rv = vnet_vxlan_gbp_tunnel_add_del (&args, &sw_if_index);

  if (VNET_API_ERROR_TUNNEL_EXIST == rv)
    {
      vxri = vxlan_tunnel_ref_db[sw_if_index];

      vxr = vxlan_tunnel_ref_get (vxri);
      vxr->vxr_locks++;
    }
  else if (0 == rv)
    {
      ASSERT (~0 != sw_if_index);
      GBP_VXLAN_TUN_DBG ("add-dep:%U %U %U %d", format_vnet_sw_if_index_name,
			 vnet_get_main (), sw_if_index,
			 format_ip46_address, src, IP46_TYPE_ANY,
			 format_ip46_address, dst, IP46_TYPE_ANY, vni);

      pool_get_zero (vxlan_tunnel_ref_pool, vxr);

      vxri = (vxr - vxlan_tunnel_ref_pool);
      vxr->vxr_parent = gt - gbp_vxlan_tunnel_pool;
      vxr->vxr_sw_if_index = sw_if_index;
      vxr->vxr_locks = 1;
      vxr->vxr_layer = gt->gt_layer;

      /*
       * store the child both on the parent's list and the global DB
       */
      vec_add1 (gt->gt_tuns, vxri);

      vec_validate_init_empty (vxlan_tunnel_ref_db,
			       vxr->vxr_sw_if_index, INDEX_INVALID);
      vxlan_tunnel_ref_db[vxr->vxr_sw_if_index] = vxri;

      if (GBP_VXLAN_TUN_L2 == vxr->vxr_layer)
	{
	  vxr->vxr_itf = gbp_itf_add_and_lock (vxr->vxr_sw_if_index,
					       gt->gt_bd_index);

	  gbp_itf_set_l2_output_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
					 L2OUTPUT_FEAT_GBP_POLICY_MAC);
	  gbp_itf_set_l2_input_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
					L2INPUT_FEAT_GBP_LEARN);
	}
      else
	{
	  const gbp_route_domain_t *grd;
	  fib_protocol_t fproto;

	  grd = gbp_route_domain_get (gt->gt_grd);

	  FOR_EACH_FIB_IP_PROTOCOL (fproto)
	    ip_table_bind (fproto, vxr->vxr_sw_if_index,
			   grd->grd_table_id[fproto], 1);

	  gbp_learn_enable (vxr->vxr_sw_if_index, GBP_LEARN_MODE_L3);
	}
    }

  return (sw_if_index);
}
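/**
 * Map a child tunnel's sw_if_index to the sw_if_index of its parent
 * template tunnel. The caller must pass an interface that is known to
 * be a child; the ASSERT enforces this in debug images.
 */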
u32
vxlan_gbp_tunnel_get_parent (u32 sw_if_index)
{
  ASSERT ((sw_if_index < vec_len (vxlan_tunnel_ref_db)) &&
	  (INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index]));

  gbp_vxlan_tunnel_t *gt;
  vxlan_tunnel_ref_t *vxr;

  vxr = vxlan_tunnel_ref_get (vxlan_tunnel_ref_db[sw_if_index]);
  gt = gbp_vxlan_tunnel_get (vxr->vxr_parent);

  return (gt->gt_sw_if_index);
}
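/**
 * Classify an interface as either a child VXLAN-GBP tunnel or a
 * GBP-VXLAN template tunnel. The template type is also returned for an
 * unknown interface, after the debug-image ASSERT fires.
 */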
gbp_vxlan_tunnel_type_t
gbp_vxlan_tunnel_get_type (u32 sw_if_index)
{
  if (sw_if_index < vec_len (vxlan_tunnel_ref_db) &&
      INDEX_INVALID != vxlan_tunnel_ref_db[sw_if_index])
    {
      return (VXLAN_GBP_TUNNEL);
    }
  else if (sw_if_index < vec_len (gbp_vxlan_tunnel_db) &&
	   INDEX_INVALID != gbp_vxlan_tunnel_db[sw_if_index])
    {
      return (GBP_VXLAN_TEMPLATE_TUNNEL);
    }

  ASSERT (0);
  return (GBP_VXLAN_TEMPLATE_TUNNEL);
}
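/**
 * Create and lock a child tunnel of the template identified by
 * sw_if_index, keyed by the given underlay src/dst addresses; the VNI
 * is inherited from the template.
 */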
u32
gbp_vxlan_tunnel_clone_and_lock (u32 sw_if_index,
				 const ip46_address_t * src,
				 const ip46_address_t * dst)
{
  gbp_vxlan_tunnel_t *gt;
  index_t gti;

  gti = gbp_vxlan_tunnel_db[sw_if_index];

  if (INDEX_INVALID == gti)
    return (~0);

  gt = pool_elt_at_index (gbp_vxlan_tunnel_pool, gti);

  return (gbp_vxlan_dep_add (gt, gt->gt_vni, src, dst));
}
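/**
 * Delete a child tunnel: remove it from the parent's list and the
 * global DB, undo the L2 features or L3 table bindings applied at
 * create time, then delete the underlay VXLAN-GBP tunnel itself.
 */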
static void
gbp_vxlan_dep_del (index_t vxri)
{
  vxlan_tunnel_ref_t *vxr;
  gbp_vxlan_tunnel_t *gt;
  u32 pos;

  vxr = vxlan_tunnel_ref_get (vxri);
  gt = gbp_vxlan_tunnel_get (vxr->vxr_parent);

  GBP_VXLAN_TUN_DBG ("del-dep:%U", format_vxlan_tunnel_ref, vxri);

  vxlan_tunnel_ref_db[vxr->vxr_sw_if_index] = INDEX_INVALID;
  pos = vec_search (gt->gt_tuns, vxri);

  ASSERT (~0 != pos);
  vec_del1 (gt->gt_tuns, pos);

  if (GBP_VXLAN_TUN_L2 == vxr->vxr_layer)
    {
      gbp_itf_set_l2_output_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
				     L2OUTPUT_FEAT_NONE);
      gbp_itf_set_l2_input_feature (vxr->vxr_itf, vxr->vxr_sw_if_index,
				    L2INPUT_FEAT_NONE);
      gbp_itf_unlock (vxr->vxr_itf);
    }
  else
    {
      fib_protocol_t fproto;

      FOR_EACH_FIB_IP_PROTOCOL (fproto)
	ip_table_bind (fproto, vxr->vxr_sw_if_index, 0, 0);
    }

  vnet_vxlan_gbp_tunnel_del (vxr->vxr_sw_if_index);

  pool_put (vxlan_tunnel_ref_pool, vxr);
}
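/**
 * Lock/unlock pairs for child tunnels, keyed by sw_if_index. A child is
 * reference counted and destroyed when its last lock is released.
 */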
void
vxlan_gbp_tunnel_unlock (u32 sw_if_index)
{
  vxlan_tunnel_ref_t *vxr;
  index_t vxri;

  vxri = vxlan_tunnel_ref_db[sw_if_index];

  ASSERT (vxri != INDEX_INVALID);

  vxr = vxlan_tunnel_ref_get (vxri);
  vxr->vxr_locks--;

  if (0 == vxr->vxr_locks)
    {
      gbp_vxlan_dep_del (vxri);
    }
}
void
vxlan_gbp_tunnel_lock (u32 sw_if_index)
{
  vxlan_tunnel_ref_t *vxr;
  index_t vxri;

  vxri = vxlan_tunnel_ref_db[sw_if_index];

  ASSERT (vxri != INDEX_INVALID);

  vxr = vxlan_tunnel_ref_get (vxri);
  vxr->vxr_locks++;
}
#define foreach_gbp_vxlan_input_next         \
  _(DROP, "error-drop")                      \
  _(L2_INPUT, "l2-input")                    \
  _(IP4_INPUT, "ip4-input")                  \
  _(IP6_INPUT, "ip6-input")

typedef enum
{
#define _(s,n) GBP_VXLAN_INPUT_NEXT_##s,
  foreach_gbp_vxlan_input_next
#undef _
  GBP_VXLAN_INPUT_N_NEXT,
} gbp_vxlan_input_next_t;
#define foreach_gbp_vxlan_error              \
  _(DECAPPED, "decapped")                    \
  _(LEARNED, "learned")

typedef enum
{
#define _(s,n) GBP_VXLAN_ERROR_##s,
  foreach_gbp_vxlan_error
#undef _
  GBP_VXLAN_N_ERROR,
} gbp_vxlan_input_error_t;

static char *gbp_vxlan_error_strings[] = {
#define _(n,s) s,
  foreach_gbp_vxlan_error
#undef _
};
typedef struct gbp_vxlan_trace_t_
{
  u8 dropped;
  u32 vni;
  u32 sw_if_index;
  u16 sclass;
  u8 flags;
} gbp_vxlan_trace_t;
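/**
 * Decap dispatch for packets punted from the vxlan-gbp input arc on the
 * no-tunnel path: look up the template tunnel by VNI. L2 tunnels are
 * sent to l2-input in the BD; L3 tunnels have the Ethernet header
 * stripped and are injected into ip4/ip6-input in the RD.
 */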
static uword
gbp_vxlan_decap (vlib_main_t * vm,
		 vlib_node_runtime_t * node,
		 vlib_frame_t * from_frame, u8 is_ip4)
{
  u32 n_left_to_next, n_left_from, next_index, *to_next, *from;

  next_index = 0;
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  vxlan_gbp_header_t *vxlan_gbp0;
	  gbp_vxlan_input_next_t next0;
	  gbp_vxlan_tunnel_t *gt0;
	  vlib_buffer_t *b0;
	  u32 bi0, vni0;
	  uword *p;

	  bi0 = to_next[0] = from[0];
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;
	  next0 = GBP_VXLAN_INPUT_NEXT_DROP;

	  b0 = vlib_get_buffer (vm, bi0);
	  vxlan_gbp0 =
	    vlib_buffer_get_current (b0) - sizeof (vxlan_gbp_header_t);

	  vni0 = vxlan_gbp_get_vni (vxlan_gbp0);
	  p = hash_get (gv_db, vni0);

	  if (PREDICT_FALSE (NULL == p))
	    {
	      gt0 = NULL;
	      next0 = GBP_VXLAN_INPUT_NEXT_DROP;
	    }
	  else
	    {
	      gt0 = gbp_vxlan_tunnel_get (p[0]);

	      vnet_buffer (b0)->sw_if_index[VLIB_RX] = gt0->gt_sw_if_index;

	      if (GBP_VXLAN_TUN_L2 == gt0->gt_layer)
		/*
		 * An L2 layer tunnel goes into the BD
		 */
		next0 = GBP_VXLAN_INPUT_NEXT_L2_INPUT;
	      else
		{
		  /*
		   * An L3 layer tunnel needs to strip the L2 header
		   * and inject into the RD
		   */
		  ethernet_header_t *e0;
		  u16 type0;

		  e0 = vlib_buffer_get_current (b0);
		  type0 = clib_net_to_host_u16 (e0->type);
		  switch (type0)
		    {
		    case ETHERNET_TYPE_IP4:
		      next0 = GBP_VXLAN_INPUT_NEXT_IP4_INPUT;
		      break;
		    case ETHERNET_TYPE_IP6:
		      next0 = GBP_VXLAN_INPUT_NEXT_IP6_INPUT;
		      break;
		    default:
		      break;
		    }
		  vlib_buffer_advance (b0, sizeof (*e0));
		}
	    }

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      gbp_vxlan_trace_t *tr;

	      tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
	      tr->dropped = (next0 == GBP_VXLAN_INPUT_NEXT_DROP);
	      tr->vni = vni0;
	      tr->sw_if_index = (gt0 ? gt0->gt_sw_if_index : ~0);
	      tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0);
	      tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0);
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return from_frame->n_vectors;
}
static u8 *
format_gbp_vxlan_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gbp_vxlan_trace_t *t = va_arg (*args, gbp_vxlan_trace_t *);

  s = format (s, "vni:%d dropped:%d rx:%d sclass:%d flags:%U",
	      t->vni, t->dropped, t->sw_if_index,
	      t->sclass, format_vxlan_gbp_header_gpflags, t->flags);

  return (s);
}
static uword
gbp_vxlan4_decap (vlib_main_t * vm,
		  vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return gbp_vxlan_decap (vm, node, from_frame, 1);
}
VLIB_REGISTER_NODE (gbp_vxlan4_input_node) =
{
  .function = gbp_vxlan4_decap,
  .name = "gbp-vxlan4",
  .vector_size = sizeof (u32),
  .n_errors = GBP_VXLAN_N_ERROR,
  .error_strings = gbp_vxlan_error_strings,
  .n_next_nodes = GBP_VXLAN_INPUT_N_NEXT,
  .format_trace = format_gbp_vxlan_rx_trace,
  .next_nodes = {
#define _(s,n) [GBP_VXLAN_INPUT_NEXT_##s] = n,
    foreach_gbp_vxlan_input_next
#undef _
  },
};
VLIB_NODE_FUNCTION_MULTIARCH (gbp_vxlan4_input_node, gbp_vxlan4_decap)
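/**
 * Walk all template tunnels, stopping early if the callback does not
 * return WALK_CONTINUE.
 */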
void
gbp_vxlan_walk (gbp_vxlan_cb_t cb, void *ctx)
{
  gbp_vxlan_tunnel_t *gt;

  pool_foreach (gt, gbp_vxlan_tunnel_pool,
    ({
      if (WALK_CONTINUE != cb(gt, ctx))
	break;
    }));
}

static walk_rc_t
gbp_vxlan_tunnel_show_one (gbp_vxlan_tunnel_t * gt, void *ctx)
{
  vlib_cli_output (ctx, "%U", format_gbp_vxlan_tunnel,
		   gt - gbp_vxlan_tunnel_pool);

  return (WALK_CONTINUE);
}
static u8 *
format_gbp_vxlan_tunnel_name (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);

  return format (s, "gbp-vxlan-%d", dev_instance);
}
static u8 *
format_gbp_vxlan_tunnel_layer (u8 * s, va_list * args)
{
  gbp_vxlan_tunnel_layer_t gl = va_arg (*args, gbp_vxlan_tunnel_layer_t);
  s = format (s, "%s", gbp_vxlan_tunnel_layer_strings[gl]);

  return (s);
}
static u8 *
format_gbp_vxlan_tunnel (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  CLIB_UNUSED (int verbose) = va_arg (*args, int);
  gbp_vxlan_tunnel_t *gt = gbp_vxlan_tunnel_get (dev_instance);
  index_t *vxri;

  s = format (s, "GBP VXLAN tunnel: hw:%d sw:%d vni:%d %U",
	      gt->gt_hw_if_index, gt->gt_sw_if_index, gt->gt_vni,
	      format_gbp_vxlan_tunnel_layer, gt->gt_layer);
  if (GBP_VXLAN_TUN_L2 == gt->gt_layer)
    s = format (s, " BD:%d bd-index:%d", gt->gt_bd_rd_id, gt->gt_bd_index);
  else
    s = format (s, " RD:%d fib-index:[%d,%d]",
		gt->gt_bd_rd_id,
		gt->gt_fib_index[FIB_PROTOCOL_IP4],
		gt->gt_fib_index[FIB_PROTOCOL_IP6]);

  s = format (s, " children:[");
  vec_foreach (vxri, gt->gt_tuns)
  {
    s = format (s, "%U, ", format_vxlan_tunnel_ref, *vxri);
  }
  s = format (s, "]");

  return (s);
}
typedef struct gbp_vxlan_tx_trace_t_
{
  u32 vni;
} gbp_vxlan_tx_trace_t;

static u8 *
format_gbp_vxlan_tx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gbp_vxlan_tx_trace_t *t = va_arg (*args, gbp_vxlan_tx_trace_t *);

  s = format (s, "GBP-VXLAN: vni:%d", t->vni);

  return (s);
}
static clib_error_t *
gbp_vxlan_interface_admin_up_down (vnet_main_t * vnm,
				   u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hi;
  u32 ti;

  hi = vnet_get_hw_interface (vnm, hw_if_index);

  if (NULL == gbp_vxlan_tunnel_db ||
      hi->sw_if_index >= vec_len (gbp_vxlan_tunnel_db))
    return (NULL);

  ti = gbp_vxlan_tunnel_db[hi->sw_if_index];

  if (INDEX_INVALID == ti)
    /* not one of ours */
    return (NULL);

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    vnet_hw_interface_set_flags (vnm, hw_if_index,
				 VNET_HW_INTERFACE_FLAG_LINK_UP);
  else
    vnet_hw_interface_set_flags (vnm, hw_if_index, 0);

  return (NULL);
}
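/**
 * The template interface should never transmit; traffic is forwarded
 * through the child vxlan-gbp tunnels instead, so reaching this TX
 * function indicates a plumbing error (and the buffers are not freed).
 */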
static uword
gbp_vxlan_interface_tx (vlib_main_t * vm,
			vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  clib_warning ("you shouldn't be here, leaking buffers...");
  return frame->n_vectors;
}
VNET_DEVICE_CLASS (gbp_vxlan_device_class) = {
  .name = "GBP VXLAN tunnel-template",
  .format_device_name = format_gbp_vxlan_tunnel_name,
  .format_device = format_gbp_vxlan_tunnel,
  .format_tx_trace = format_gbp_vxlan_tx_trace,
  .admin_up_down_function = gbp_vxlan_interface_admin_up_down,
  .tx_function = gbp_vxlan_interface_tx,
};

VNET_HW_INTERFACE_CLASS (gbp_vxlan_hw_interface_class) = {
  .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
};
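/**
 * Create a GBP-VXLAN template tunnel for 'vni' at the given layer.
 * For an L2 tunnel bd_rd_id names a GBP bridge domain; for an L3 tunnel
 * it names a GBP route domain. The domain is locked for the lifetime of
 * the tunnel. A sketch of control-plane usage (the VNI and domain ID
 * values here are hypothetical):
 *
 *   u32 sw_if_index;
 *   int rv;
 *
 *   rv = gbp_vxlan_tunnel_add (99, GBP_VXLAN_TUN_L2, 1, &sw_if_index);
 *   if (0 == rv)
 *     ;  // sw_if_index now identifies the new template interface
 */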
int
gbp_vxlan_tunnel_add (u32 vni, gbp_vxlan_tunnel_layer_t layer,
		      u32 bd_rd_id, u32 * sw_if_indexp)
{
  gbp_vxlan_tunnel_t *gt;
  index_t gti;
  uword *p;
  int rv;

  rv = 0;
  p = hash_get (gv_db, vni);

  GBP_VXLAN_TUN_DBG ("add: %d %d %d", vni, layer, bd_rd_id);

  if (NULL == p)
    {
      vnet_sw_interface_t *si;
      vnet_hw_interface_t *hi;
      index_t gbi, grdi;
      vnet_main_t *vnm;

      gbi = grdi = INDEX_INVALID;

      if (layer == GBP_VXLAN_TUN_L2)
	{
	  gbi = gbp_bridge_domain_find_and_lock (bd_rd_id);

	  if (INDEX_INVALID == gbi)
	    {
	      return (VNET_API_ERROR_BD_NOT_MODIFIABLE);
	    }
	}
      else
	{
	  grdi = gbp_route_domain_find_and_lock (bd_rd_id);

	  if (INDEX_INVALID == grdi)
	    {
	      return (VNET_API_ERROR_NO_SUCH_FIB);
	    }
	}

      vnm = vnet_get_main ();
      pool_get (gbp_vxlan_tunnel_pool, gt);
      gti = gt - gbp_vxlan_tunnel_pool;

      gt->gt_vni = vni;
      gt->gt_layer = layer;
      gt->gt_bd_rd_id = bd_rd_id;
      gt->gt_hw_if_index = vnet_register_interface (vnm,
						    gbp_vxlan_device_class.index,
						    gti,
						    gbp_vxlan_hw_interface_class.index,
						    gti);

      hi = vnet_get_hw_interface (vnm, gt->gt_hw_if_index);

      gt->gt_sw_if_index = hi->sw_if_index;

      /* don't flood packets in a BD to these interfaces */
      si = vnet_get_sw_interface (vnm, gt->gt_sw_if_index);
      si->flood_class = VNET_FLOOD_CLASS_NO_FLOOD;

      if (layer == GBP_VXLAN_TUN_L2)
	{
	  gbp_bridge_domain_t *gb;

	  gb = gbp_bridge_domain_get (gbi);

	  gt->gt_gbd = gbi;
	  gt->gt_bd_index = gb->gb_bd_id;
	  gb->gb_vni_sw_if_index = gt->gt_sw_if_index;
	  /* set it up as a GBP interface */
	  gt->gt_itf = gbp_itf_add_and_lock (gt->gt_sw_if_index,
					     gt->gt_bd_index);
	  gbp_learn_enable (gt->gt_sw_if_index, GBP_LEARN_MODE_L2);
	}
      else
	{
	  gbp_route_domain_t *grd;
	  fib_protocol_t fproto;

	  grd = gbp_route_domain_get (grdi);

	  gt->gt_grd = grdi;
	  grd->grd_vni_sw_if_index = gt->gt_sw_if_index;

	  gbp_learn_enable (gt->gt_sw_if_index, GBP_LEARN_MODE_L3);

	  ip4_sw_interface_enable_disable (gt->gt_sw_if_index, 1);
	  ip6_sw_interface_enable_disable (gt->gt_sw_if_index, 1);

	  FOR_EACH_FIB_IP_PROTOCOL (fproto)
	  {
	    gt->gt_fib_index[fproto] = grd->grd_fib_index[fproto];

	    ip_table_bind (fproto, gt->gt_sw_if_index,
			   grd->grd_table_id[fproto], 1);
	  }
	}

      /*
       * save the tunnel by VNI and by sw_if_index
       */
      hash_set (gv_db, vni, gti);

      vec_validate (gbp_vxlan_tunnel_db, gt->gt_sw_if_index);
      gbp_vxlan_tunnel_db[gt->gt_sw_if_index] = gti;

      if (sw_if_indexp)
	*sw_if_indexp = gt->gt_sw_if_index;

      vxlan_gbp_register_udp_ports ();
    }
  else
    {
      gti = p[0];
      rv = VNET_API_ERROR_IF_ALREADY_EXISTS;
    }

  GBP_VXLAN_TUN_DBG ("add: %U", format_gbp_vxlan_tunnel, gti);

  return (rv);
}
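/**
 * Delete the template tunnel for 'vni': flush the endpoints learnt on
 * it, undo the BD/RD plumbing and remove the interface. All child
 * tunnels are expected to have been unlocked/deleted beforehand (see
 * the ASSERT on gt_tuns).
 */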
int
gbp_vxlan_tunnel_del (u32 vni)
{
  gbp_vxlan_tunnel_t *gt;
  uword *p;

  p = hash_get (gv_db, vni);

  if (NULL != p)
    {
      vnet_main_t *vnm;

      vnm = vnet_get_main ();
      gt = gbp_vxlan_tunnel_get (p[0]);

      vxlan_gbp_unregister_udp_ports ();

      GBP_VXLAN_TUN_DBG ("del: %U", format_gbp_vxlan_tunnel,
			 gt - gbp_vxlan_tunnel_pool);

      gbp_endpoint_flush (GBP_ENDPOINT_SRC_DP, gt->gt_sw_if_index);
      ASSERT (0 == vec_len (gt->gt_tuns));
      vec_free (gt->gt_tuns);

      if (GBP_VXLAN_TUN_L2 == gt->gt_layer)
	{
	  gbp_learn_disable (gt->gt_sw_if_index, GBP_LEARN_MODE_L2);
	  gbp_itf_unlock (gt->gt_itf);
	  gbp_bridge_domain_unlock (gt->gt_gbd);
	}
      else
	{
	  fib_protocol_t fproto;

	  FOR_EACH_FIB_IP_PROTOCOL (fproto)
	    ip_table_bind (fproto, gt->gt_sw_if_index, 0, 0);

	  ip4_sw_interface_enable_disable (gt->gt_sw_if_index, 0);
	  ip6_sw_interface_enable_disable (gt->gt_sw_if_index, 0);

	  gbp_learn_disable (gt->gt_sw_if_index, GBP_LEARN_MODE_L3);
	  gbp_route_domain_unlock (gt->gt_grd);
	}

      vnet_sw_interface_set_flags (vnm, gt->gt_sw_if_index, 0);
      vnet_delete_hw_interface (vnm, gt->gt_hw_if_index);

      hash_unset (gv_db, vni);
      gbp_vxlan_tunnel_db[gt->gt_sw_if_index] = INDEX_INVALID;

      pool_put (gbp_vxlan_tunnel_pool, gt);
    }
  else
    return VNET_API_ERROR_NO_SUCH_ENTRY;

  return (0);
}
static clib_error_t *
gbp_vxlan_show (vlib_main_t * vm,
		unformat_input_t * input, vlib_cli_command_t * cmd)
{
  gbp_vxlan_walk (gbp_vxlan_tunnel_show_one, vm);

  return (NULL);
}

/*?
 * Show Group Based Policy VXLAN tunnels
 *
 * @cliexpar
 * @cliexstart{show gbp vxlan}
 * @cliexend
 ?*/
VLIB_CLI_COMMAND (gbp_vxlan_show_node, static) = {
  .path = "show gbp vxlan",
  .short_help = "show gbp vxlan\n",
  .function = gbp_vxlan_show,
};
static clib_error_t *
gbp_vxlan_init (vlib_main_t * vm)
{
  u32 slot4;

  /*
   * insert ourselves into the VXLAN-GBP arc to collect the no-tunnel
   * packets
   */
  slot4 = vlib_node_add_next_with_slot (vm,
					vxlan4_gbp_input_node.index,
					gbp_vxlan4_input_node.index,
					VXLAN_GBP_INPUT_NEXT_NO_TUNNEL);
  ASSERT (slot4 == VXLAN_GBP_INPUT_NEXT_NO_TUNNEL);

  /* slot6 = vlib_node_add_next_with_slot (vm, */
  /*                                       vxlan6_gbp_input_node.index, */
  /*                                       gbp_vxlan6_input_node.index, */
  /*                                       VXLAN_GBP_INPUT_NEXT_NO_TUNNEL); */
  /* ASSERT (slot6 == VXLAN_GBP_INPUT_NEXT_NO_TUNNEL); */

  gt_logger = vlib_log_register_class ("gbp", "tun");

  return (NULL);
}

VLIB_INIT_FUNCTION (gbp_vxlan_init);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */