2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
18 * @brief Common utility functions for LISP-GPE interfaces.
22 #include <vppinfra/error.h>
23 #include <vppinfra/hash.h>
24 #include <vnet/vnet.h>
25 #include <vnet/ip/ip.h>
26 #include <vnet/ip/udp.h>
27 #include <vnet/ethernet/ethernet.h>
28 #include <vnet/lisp-gpe/lisp_gpe.h>
29 #include <vnet/adj/adj.h>
30 #include <vnet/fib/fib_table.h>
31 #include <vnet/fib/ip4_fib.h>
32 #include <vnet/fib/ip6_fib.h>
/* Dispatch targets of the L3 LISP-GPE TX node: drop, or re-enter the
 * IPv4/IPv6 lookup nodes after encapsulation. */
#define foreach_lisp_gpe_tx_next        \
_(DROP, "error-drop")                   \
_(IP4_LOOKUP, "ip4-lookup")             \
_(IP6_LOOKUP, "ip6-lookup")
/* Expand the list above into LISP_GPE_TX_NEXT_<sym> enum members. */
#define _(sym,str) LISP_GPE_TX_NEXT_##sym,
  foreach_lisp_gpe_tx_next
/* Per-packet trace record for the L3 TX node: the tunnel (adjacency)
 * index used for the packet. */
} lisp_gpe_tx_trace_t;
/* Trace formatter for the L3 LISP-GPE TX node; prints the tunnel index
 * recorded in lisp_gpe_tx_trace_t. */
format_lisp_gpe_tx_trace (u8 * s, va_list * args)
  /* First two va_args are the standard vlib trace-formatter arguments;
   * they are unused here. */
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  lisp_gpe_tx_trace_t *t = va_arg (*args, lisp_gpe_tx_trace_t *);
  s = format (s, "LISP-GPE-TX: tunnel %d", t->tunnel_index);
63 #define is_v4_packet(_h) ((*(u8*) _h) & 0xF0) == 0x40
66 * @brief LISP-GPE interface TX (encap) function.
67 * @node lisp_gpe_interface_tx
69 * The LISP-GPE interface TX (encap) function.
71 * Looks up the associated tunnel based on the adjacency hit in the SD FIB
72 * and if the tunnel is multihomed it uses the flow hash to determine
73 * sub-tunnel, and rewrite string, to be used to encapsulate the packet.
75 * @param[in] vm vlib_main_t corresponding to the current thread.
76 * @param[in] node vlib_node_runtime_t data for this node.
77 * @param[in] frame vlib_frame_t whose contents should be dispatched.
79 * @return number of vectors in frame.
/* L3 LISP-GPE interface TX (encap) node function.  For each buffer:
 * fixes up the IP/UDP encap header applied at the midchain adjacency,
 * then follows the DPO the midchain is stacked on to pick the next node.
 * Returns the number of vectors processed.
 * NOTE(review): this chunk appears to have lines elided (loop-frame
 * bookkeeping such as bi0/from/to_next updates is not visible). */
lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_frame_t * from_frame)
  u32 n_left_from, next_index, *from, *to_next;
  lisp_gpe_main_t *lgm = &lisp_gpe_main;
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from > 0 && n_left_to_next > 0)
	  u32 bi0, adj_index0, next0;
	  const ip_adjacency_t *adj0;
	  const dpo_id_t *dpo0;
	  b0 = vlib_get_buffer (vm, bi0);
	  /* Fixup the checksum and len fields in the LISP tunnel encap
	   * that was applied at the midchain node */
	  is_v4_0 = is_v4_packet (vlib_buffer_get_current (b0));
	  ip_udp_fixup_one (lgm->vlib_main, b0, is_v4_0);
	  /* Follow the DPO on which the midchain is stacked */
	  adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
	  adj0 = adj_get (adj_index0);
	  dpo0 = &adj0->sub_type.midchain.next_dpo;
	  next0 = dpo0->dpoi_next_node;
	  /* Re-point the buffer at the child DPO for the next node. */
	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	      /* Trace records the adjacency index as the "tunnel" id. */
	      lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
	      tr->tunnel_index = adj_index0;
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  return from_frame->n_vectors;
/* Device-name formatter for the L3 device class: "lisp_gpe<instance>". */
format_lisp_gpe_name (u8 * s, va_list * args)
  u32 dev_instance = va_arg (*args, u32);
  return format (s, "lisp_gpe%d", dev_instance);
/* Device class for L3 LISP-GPE interfaces: naming, TX trace formatting,
 * and the encap TX function registered above. */
VNET_DEVICE_CLASS (lisp_gpe_device_class) = {
  .format_device_name = format_lisp_gpe_name,
  .format_tx_trace = format_lisp_gpe_tx_trace,
  .tx_function = lisp_gpe_interface_tx,
  .no_flatten_output_chains = 1,
/* No-op set_rewrite hook for the hw interface class; LISP-GPE rewrites
 * are managed by the tunnel/adjacency code, not per-interface. */
dummy_set_rewrite (vnet_main_t * vnm, u32 sw_if_index, u32 l3_type,
		   void *dst_address, void *rewrite, uword max_rewrite_bytes)
/* Pretty-print a lisp_gpe_header_t (flags, ver_res, res, next_protocol,
 * iid) bounded by max_header_bytes; prints a truncation notice when the
 * buffer is too short to hold a full header. */
format_lisp_gpe_header_with_length (u8 * s, va_list * args)
  lisp_gpe_header_t *h = va_arg (*args, lisp_gpe_header_t *);
  u32 max_header_bytes = va_arg (*args, u32);
  header_bytes = sizeof (h[0]);
  if (max_header_bytes != 0 && header_bytes > max_header_bytes)
    return format (s, "lisp-gpe header truncated");
  s = format (s, "flags: ");
/* Print the name of every flag bit that is set. */
#define _(n,v) if (h->flags & v) s = format (s, "%s ", #n);
  foreach_lisp_gpe_flag_bit;
  /* iid is stored in network byte order; print decimal and hex. */
  s = format (s, "\n ver_res %d res %d next_protocol %d iid %d(%x)",
	      h->ver_res, h->res, h->next_protocol,
	      clib_net_to_host_u32 (h->iid), clib_net_to_host_u32 (h->iid));
/* HW interface class shared by LISP-GPE interfaces; header formatting
 * plus a dummy rewrite hook (rewrites come from tunnel adjacencies). */
VNET_HW_INTERFACE_CLASS (lisp_gpe_hw_class) = {
  .format_header = format_lisp_gpe_header_with_length,
  .set_rewrite = dummy_set_rewrite,
/* Add (is_add=1) or delete a default route in the given table that punts
 * packets to the LISP control plane, for the given FIB protocol.
 * Add locks the table; delete removes the special entry and unlocks. */
add_del_lisp_gpe_default_route (u32 table_id, fib_protocol_t proto, u8 is_add)
  fib_prefix_t prefix = {
  /*
   * Add a default route that results in a control plane punt DPO
   */
  dpo_id_t cp_punt = DPO_NULL;
  dpo_set (&cp_punt, DPO_LISP_CP, fib_proto_to_dpo (proto), proto);
  fib_table_find_or_create_and_lock (prefix.fp_proto, table_id);
  /* EXCLUSIVE: this DPO replaces any other forwarding for the prefix. */
  fib_table_entry_special_dpo_add (fib_index, &prefix, FIB_SOURCE_LISP,
				   FIB_ENTRY_FLAG_EXCLUSIVE, &cp_punt);
  /* The FIB holds its own lock on the DPO now. */
  dpo_unlock (&cp_punt);
  fib_index = fib_table_find (prefix.fp_proto, table_id);
  fib_table_entry_special_remove (fib_index, &prefix, FIB_SOURCE_LISP);
  /* Drop the reference taken on add. */
  fib_table_unlock (fib_index, prefix.fp_proto);
/* Bind sw_if_index into the IPv4 and IPv6 FIBs for table_id (creating
 * and locking the tables if needed) and enable IP4/IP6 on the
 * interface. */
lisp_gpe_iface_set_table (u32 sw_if_index, u32 table_id)
  fib_node_index_t fib_index;
  fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, table_id);
  /* vec_validate: the per-sw_if_index vector may be shorter than
   * sw_if_index. */
  vec_validate (ip4_main.fib_index_by_sw_if_index, sw_if_index);
  ip4_main.fib_index_by_sw_if_index[sw_if_index] = fib_index;
  ip4_sw_interface_enable_disable (sw_if_index, 1);
  fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, table_id);
  vec_validate (ip6_main.fib_index_by_sw_if_index, sw_if_index);
  ip6_main.fib_index_by_sw_if_index[sw_if_index] = fib_index;
  ip6_sw_interface_enable_disable (sw_if_index, 1);
/* Dispatch targets of the L2 LISP-GPE TX node; unlike the L3 node it can
 * also hand packets to the LISP control-plane lookup node. */
#define foreach_l2_lisp_gpe_tx_next     \
_(DROP, "error-drop")                   \
_(IP4_LOOKUP, "ip4-lookup")             \
_(IP6_LOOKUP, "ip6-lookup")             \
_(LISP_CP_LOOKUP, "lisp-cp-lookup")
/* Expand the list above into L2_LISP_GPE_TX_NEXT_<sym> enum members. */
#define _(sym,str) L2_LISP_GPE_TX_NEXT_##sym,
  foreach_l2_lisp_gpe_tx_next
    L2_LISP_GPE_TX_N_NEXT,
} l2_lisp_gpe_tx_next_t;
/* Per-packet trace record for the L2 TX node: tunnel index used. */
} l2_lisp_gpe_tx_trace_t;
/* Trace formatter for the L2 LISP-GPE TX node; prints the tunnel index
 * recorded in l2_lisp_gpe_tx_trace_t. */
format_l2_lisp_gpe_tx_trace (u8 * s, va_list * args)
  /* Standard vlib trace-formatter preamble arguments; unused. */
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  l2_lisp_gpe_tx_trace_t *t = va_arg (*args, l2_lisp_gpe_tx_trace_t *);
  s = format (s, "L2-LISP-GPE-TX: tunnel %d", t->tunnel_index);
/* Map a tunnel action to a TX next-node for buffer b0: send-map-request
 * goes to the control-plane lookup node (tagged with the MAC AFI), any
 * other action drops the packet. */
l2_process_tunnel_action (vlib_buffer_t * b0, u8 action, u32 * next0)
  if (LISP_SEND_MAP_REQUEST == action)
      next0[0] = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
      /* Tell the CP lookup node the overlay is an L2/MAC EID. */
      vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC;
  next0[0] = L2_LISP_GPE_TX_NEXT_DROP;
/* Compute a flow hash over the IP header at 'data'; dispatches on the
 * IP version nibble (same test as the is_v4_packet macro above). */
ip_flow_hash (void *data)
  ip4_header_t *iph = (ip4_header_t *) data;
  if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
    return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
  /* Not v4: treat as IPv6. */
  return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
/* Flow hash for an L2 frame: when the payload is IPv4/IPv6, hash the
 * inner IP header; always mix in src/dst MAC addresses so non-IP frames
 * still spread across sub-tunnels. */
l2_flow_hash (vlib_buffer_t * b0)
  ethernet_header_t *eh;
  uword is_ip, eh_size;
  eh = vlib_buffer_get_current (b0);
  eh_type = clib_net_to_host_u16 (eh->type);
  /* Accounts for VLAN tags in the ethernet header length. */
  eh_size = ethernet_buffer_header_size (b0);
  is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);
  /* since we have 2 cache lines, use them */
  a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
  b = mac_to_u64 ((u8 *) eh->dst_address);
  c = mac_to_u64 ((u8 *) eh->src_address);
  hash_mix64 (a, b, c);
327 /* always_inline void */
328 /* l2_process_one (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, u32 ti0, */
331 /* lisp_gpe_tunnel_t *t0; */
333 /* t0 = pool_elt_at_index (lgm->tunnels, ti0); */
334 /* ASSERT (0 != t0); */
336 /* if (PREDICT_TRUE (LISP_NO_ACTION == t0->action)) */
338 /* /\* compute 'flow' hash *\/ */
339 /* if (PREDICT_TRUE (t0->sub_tunnels_lbv_count > 1)) */
340 /* vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0); */
341 /* encap_one_inline (lgm, b0, t0, next0); */
345 /* l2_process_tunnel_action (b0, t0->action, next0); */
349 /* always_inline void */
350 /* l2_process_two (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, vlib_buffer_t * b1, */
351 /* u32 ti0, u32 ti1, u32 * next0, u32 * next1) */
353 /* lisp_gpe_tunnel_t *t0, *t1; */
355 /* t0 = pool_elt_at_index (lgm->tunnels, ti0); */
356 /* t1 = pool_elt_at_index (lgm->tunnels, ti1); */
358 /* ASSERT (0 != t0 && 0 != t1); */
360 /* if (PREDICT_TRUE (LISP_NO_ACTION == t0->action */
361 /* && LISP_NO_ACTION == t1->action)) */
363 /* if (PREDICT_TRUE (t0->sub_tunnels_lbv_count > 1)) */
364 /* vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0); */
365 /* if (PREDICT_TRUE (t1->sub_tunnels_lbv_count > 1)) */
366 /* vnet_buffer (b1)->ip.flow_hash = l2_flow_hash (b1); */
367 /* encap_two_inline (lgm, b0, b1, t0, t1, next0, next1); */
371 /* if (LISP_NO_ACTION == t0->action) */
373 /* if (PREDICT_TRUE (t0->sub_tunnels_lbv_count > 1)) */
374 /* vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0); */
375 /* encap_one_inline (lgm, b0, t0, next0); */
376 /* l2_process_tunnel_action (b1, t1->action, next1); */
378 /* else if (LISP_NO_ACTION == t1->action) */
380 /* if (PREDICT_TRUE (t1->sub_tunnels_lbv_count > 1)) */
381 /* vnet_buffer (b1)->ip.flow_hash = l2_flow_hash (b1); */
382 /* encap_one_inline (lgm, b1, t1, next1); */
383 /* l2_process_tunnel_action (b0, t0->action, next0); */
387 /* l2_process_tunnel_action (b0, t0->action, next0); */
388 /* l2_process_tunnel_action (b1, t1->action, next1); */
394 * @brief LISP-GPE interface TX (encap) function for L2 overlays.
395 * @node l2_lisp_gpe_interface_tx
397 * The L2 LISP-GPE interface TX (encap) function.
399 * Uses bridge domain index, source and destination ethernet addresses to
 * lookup tunnel. If the tunnel is multihomed a flow hash is used to determine
401 * the sub-tunnel and therefore the rewrite string to be used to encapsulate
404 * @param[in] vm vlib_main_t corresponding to the current thread.
405 * @param[in] node vlib_node_runtime_t data for this node.
406 * @param[in] frame vlib_frame_t whose contents should be dispatched.
408 * @return number of vectors in frame.
/* L2 LISP-GPE interface TX (encap) node function.  Looks up the tunnel
 * by bridge-domain + MAC addresses; in the current (partially
 * commented-out) state, packets default to the lisp-cp-lookup next node.
 * Returns the number of vectors processed.
 * NOTE(review): chunk appears to have lines elided (bi0/bi1 assignments
 * and frame bookkeeping are not visible). */
l2_lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
			  vlib_frame_t * from_frame)
  u32 n_left_from, next_index, *from, *to_next;
  lisp_gpe_main_t *lgm = &lisp_gpe_main;
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      /* Dual-loop: process two buffers per iteration while enough remain. */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	  vlib_buffer_t *b0, *b1;
	  lisp_gpe_tunnel_t *t0 = 0, *t1 = 0;
	  // ethernet_header_t *e0, *e1;
	  /* Default both packets to the control-plane lookup node. */
	  next0 = next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
	  /* Prefetch next iteration. */
	    vlib_buffer_t *p2, *p3;
	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);
	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);
	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  /* e0 = vlib_buffer_get_current (b0); */
	  /* e1 = vlib_buffer_get_current (b1); */
	  /* lookup dst + src mac */
	  /* ti0 = lisp_l2_fib_lookup (lgm, vnet_buffer (b0)->l2.bd_index, */
	  /*                           e0->src_address, e0->dst_address); */
	  /* ti1 = lisp_l2_fib_lookup (lgm, vnet_buffer (b1)->l2.bd_index, */
	  /*                           e1->src_address, e1->dst_address); */
	  /* if (PREDICT_TRUE ((u32) ~ 0 != ti0) && (u32) ~ 0 != ti1) */
	  /* /\* process both tunnels *\/ */
	  /* l2_process_two (lgm, b0, b1, ti0, ti1, &next0, &next1); */
	  /* if ((u32) ~ 0 != ti0) */
	  /* /\* process tunnel for b0 *\/ */
	  /* l2_process_one (lgm, b0, ti0, &next0); */
	  /* /\* no tunnel found for b1, send to control plane *\/ */
	  /* next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; */
	  /* vnet_buffer (b1)->lisp.overlay_afi = LISP_AFI_MAC; */
	  /* else if ((u32) ~ 0 != ti1) */
	  /* /\* process tunnel for b1 *\/ */
	  /* l2_process_one (lgm, b1, ti1, &next1); */
	  /* /\* no tunnel found b0, send to control plane *\/ */
	  /* next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; */
	  /* vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC; */
	  /* /\* no tunnels found *\/ */
	  /* next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; */
	  /* vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC; */
	  /* next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; */
	  /* vnet_buffer (b1)->lisp.overlay_afi = LISP_AFI_MAC; */
	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	      l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
	      /* NOTE(review): t0 is never assigned in the live (non
	       * commented-out) path, so this traces a bogus index —
	       * verify against the disabled lookup logic above. */
	      tr->tunnel_index = t0 - lgm->tunnels;
	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
	      l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b1,
	      /* NOTE(review): same concern as t0 above. */
	      tr->tunnel_index = t1 - lgm->tunnels;
	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, bi1, next0,
      /* Single-loop: drain the remaining buffers one at a time. */
      while (n_left_from > 0 && n_left_to_next > 0)
	  u32 bi0, ti0, next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
	  ethernet_header_t *e0;
	  b0 = vlib_get_buffer (vm, bi0);
	  e0 = vlib_buffer_get_current (b0);
	  /* lookup dst + src mac */
	  ti0 = lisp_l2_fib_lookup (lgm, vnet_buffer (b0)->l2.bd_index,
				    e0->src_address, e0->dst_address);
	  /* if (PREDICT_TRUE ((u32) ~ 0 != ti0)) */
	  /* l2_process_one (lgm, b0, ti0, &next0); */
	  /* /\* no tunnel found send to control plane *\/ */
	  /* next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP; */
	  /* vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC; */
	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	      l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
	      /* NOTE(review): lookup-miss sentinel is (u32)~0, and 0 is a
	       * valid index — this condition looks inverted; confirm it
	       * should be plain `ti0` or a `~0` comparison. */
	      tr->tunnel_index = ti0 ? ti0 : ~0;
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  return from_frame->n_vectors;
/* Device-name formatter for the L2 device class: "l2_lisp_gpe<instance>". */
format_l2_lisp_gpe_name (u8 * s, va_list * args)
  u32 dev_instance = va_arg (*args, u32);
  return format (s, "l2_lisp_gpe%d", dev_instance);
581 VNET_DEVICE_CLASS (l2_lisp_gpe_device_class,static) = {
582 .name = "L2_LISP_GPE",
583 .format_device_name = format_l2_lisp_gpe_name,
584 .format_tx_trace = format_lisp_gpe_tx_trace,
585 .tx_function = l2_lisp_gpe_interface_tx,
586 .no_flatten_output_chains = 1,
/* Create (or recycle) a LISP-GPE hw interface for the given vni and
 * data-plane table, register it in the tunnel lookup tables, and return
 * its vnet_hw_interface_t. */
static vnet_hw_interface_t *
create_lisp_gpe_iface (lisp_gpe_main_t * lgm, u32 vni, u32 dp_table,
		       vnet_device_class_t * dev_class,
		       tunnel_lookup_t * tuns)
  u32 hw_if_index = ~0;
  vnet_hw_interface_t *hi;
  vnet_main_t *vnm = lgm->vnet_main;
  /* create hw lisp_gpeX iface if needed, otherwise reuse existing */
  flen = vec_len (lgm->free_tunnel_hw_if_indices);
      /* Pop the most recently freed interface for reuse. */
      hw_if_index = lgm->free_tunnel_hw_if_indices[flen - 1];
      _vec_len (lgm->free_tunnel_hw_if_indices) -= 1;
      hi = vnet_get_hw_interface (vnm, hw_if_index);
      /* rename interface */
      new_name = format (0, "%U", dev_class->format_device_name, vni);
      /* NUL-terminate: format() vectors are not NUL-terminated. */
      vec_add1 (new_name, 0);
      vnet_rename_interface (vnm, hw_if_index, (char *) new_name);
      /* clear old stats of freed interface before reuse */
      vnet_interface_main_t *im = &vnm->interface_main;
      vnet_interface_counter_lock (im);
      vlib_zero_combined_counter (&im->combined_sw_if_counters
				  [VNET_INTERFACE_COUNTER_TX],
      vlib_zero_combined_counter (&im->combined_sw_if_counters
				  [VNET_INTERFACE_COUNTER_RX],
      vlib_zero_simple_counter (&im->sw_if_counters
				[VNET_INTERFACE_COUNTER_DROP],
      vnet_interface_counter_unlock (im);
      /* No free interface available: register a brand-new one. */
      hw_if_index = vnet_register_interface (vnm, dev_class->index, vni,
					     lisp_gpe_hw_class.index, 0);
      hi = vnet_get_hw_interface (vnm, hw_if_index);
  hash_set (tuns->hw_if_index_by_dp_table, dp_table, hw_if_index);
  /* set tunnel termination: post decap, packets are tagged as having been
   * originated by lisp-gpe interface */
  hash_set (tuns->sw_if_index_by_vni, vni, hi->sw_if_index);
  hash_set (tuns->vni_by_sw_if_index, hi->sw_if_index, vni);
/* Tear down a LISP-GPE interface: bring it down, return its hw index to
 * the free list for reuse, and remove its entries from the tunnel
 * lookup tables. */
remove_lisp_gpe_iface (lisp_gpe_main_t * lgm, u32 hi_index, u32 dp_table,
		       tunnel_lookup_t * tuns)
  vnet_main_t *vnm = lgm->vnet_main;
  vnet_hw_interface_t *hi;
  hi = vnet_get_hw_interface (vnm, hi_index);
  /* disable interface */
  vnet_sw_interface_set_flags (vnm, hi->sw_if_index, 0 /* down */ );
  vnet_hw_interface_set_flags (vnm, hi->hw_if_index, 0 /* down */ );
  hash_unset (tuns->hw_if_index_by_dp_table, dp_table);
  /* Keep the hw interface around for reuse by create_lisp_gpe_iface. */
  vec_add1 (lgm->free_tunnel_hw_if_indices, hi->hw_if_index);
  /* clean tunnel termination and vni to sw_if_index binding */
  vnip = hash_get (tuns->vni_by_sw_if_index, hi->sw_if_index);
  clib_warning ("No vni associated to interface %d", hi->sw_if_index);
  hash_unset (tuns->sw_if_index_by_vni, vnip[0]);
  hash_unset (tuns->vni_by_sw_if_index, hi->sw_if_index);
676 * @brief Add/del LISP-GPE L3 interface.
678 * Creates LISP-GPE interface, sets ingress arcs from lisp_gpeX_lookup,
679 * installs default routes that attract all traffic with no more specific
680 * routes to lgpe-ipx-lookup, set egress arcs to ipx-lookup, sets
681 * the interface in the right vrf and enables it.
683 * @param[in] lgm Reference to @ref lisp_gpe_main_t.
684 * @param[in] a Parameters to create interface.
 * @return 0 on success, error code otherwise.
/* Add or delete an L3 LISP-GPE interface for (vni, table_id): on add,
 * creates/recycles the interface, binds it into the vrf, installs the
 * control-plane punt default routes and brings it up; on delete,
 * reverses all of that.  Warns and bails on duplicate/missing
 * mappings. */
lisp_gpe_add_del_l3_iface (lisp_gpe_main_t * lgm,
			   vnet_lisp_gpe_add_del_iface_args_t * a)
  vnet_main_t *vnm = lgm->vnet_main;
  tunnel_lookup_t *l3_ifaces = &lgm->l3_ifaces;
  vnet_hw_interface_t *hi;
  hip = hash_get (l3_ifaces->hw_if_index_by_dp_table, a->table_id);
      /* A vrf can back at most one LISP-GPE interface. */
      clib_warning ("vrf %d already mapped to a vni", a->table_id);
  si = hash_get (l3_ifaces->sw_if_index_by_vni, a->vni);
      clib_warning ("Interface for vni %d already exists", a->vni);
  /* create lisp iface and populate tunnel tables */
  hi = create_lisp_gpe_iface (lgm, a->vni, a->table_id,
			      &lisp_gpe_device_class, l3_ifaces);
  /* insert default routes that point to lisp-cp lookup */
  lisp_gpe_iface_set_table (hi->sw_if_index, a->table_id);
  add_del_lisp_gpe_default_route (a->table_id, FIB_PROTOCOL_IP4, 1);
  add_del_lisp_gpe_default_route (a->table_id, FIB_PROTOCOL_IP6, 1);
  /* enable interface */
  vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
			       VNET_SW_INTERFACE_FLAG_ADMIN_UP);
  vnet_hw_interface_set_flags (vnm, hi->hw_if_index,
			       VNET_HW_INTERFACE_FLAG_LINK_UP);
      /* Delete path: nothing to remove for this vrf. */
      clib_warning ("The interface for vrf %d doesn't exist",
      hi = vnet_get_hw_interface (vnm, hip[0]);
      remove_lisp_gpe_iface (lgm, hip[0], a->table_id, &lgm->l3_ifaces);
      /* unset default routes */
      ip4_sw_interface_enable_disable (hi->sw_if_index, 0);
      ip6_sw_interface_enable_disable (hi->sw_if_index, 0);
      add_del_lisp_gpe_default_route (a->table_id, FIB_PROTOCOL_IP4, 0);
      add_del_lisp_gpe_default_route (a->table_id, FIB_PROTOCOL_IP6, 0);
753 * @brief Add/del LISP-GPE L2 interface.
755 * Creates LISP-GPE interface, sets it in L2 mode in the appropriate
756 * bridge domain, sets egress arcs and enables it.
758 * @param[in] lgm Reference to @ref lisp_gpe_main_t.
759 * @param[in] a Parameters to create interface.
 * @return 0 on success, error code otherwise.
/* Add or delete an L2 LISP-GPE interface for (vni, bd_id): on add,
 * creates/recycles the interface, puts it in L2 bridge mode in the
 * bridge domain, wires the TX node's egress arcs and brings it up; on
 * delete, removes the interface.  Warns and bails on duplicate/missing
 * mappings. */
lisp_gpe_add_del_l2_iface (lisp_gpe_main_t * lgm,
			   vnet_lisp_gpe_add_del_iface_args_t * a)
  vnet_main_t *vnm = lgm->vnet_main;
  tunnel_lookup_t *l2_ifaces = &lgm->l2_ifaces;
  vnet_hw_interface_t *hi;
  /* Resolve (or allocate) the bridge-domain index for bd_id. */
  bd_index = bd_find_or_add_bd_index (&bd_main, a->bd_id);
  hip = hash_get (l2_ifaces->hw_if_index_by_dp_table, bd_index);
      /* A bridge domain can back at most one LISP-GPE interface. */
      clib_warning ("bridge domain %d already mapped to a vni", a->bd_id);
  si = hash_get (l2_ifaces->sw_if_index_by_vni, a->vni);
      clib_warning ("Interface for vni %d already exists", a->vni);
  /* create lisp iface and populate tunnel tables */
  hi = create_lisp_gpe_iface (lgm, a->vni, bd_index,
			      &l2_lisp_gpe_device_class, &lgm->l2_ifaces);
  /* add iface to l2 bridge domain */
  set_int_l2_mode (lgm->vlib_main, vnm, MODE_L2_BRIDGE, hi->sw_if_index,
  /* set egress arcs */
#define _(sym,str) vlib_node_add_named_next_with_slot (vnm->vlib_main, \
                hi->tx_node_index, str, L2_LISP_GPE_TX_NEXT_##sym);
  foreach_l2_lisp_gpe_tx_next
  /* enable interface */
  vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
			       VNET_SW_INTERFACE_FLAG_ADMIN_UP);
  vnet_hw_interface_set_flags (vnm, hi->hw_if_index,
			       VNET_HW_INTERFACE_FLAG_LINK_UP);
      /* Delete path: nothing to remove for this bridge domain. */
      clib_warning ("The interface for bridge domain %d doesn't exist",
      remove_lisp_gpe_iface (lgm, hip[0], bd_index, &lgm->l2_ifaces);
/** Add/del L2 or L3 LISP-GPE interface.
 *
 * Dispatches to the L2 or L3 implementation based on a->is_l2 after
 * checking that LISP is enabled.  Returns VNET_API_ERROR_LISP_DISABLED
 * when LISP is off, otherwise the result of the L2/L3 handler. */
vnet_lisp_gpe_add_del_iface (vnet_lisp_gpe_add_del_iface_args_t * a,
  lisp_gpe_main_t *lgm = &lisp_gpe_main;
  if (vnet_lisp_gpe_enable_disable_status () == 0)
      clib_warning ("LISP is disabled!");
      return VNET_API_ERROR_LISP_DISABLED;
  return lisp_gpe_add_del_l3_iface (lgm, a);
  return lisp_gpe_add_del_l2_iface (lgm, a);
/* CLI handler for "lisp gpe iface": parses add/del, vrf|bd and vni,
 * validates that exactly one of vrf/bd is given together with a vni,
 * then calls vnet_lisp_gpe_add_del_iface. */
static clib_error_t *
lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input,
				   vlib_cli_command_t * cmd)
  unformat_input_t _line_input, *line_input = &_line_input;
  clib_error_t *error = 0;
  u32 table_id, vni, bd_id;
  u8 vni_is_set = 0, vrf_is_set = 0, bd_index_is_set = 0;
  vnet_lisp_gpe_add_del_iface_args_t _a, *a = &_a;
  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
      if (unformat (line_input, "add"))
      else if (unformat (line_input, "del"))
      else if (unformat (line_input, "vrf %d", &table_id))
      else if (unformat (line_input, "vni %d", &vni))
      else if (unformat (line_input, "bd %d", &bd_id))
	  /* Unrecognized token: report where parsing stopped. */
	  return clib_error_return (0, "parse error: '%U'",
				    format_unformat_error, line_input);
  /* vrf and bd are mutually exclusive ways to pick the data-plane table. */
  if (vrf_is_set && bd_index_is_set)
    return clib_error_return (0,
			      /* NOTE(review): user-facing typo "brdige"
			       * should read "bridge"; left untouched here
			       * since string text is runtime behavior. */
			      "Cannot set both vrf and brdige domain index!");
    return clib_error_return (0, "vni must be set!");
  if (!vrf_is_set && !bd_index_is_set)
    return clib_error_return (0, "vrf or bridge domain index must be set!");
  a->dp_table = vrf_is_set ? table_id : bd_id;
  a->is_l2 = bd_index_is_set;
  rv = vnet_lisp_gpe_add_del_iface (a, 0);
    error = clib_error_return (0, "failed to %s gpe iface!",
			       is_add ? "add" : "delete");
911 VLIB_CLI_COMMAND (add_del_lisp_gpe_iface_command, static) = {
912 .path = "lisp gpe iface",
913 .short_help = "lisp gpe iface add/del vni <vni> vrf <vrf>",
914 .function = lisp_gpe_add_del_iface_command_fn,
919 * fd.io coding-style-patch-verification: ON
922 * eval: (c-set-style "gnu")