2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vppinfra/error.h>
17 #include <vppinfra/hash.h>
18 #include <vnet/vnet.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/ip/udp.h>
21 #include <vnet/ethernet/ethernet.h>
22 #include <vnet/lisp-gpe/lisp_gpe.h>
/* Tx next-node dispositions for the L3 lisp-gpe interface: after encap a
 * packet is either dropped or handed to the ip4/ip6 lookup node. */
24 #define foreach_lisp_gpe_tx_next \
25 _(DROP, "error-drop") \
26 _(IP4_LOOKUP, "ip4-lookup") \
27 _(IP6_LOOKUP, "ip6-lookup")
/* Expand foreach_lisp_gpe_tx_next into the LISP_GPE_TX_NEXT_* enum members.
 * NOTE(review): the enclosing typedef/enum lines are missing from this
 * extract; the trailing brace below closes the tx trace struct, whose
 * opening lines are likewise not visible here. */
31 #define _(sym,str) LISP_GPE_TX_NEXT_##sym,
32 foreach_lisp_gpe_tx_next
/* Per-packet tx trace record: index of the tunnel used for encap. */
40 } lisp_gpe_tx_trace_t;
/* vlib format callback: render a lisp_gpe_tx_trace_t for "show trace".
 * Consumes the standard (vm, node, trace) va_list triple; vm/node are
 * required by the callback signature but unused. */
43 format_lisp_gpe_tx_trace (u8 * s, va_list * args)
45 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
46 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
47 lisp_gpe_tx_trace_t * t = va_arg (*args, lisp_gpe_tx_trace_t *);
49 s = format (s, "LISP-GPE-TX: tunnel %d", t->tunnel_index);
/* Resolve the lisp-gpe tunnel for one buffer: read the tx adjacency index
 * stashed in the buffer, fetch the adjacency from the v4 or v6 lookup main
 * (selected by is_v4 — the if/else lines are missing from this extract),
 * and recover the tunnel via the if_address_index field, which lisp-gpe
 * overloads to store the tunnel pool index. */
54 get_one_tunnel_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0,
55                        lisp_gpe_tunnel_t ** t0, u8 is_v4)
57 u32 adj_index0, tunnel_index0;
58 ip_adjacency_t * adj0;
60 /* Get adjacency and from it the tunnel_index */
61 adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
64 adj0 = ip_get_adjacency (lgm->lm4, adj_index0);
66 adj0 = ip_get_adjacency (lgm->lm6, adj_index0);
68 tunnel_index0 = adj0->if_address_index;
/* Result returned through t0 (pointer-to-pointer out param). */
69 t0[0] = pool_elt_at_index(lgm->tunnels, tunnel_index0);
/* Prepend the tunnel's precomputed ip-udp-lisp rewrite to one buffer and
 * pick the matching lookup next node. The 36/56 byte rewrite lengths are
 * sizeof(ip4/ip6_udp_lisp_gpe_header_t), pinned by the ASSERTs below.
 * (The is_v4 branch keywords are missing from this extract.) */
75 encap_one_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0,
76                   lisp_gpe_tunnel_t * t0, u32 * next0, u8 is_v4)
78 ASSERT(sizeof(ip4_udp_lisp_gpe_header_t) == 36);
79 ASSERT(sizeof(ip6_udp_lisp_gpe_header_t) == 56);
/* v4: last arg 1 asks ip_udp_encap_one to fix up the ip4 checksum. */
83 ip_udp_encap_one (lgm->vlib_main, b0, t0->rewrite, 36, 1);
84 next0[0] = LISP_GPE_TX_NEXT_IP4_LOOKUP;
88 ip_udp_encap_one (lgm->vlib_main, b0, t0->rewrite, 56, 0);
89 next0[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP;
92 /* Reset to look up tunnel partner in the configured FIB */
93 vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
/* Dual-buffer variant of get_one_tunnel_inline: both buffers must be the
 * same address family (caller guarantees this); resolves both adjacencies
 * from the v4 or v6 lookup main and returns both tunnels via t0/t1. */
97 get_two_tunnels_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0,
98                         vlib_buffer_t * b1, lisp_gpe_tunnel_t ** t0,
99                         lisp_gpe_tunnel_t ** t1, u8 is_v4)
101 u32 adj_index0, adj_index1, tunnel_index0, tunnel_index1;
102 ip_adjacency_t * adj0, * adj1;
104 /* Get adjacency and from it the tunnel_index */
105 adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
106 adj_index1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
110 adj0 = ip_get_adjacency (lgm->lm4, adj_index0);
111 adj1 = ip_get_adjacency (lgm->lm4, adj_index1);
115 adj0 = ip_get_adjacency (lgm->lm6, adj_index0);
116 adj1 = ip_get_adjacency (lgm->lm6, adj_index1);
/* if_address_index is overloaded to carry the tunnel pool index. */
119 tunnel_index0 = adj0->if_address_index;
120 tunnel_index1 = adj1->if_address_index;
122 t0[0] = pool_elt_at_index(lgm->tunnels, tunnel_index0);
123 t1[0] = pool_elt_at_index(lgm->tunnels, tunnel_index1);
/* Dual-buffer variant of encap_one_inline: both tunnels share one address
 * family (is_v4). Prepends each tunnel's rewrite, sets both next indices,
 * and redirects both buffers into their tunnels' encap FIBs. */
130 encap_two_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, vlib_buffer_t * b1,
131                   lisp_gpe_tunnel_t * t0, lisp_gpe_tunnel_t * t1, u32 * next0,
132                   u32 * next1, u8 is_v4)
134 ASSERT(sizeof(ip4_udp_lisp_gpe_header_t) == 36);
135 ASSERT(sizeof(ip6_udp_lisp_gpe_header_t) == 56);
139 ip_udp_encap_one (lgm->vlib_main, b0, t0->rewrite, 36, 1);
140 ip_udp_encap_one (lgm->vlib_main, b1, t1->rewrite, 36, 1);
141 next0[0] = next1[0] = LISP_GPE_TX_NEXT_IP4_LOOKUP;
145 ip_udp_encap_one (lgm->vlib_main, b0, t0->rewrite, 56, 0);
146 ip_udp_encap_one (lgm->vlib_main, b1, t1->rewrite, 56, 0);
147 next0[0] = next1[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP;
150 /* Reset to look up tunnel partner in the configured FIB */
151 vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
152 vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
/* True iff the byte at _h carries an IPv4 version nibble (0x4).
 * Fix: the original expansion was not parenthesized, so a use such as
 * !is_v4_packet(p) parsed as (!(byte & 0xF0)) == 0x40 instead of
 * negating the comparison. Argument and expansion are now fully
 * parenthesized per standard macro hygiene (CERT PRE01-C/PRE02-C). */
#define is_v4_packet(_h) ((((u8 *) (_h))[0] & 0xF0) == 0x40)
/* Tx node for the L3 lisp-gpe tunnel interface. Standard vlib dual/single
 * loop: for each buffer, classify the inner packet's IP version from its
 * first byte, resolve the tunnel from the tx adjacency, encapsulate, and
 * enqueue to ip4/ip6-lookup. Returns the number of packets processed.
 * NOTE(review): several control-flow lines (braces, frame-dequeue of
 * bi0/bi1, some if/else keywords) are missing from this extract. */
158 lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
159 vlib_frame_t * from_frame)
161 u32 n_left_from, next_index, * from, * to_next;
162 lisp_gpe_main_t * lgm = &lisp_gpe_main;
164 from = vlib_frame_vector_args (from_frame);
165 n_left_from = from_frame->n_vectors;
167 next_index = node->cached_next_index;
169 while (n_left_from > 0)
173 vlib_get_next_frame (vm, node, next_index,
174 to_next, n_left_to_next);
/* Dual loop: needs 4 buffers in flight so the next pair can be prefetched. */
176 while (n_left_from >= 4 && n_left_to_next >= 2)
179 vlib_buffer_t * b0, * b1;
181 lisp_gpe_tunnel_t * t0 = 0, * t1 = 0;
182 u8 is_v4_eid0, is_v4_eid1;
184 next0 = next1 = LISP_GPE_TX_NEXT_IP4_LOOKUP;
186 /* Prefetch next iteration. */
188 vlib_buffer_t * p2, *p3;
190 p2 = vlib_get_buffer (vm, from[2]);
191 p3 = vlib_get_buffer (vm, from[3]);
193 vlib_prefetch_buffer_header(p2, LOAD);
194 vlib_prefetch_buffer_header(p3, LOAD);
196 CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
197 CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
209 b0 = vlib_get_buffer (vm, bi0);
210 b1 = vlib_get_buffer (vm, bi1);
/* Classify inner (EID) packets by IP version nibble. */
212 is_v4_eid0 = is_v4_packet(vlib_buffer_get_current (b0));
213 is_v4_eid1 = is_v4_packet(vlib_buffer_get_current (b1));
/* Same EID family: resolve both tunnels in one call; else one at a time. */
215 if (PREDICT_TRUE(is_v4_eid0 == is_v4_eid1))
217 get_two_tunnels_inline (lgm, b0, b1, &t0, &t1,
222 get_one_tunnel_inline (lgm, b0, &t0, is_v4_eid0 ? 1 : 0);
223 get_one_tunnel_inline (lgm, b1, &t1, is_v4_eid1 ? 1 : 0);
/* Encap pairwise when both tunnel destinations share a family. */
227 ip_addr_version(&t0->dst) == ip_addr_version(&t1->dst)))
229 encap_two_inline (lgm, b0, b1, t0, t1, &next0, &next1,
230 ip_addr_version(&t0->dst) == IP4 ? 1 : 0);
234 encap_one_inline (lgm, b0, t0, &next0,
235 ip_addr_version(&t0->dst) == IP4 ? 1 : 0);
236 encap_one_inline (lgm, b1, t1, &next1,
237 ip_addr_version(&t1->dst) == IP4 ? 1 : 0);
/* Record tunnel pool index in the per-buffer trace when tracing is on. */
240 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
242 lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
244 tr->tunnel_index = t0 - lgm->tunnels;
246 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
248 lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b1,
250 tr->tunnel_index = t1 - lgm->tunnels;
253 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
254 n_left_to_next, bi0, bi1, next0,
/* Single loop: same steps for one buffer at a time. */
258 while (n_left_from > 0 && n_left_to_next > 0)
261 u32 bi0, next0 = LISP_GPE_TX_NEXT_IP4_LOOKUP;
262 lisp_gpe_tunnel_t * t0 = 0;
272 b0 = vlib_get_buffer (vm, bi0);
274 is_v4_0 = is_v4_packet(vlib_buffer_get_current (b0));
275 get_one_tunnel_inline (lgm, b0, &t0, is_v4_0 ? 1 : 0);
277 encap_one_inline (lgm, b0, t0, &next0,
278 ip_addr_version(&t0->dst) == IP4 ? 1 : 0);
280 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
282 lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
284 tr->tunnel_index = t0 - lgm->tunnels;
286 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
287 n_left_to_next, bi0, next0);
290 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
293 return from_frame->n_vectors;
/* Device-name format callback: renders "lisp_gpe<N>" for instance N. */
297 format_lisp_gpe_name (u8 * s, va_list * args)
299 u32 dev_instance = va_arg (*args, u32);
300 return format (s, "lisp_gpe%d", dev_instance);
/* Device class for the L3 lisp-gpe tunnel interface: wires the naming,
 * trace formatting and tx function defined above.
 * NOTE(review): the .name member and closing brace are not visible in
 * this extract. */
303 VNET_DEVICE_CLASS (lisp_gpe_device_class,static) = {
305 .format_device_name = format_lisp_gpe_name,
306 .format_tx_trace = format_lisp_gpe_tx_trace,
307 .tx_function = lisp_gpe_interface_tx,
308 .no_flatten_output_chains = 1,
/* Placeholder set_rewrite hook for the lisp-gpe hw interface class;
 * lisp-gpe builds its rewrites itself, so this callback is a stub.
 * NOTE(review): return type and body are missing from this extract —
 * confirm against the full file before relying on its semantics. */
312 dummy_set_rewrite (vnet_main_t * vnm, u32 sw_if_index, u32 l3_type,
313 void * dst_address, void * rewrite, uword max_rewrite_bytes)
/* Format a lisp_gpe_header_t: flag names, version/reserved fields,
 * next protocol and instance id (iid, converted from network order).
 * Emits a truncation notice when fewer than sizeof(header) bytes are
 * available. */
319 format_lisp_gpe_header_with_length (u8 * s, va_list * args)
321 lisp_gpe_header_t * h = va_arg (*args, lisp_gpe_header_t *);
322 u32 max_header_bytes = va_arg (*args, u32);
325 header_bytes = sizeof (h[0]);
326 if (max_header_bytes != 0 && header_bytes > max_header_bytes)
327 return format (s, "lisp-gpe header truncated");
329 s = format (s, "flags: ");
/* Print the name of every flag bit that is set. */
330 #define _(n,v) if (h->flags & v) s = format (s, "%s ", #n);
331 foreach_lisp_gpe_flag_bit;
334 s = format (s, "\n ver_res %d res %d next_protocol %d iid %d(%x)",
335 h->ver_res, h->res, h->next_protocol,
336 clib_net_to_host_u32 (h->iid),
337 clib_net_to_host_u32 (h->iid));
/* Hardware interface class for lisp-gpe tunnels; rewrite handling is a
 * stub since lisp-gpe manages its own encapsulation rewrites.
 * NOTE(review): the .name member and closing brace are not visible in
 * this extract. */
341 VNET_HW_INTERFACE_CLASS (lisp_gpe_hw_class) = {
343 .format_header = format_lisp_gpe_header_with_length,
344 .set_rewrite = dummy_set_rewrite,
/* Add or delete a route for dst_prefix in FIB table_id, v4 or v6 chosen
 * by the prefix's address family. On add, looks the route back up to
 * return its adjacency index via adj_index (out param) and warns if the
 * insert did not take effect.
 * NOTE(review): several lines (add_adj wiring, lookup of the v4 result
 * into adj_index, return statements) are missing from this extract. */
348 add_del_ip_prefix_route (ip_prefix_t * dst_prefix, u32 table_id,
349 ip_adjacency_t * add_adj, u8 is_add, u32 * adj_index)
353 if (ip_prefix_version(dst_prefix) == IP4)
355 ip4_main_t * im4 = &ip4_main;
356 ip4_add_del_route_args_t a;
357 ip4_address_t addr = ip_prefix_v4(dst_prefix);
359 memset(&a, 0, sizeof(a));
360 a.flags = IP4_ROUTE_FLAG_TABLE_ID;
361 a.table_index_or_table_id = table_id;
363 a.dst_address_length = ip_prefix_len(dst_prefix);
364 a.dst_address = addr;
365 a.flags |= is_add ? IP4_ROUTE_FLAG_ADD : IP4_ROUTE_FLAG_DEL;
367 a.n_add_adj = is_add ? 1 : 0;
369 ip4_add_del_route (im4, &a);
/* Re-read the route to obtain/validate the installed adjacency. */
373 p = ip4_get_route (im4, table_id, 0, addr.as_u8,
374 ip_prefix_len(dst_prefix));
377 clib_warning("Failed to insert route for eid %U!",
378 format_ip4_address_and_length, addr.as_u8,
379 ip_prefix_len(dst_prefix));
/* v6 leg mirrors the v4 leg with ip6 args and helpers. */
387 ip6_main_t * im6 = &ip6_main;
388 ip6_add_del_route_args_t a;
389 ip6_address_t addr = ip_prefix_v6(dst_prefix);
391 memset(&a, 0, sizeof(a));
392 a.flags = IP6_ROUTE_FLAG_TABLE_ID;
393 a.table_index_or_table_id = table_id;
395 a.dst_address_length = ip_prefix_len(dst_prefix);
396 a.dst_address = addr;
397 a.flags |= is_add ? IP6_ROUTE_FLAG_ADD : IP6_ROUTE_FLAG_DEL;
399 a.n_add_adj = is_add ? 1 : 0;
401 ip6_add_del_route (im6, &a);
405 adj_index[0] = ip6_get_route (im6, table_id, 0, &addr,
406 ip_prefix_len(dst_prefix));
407 if (adj_index[0] == 0)
409 clib_warning("Failed to insert route for eid %U!",
410 format_ip6_address_and_length, addr.as_u8,
411 ip_prefix_len(dst_prefix));
/* Install or remove a 0/0 default route in table_id that steers traffic
 * into the lgpe-ip4/ip6-lookup node via a special adjacency whose
 * sw_if_index of ~0 marks it as the lisp-gpe default (tunnel ~0). */
420 add_del_lisp_gpe_default_route (u32 table_id, u8 is_v4, u8 is_add)
422 lisp_gpe_main_t * lgm = &lisp_gpe_main;
427 /* setup adjacency */
428 memset (&adj, 0, sizeof(adj));
431 adj.explicit_fib_index = ~0;
432 adj.lookup_next_index = is_v4 ? lgm->ip4_lookup_next_lgpe_ip4_lookup :
433 lgm->ip6_lookup_next_lgpe_ip6_lookup;
434 /* default route has tunnel_index ~0 */
435 adj.rewrite_header.sw_if_index = ~0;
437 /* set prefix to 0/0 */
438 memset(&prefix, 0, sizeof(prefix));
439 ip_prefix_version(&prefix) = is_v4 ? IP4 : IP6;
441 /* add/delete route for prefix */
442 add_del_ip_prefix_route (&prefix, table_id, &adj, is_add, &adj_index);
/* Bind sw_if_index to the v4 or v6 FIB identified by table_id, creating
 * the FIB on demand (find_*_fib_by_table_index_or_id creates missing
 * FIBs when called with the TABLE_ID flag). */
446 lisp_gpe_iface_set_table (u32 sw_if_index, u32 table_id, u8 is_ip4)
450 ip4_main_t * im4 = &ip4_main;
452 fib = find_ip4_fib_by_table_index_or_id (im4, table_id,
453 IP4_ROUTE_FLAG_TABLE_ID);
455 /* fib's created if it doesn't exist */
458 vec_validate(im4->fib_index_by_sw_if_index, sw_if_index);
459 im4->fib_index_by_sw_if_index[sw_if_index] = fib->index;
/* v6 leg mirrors the v4 leg. */
463 ip6_main_t * im6 = &ip6_main;
465 fib = find_ip6_fib_by_table_index_or_id (im6, table_id,
466 IP6_ROUTE_FLAG_TABLE_ID);
468 /* fib's created if it doesn't exist */
471 vec_validate(im6->fib_index_by_sw_if_index, sw_if_index);
472 im6->fib_index_by_sw_if_index[sw_if_index] = fib->index;
/* Tx next-node dispositions for the L2 lisp-gpe interface; compared to
 * the L3 set it adds lisp-cp-lookup for punting unresolved MACs to the
 * control plane. */
476 #define foreach_l2_lisp_gpe_tx_next \
477 _(DROP, "error-drop") \
478 _(IP4_LOOKUP, "ip4-lookup") \
479 _(IP6_LOOKUP, "ip6-lookup") \
480 _(LISP_CP_LOOKUP, "lisp-cp-lookup")
/* Expand foreach_l2_lisp_gpe_tx_next into L2_LISP_GPE_TX_NEXT_* members.
 * NOTE(review): the typedef-enum opener and the trace struct's opening
 * lines are missing from this extract. */
484 #define _(sym,str) L2_LISP_GPE_TX_NEXT_##sym,
485 foreach_l2_lisp_gpe_tx_next
487 L2_LISP_GPE_TX_N_NEXT,
488 } l2_lisp_gpe_tx_next_t;
/* Per-packet tx trace record for the L2 path. */
493 } l2_lisp_gpe_tx_trace_t;
/* vlib format callback: render an l2_lisp_gpe_tx_trace_t for "show trace".
 * Mirrors format_lisp_gpe_tx_trace with an L2-specific label. */
496 format_l2_lisp_gpe_tx_trace (u8 * s, va_list * args)
498 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
499 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
500 l2_lisp_gpe_tx_trace_t * t = va_arg (*args, l2_lisp_gpe_tx_trace_t *);
502 s = format (s, "L2-LISP-GPE-TX: tunnel %d", t->tunnel_index);
/* Apply a negative-mapping action to an L2 buffer: punt to the lisp
 * control plane (tagging the buffer with the MAC overlay AFI) for
 * LISP_SEND_MAP_REQUEST, otherwise drop. */
507 l2_process_tunnel_action (vlib_buffer_t * b0, u8 action, u32 * next0)
509 if (LISP_SEND_MAP_REQUEST == action)
511 next0[0] = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
512 vnet_buffer(b0)->lisp.overlay_afi = LISP_AFI_MAC;
516 next0[0] = L2_LISP_GPE_TX_NEXT_DROP;
/* Process one L2 buffer whose tunnel (pool index ti0) resolved: encap if
 * the tunnel has no negative-mapping action, otherwise apply the action
 * (punt or drop). */
521 l2_process_one (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, u32 ti0, u32 * next0)
523 lisp_gpe_tunnel_t * t0;
525 t0 = pool_elt_at_index(lgm->tunnels, ti0);
528 if (PREDICT_TRUE(LISP_NO_ACTION == t0->action))
530 encap_one_inline (lgm, b0, t0, next0,
531 ip_addr_version(&t0->dst) == IP4 ? 1 : 0);
535 l2_process_tunnel_action(b0, t0->action, next0);
/* Process a pair of resolved L2 buffers. Fast path: both tunnels have no
 * action, so encap both together (using t0's address family for the pair).
 * Otherwise fall back to per-buffer encap/action handling. */
540 l2_process_two (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, vlib_buffer_t * b1,
541 u32 ti0, u32 ti1, u32 * next0, u32 * next1)
543 lisp_gpe_tunnel_t * t0, * t1;
545 t0 = pool_elt_at_index(lgm->tunnels, ti0);
546 t1 = pool_elt_at_index(lgm->tunnels, ti1);
548 ASSERT(0 != t0 && 0 != t1);
550 if (PREDICT_TRUE(LISP_NO_ACTION == t0->action
551 && LISP_NO_ACTION == t1->action))
553 encap_two_inline (lgm, b0, b1, t0, t1, next0, next1,
554 ip_addr_version(&t0->dst) == IP4 ? 1 : 0);
/* Mixed cases: encap the actionless buffer, act on the other. */
558 if (LISP_NO_ACTION == t0->action)
560 encap_one_inline (lgm, b0, t0, next0,
561 ip_addr_version(&t0->dst) == IP4 ? 1 : 0);
562 l2_process_tunnel_action (b1, t1->action, next1);
564 else if (LISP_NO_ACTION == t1->action)
566 encap_one_inline (lgm, b1, t1, next1,
567 ip_addr_version(&t1->dst) == IP4 ? 1 : 0);
568 l2_process_tunnel_action (b0, t0->action, next0);
/* Both tunnels carry an action. */
572 l2_process_tunnel_action (b0, t0->action, next0);
573 l2_process_tunnel_action (b1, t1->action, next1);
/* Tx node for the L2 lisp-gpe interface. For each frame buffer, look up
 * (bd_index, src mac, dst mac) in the lisp L2 FIB; a hit yields a tunnel
 * pool index and the buffer is encapsulated (or the tunnel's action is
 * applied), a miss (~0) punts the buffer to lisp-cp-lookup tagged with
 * the MAC overlay AFI. Returns the number of packets processed.
 * NOTE(review): frame-dequeue lines and several braces are missing from
 * this extract. */
579 l2_lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
580 vlib_frame_t * from_frame)
582 u32 n_left_from, next_index, * from, * to_next;
583 lisp_gpe_main_t * lgm = &lisp_gpe_main;
585 from = vlib_frame_vector_args (from_frame);
586 n_left_from = from_frame->n_vectors;
588 next_index = node->cached_next_index;
590 while (n_left_from > 0)
594 vlib_get_next_frame (vm, node, next_index,
595 to_next, n_left_to_next);
597 while (n_left_from >= 4 && n_left_to_next >= 2)
600 vlib_buffer_t * b0, * b1;
601 u32 next0, next1, ti0, ti1;
602 lisp_gpe_tunnel_t * t0 = 0, * t1 = 0;
603 ethernet_header_t * e0, * e1;
605 next0 = next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
607 /* Prefetch next iteration. */
609 vlib_buffer_t * p2, *p3;
611 p2 = vlib_get_buffer (vm, from[2]);
612 p3 = vlib_get_buffer (vm, from[3]);
614 vlib_prefetch_buffer_header(p2, LOAD);
615 vlib_prefetch_buffer_header(p3, LOAD);
617 CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
618 CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
630 b0 = vlib_get_buffer (vm, bi0);
631 b1 = vlib_get_buffer (vm, bi1);
633 e0 = vlib_buffer_get_current (b0);
634 e1 = vlib_buffer_get_current (b1);
636 /* lookup dst + src mac */
637 ti0 = lisp_l2_fib_lookup (lgm, vnet_buffer(b0)->l2.bd_index,
638 e0->src_address, e0->dst_address);
639 ti1 = lisp_l2_fib_lookup (lgm, vnet_buffer(b1)->l2.bd_index,
640 e1->src_address, e1->dst_address);
/* NOTE(review): PREDICT_TRUE wraps only the ti0 test; it was probably
 * meant to cover the whole conjunction — harmless but worth fixing. */
642 if (PREDICT_TRUE((u32)~0 != ti0) && (u32)~0 != ti1)
644 /* process both tunnels */
645 l2_process_two (lgm, b0, b1, ti0, ti1, &next0, &next1);
651 /* process tunnel for b0 */
652 l2_process_one (lgm, b0, ti0, &next0);
654 /* no tunnel found for b1, send to control plane */
655 next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
656 vnet_buffer(b1)->lisp.overlay_afi = LISP_AFI_MAC;
658 else if ((u32)~0 != ti1)
660 /* process tunnel for b1 */
661 l2_process_one (lgm, b1, ti1, &next1);
663 /* no tunnel found b0, send to control plane */
664 next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
665 vnet_buffer(b0)->lisp.overlay_afi = LISP_AFI_MAC;
669 /* no tunnels found */
670 next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
671 vnet_buffer(b0)->lisp.overlay_afi = LISP_AFI_MAC;
672 next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
673 vnet_buffer(b1)->lisp.overlay_afi = LISP_AFI_MAC;
/* NOTE(review): t0/t1 are never assigned in this path (they stay 0), so
 * t0 - lgm->tunnels does not reflect the tunnel actually used — likely
 * should trace ti0/ti1 instead; confirm against the full file. */
677 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
679 l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
681 tr->tunnel_index = t0 - lgm->tunnels;
683 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
685 l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b1,
687 tr->tunnel_index = t1 - lgm->tunnels;
690 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
691 n_left_to_next, bi0, bi1, next0,
/* Single loop: same steps for one buffer at a time. */
695 while (n_left_from > 0 && n_left_to_next > 0)
698 u32 bi0, ti0, next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
699 ethernet_header_t * e0;
708 b0 = vlib_get_buffer (vm, bi0);
709 e0 = vlib_buffer_get_current (b0);
711 /* lookup dst + src mac */
712 ti0 = lisp_l2_fib_lookup (lgm, vnet_buffer(b0)->l2.bd_index,
713 e0->src_address, e0->dst_address);
715 if (PREDICT_TRUE((u32)~0 != ti0))
717 l2_process_one (lgm, b0, ti0, &next0);
721 /* no tunnel found send to control plane */
722 next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
723 vnet_buffer(b0)->lisp.overlay_afi = LISP_AFI_MAC;
726 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
728 l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
/* NOTE(review): ti0 ? ti0 : ~0 maps valid pool index 0 to ~0; the
 * miss sentinel is ~0, so this likely should be (ti0 != ~0) ? ti0 : ~0. */
730 tr->tunnel_index = ti0 ? ti0 : ~0;
732 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
733 n_left_to_next, bi0, next0);
736 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
739 return from_frame->n_vectors;
/* Device-name format callback: renders "l2_lisp_gpe<N>" for instance N. */
743 format_l2_lisp_gpe_name (u8 * s, va_list * args)
745 u32 dev_instance = va_arg (*args, u32);
746 return format (s, "l2_lisp_gpe%d", dev_instance);
749 VNET_DEVICE_CLASS (l2_lisp_gpe_device_class,static) = {
750 .name = "L2_LISP_GPE",
751 .format_device_name = format_l2_lisp_gpe_name,
752 .format_tx_trace = format_lisp_gpe_tx_trace,
753 .tx_function = l2_lisp_gpe_interface_tx,
754 .no_flatten_output_chains = 1,
758 static vnet_hw_interface_t *
/* Create (or recycle) a lisp_gpeX / l2_lisp_gpeX hw interface for vni,
 * bind it to dp_table (vrf or bd_index) and record the vni<->sw_if_index
 * mappings in tuns. Recycled interfaces are renamed and their counters
 * zeroed before reuse. Returns the hw interface. */
759 create_lisp_gpe_iface (lisp_gpe_main_t * lgm, u32 vni, u32 dp_table,
760 vnet_device_class_t * dev_class,
761 tunnel_lookup_t * tuns)
764 u32 hw_if_index = ~0;
766 vnet_hw_interface_t * hi;
767 vnet_main_t * vnm = lgm->vnet_main;
769 /* create hw lisp_gpeX iface if needed, otherwise reuse existing */
770 flen = vec_len(lgm->free_tunnel_hw_if_indices);
/* Reuse path: pop the last freed hw_if_index. */
773 hw_if_index = lgm->free_tunnel_hw_if_indices[flen - 1];
774 _vec_len(lgm->free_tunnel_hw_if_indices) -= 1;
776 hi = vnet_get_hw_interface (vnm, hw_if_index);
778 /* rename interface */
779 new_name = format (0, "%U", dev_class->format_device_name,
/* NUL-terminate the vector so it can be used as a C string. */
782 vec_add1(new_name, 0);
783 vnet_rename_interface (vnm, hw_if_index, (char *) new_name);
786 /* clear old stats of freed interface before reuse */
787 vnet_interface_main_t * im = &vnm->interface_main;
788 vnet_interface_counter_lock (im);
789 vlib_zero_combined_counter (
790 &im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX],
792 vlib_zero_combined_counter (
793 &im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX],
795 vlib_zero_simple_counter (
796 &im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP],
798 vnet_interface_counter_unlock (im);
/* Fresh path: register a brand-new interface for this vni. */
802 hw_if_index = vnet_register_interface (vnm, dev_class->index, vni,
803 lisp_gpe_hw_class.index, 0);
804 hi = vnet_get_hw_interface (vnm, hw_if_index);
807 hash_set(tuns->hw_if_index_by_dp_table, dp_table, hw_if_index);
809 /* set tunnel termination: post decap, packets are tagged as having been
810 * originated by lisp-gpe interface */
811 hash_set(tuns->sw_if_index_by_vni, vni, hi->sw_if_index);
812 hash_set(tuns->vni_by_sw_if_index, hi->sw_if_index, vni);
/* Tear down a lisp-gpe interface: bring it admin/link down, return its
 * hw_if_index to the free list for later reuse, and remove the
 * dp_table/vni/sw_if_index bindings from tuns. */
818 remove_lisp_gpe_iface (lisp_gpe_main_t * lgm, u32 hi_index, u32 dp_table,
819 tunnel_lookup_t * tuns)
821 vnet_main_t * vnm = lgm->vnet_main;
822 vnet_hw_interface_t * hi;
825 hi = vnet_get_hw_interface (vnm, hi_index);
827 /* disable interface */
828 vnet_sw_interface_set_flags (vnm, hi->sw_if_index, 0/* down */);
829 vnet_hw_interface_set_flags (vnm, hi->hw_if_index, 0/* down */);
830 hash_unset(tuns->hw_if_index_by_dp_table, dp_table);
831 vec_add1(lgm->free_tunnel_hw_if_indices, hi->hw_if_index);
833 /* clean tunnel termination and vni to sw_if_index binding */
834 vnip = hash_get(tuns->vni_by_sw_if_index, hi->sw_if_index);
/* Missing vni binding indicates inconsistent state; warn and continue. */
837 clib_warning ("No vni associated to interface %d", hi->sw_if_index);
840 hash_unset(tuns->sw_if_index_by_vni, vnip[0]);
841 hash_unset(tuns->vni_by_sw_if_index, hi->sw_if_index);
/* Add/remove the L3 lisp-gpe interface for (vni, table_id): creates the
 * interface, wires ingress arcs from the lgpe-ipX-lookup nodes, installs
 * 0/0 default routes pointing at them, wires egress arcs to the tx next
 * nodes, binds the interface into the v4/v6 FIBs and brings it up. The
 * delete path (bottom) undoes the default routes and frees the iface.
 * NOTE(review): guard conditions and return statements around the
 * add/del legs are missing from this extract. */
845 lisp_gpe_add_del_l3_iface (lisp_gpe_main_t * lgm,
846 vnet_lisp_gpe_add_del_iface_args_t * a)
848 vnet_main_t * vnm = lgm->vnet_main;
849 tunnel_lookup_t * l3_ifaces = &lgm->l3_ifaces;
850 vnet_hw_interface_t * hi;
851 u32 lookup_next_index4, lookup_next_index6;
/* Reject duplicate table or vni mappings before creating anything. */
854 hip = hash_get(l3_ifaces->hw_if_index_by_dp_table, a->table_id);
860 clib_warning ("vrf %d already mapped to a vni", a->table_id);
864 si = hash_get(l3_ifaces->sw_if_index_by_vni, a->vni);
867 clib_warning ("Interface for vni %d already exists", a->vni);
871 /* create lisp iface and populate tunnel tables */
872 hi = create_lisp_gpe_iface (lgm, a->vni, a->table_id,
873 &lisp_gpe_device_class, l3_ifaces);
875 /* set ingress arc from lgpe_ipX_lookup */
876 lookup_next_index4 = vlib_node_add_next (lgm->vlib_main,
877 lgpe_ip4_lookup_node.index,
878 hi->output_node_index);
879 lookup_next_index6 = vlib_node_add_next (lgm->vlib_main,
880 lgpe_ip6_lookup_node.index,
881 hi->output_node_index);
882 hash_set(lgm->lgpe_ip4_lookup_next_index_by_table_id, a->table_id,
884 hash_set(lgm->lgpe_ip6_lookup_next_index_by_table_id, a->table_id,
887 /* insert default routes that point to lgpe-ipx-lookup */
888 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */1, 1);
889 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */0, 1);
891 /* set egress arcs */
892 #define _(sym,str) vlib_node_add_named_next_with_slot (vnm->vlib_main, \
893 hi->tx_node_index, str, LISP_GPE_TX_NEXT_##sym);
894 foreach_lisp_gpe_tx_next
897 /* set interface in appropriate v4 and v6 FIBs */
898 lisp_gpe_iface_set_table (hi->sw_if_index, a->table_id, 1);
899 lisp_gpe_iface_set_table (hi->sw_if_index, a->table_id, 0);
901 /* enable interface */
902 vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
903 VNET_SW_INTERFACE_FLAG_ADMIN_UP);
904 vnet_hw_interface_set_flags (vnm, hi->hw_if_index,
905 VNET_HW_INTERFACE_FLAG_LINK_UP);
/* Delete leg: interface must exist for the vrf being removed. */
911 clib_warning("The interface for vrf %d doesn't exist", a->table_id);
915 remove_lisp_gpe_iface (lgm, hip[0], a->table_id, &lgm->l3_ifaces);
917 /* unset default routes */
918 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */1, 0);
919 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */0, 0);
/* Add/remove the L2 lisp-gpe interface for (vni, bd_id): resolves/creates
 * the bridge-domain index, creates the interface, adds it to the bridge
 * domain in L2 mode, wires egress arcs to the L2 tx next nodes and brings
 * it up. The delete leg (bottom) frees the interface.
 * NOTE(review): guard conditions and return statements are missing from
 * this extract. */
926 lisp_gpe_add_del_l2_iface (lisp_gpe_main_t * lgm,
927 vnet_lisp_gpe_add_del_iface_args_t * a)
929 vnet_main_t * vnm = lgm->vnet_main;
930 tunnel_lookup_t * l2_ifaces = &lgm->l2_ifaces;
931 vnet_hw_interface_t * hi;
935 bd_index = bd_find_or_add_bd_index(&bd_main, a->bd_id);
936 hip = hash_get(l2_ifaces->hw_if_index_by_dp_table, bd_index);
/* Reject duplicate bd or vni mappings before creating anything. */
942 clib_warning("bridge domain %d already mapped to a vni", a->bd_id);
946 si = hash_get(l2_ifaces->sw_if_index_by_vni, a->vni);
949 clib_warning ("Interface for vni %d already exists", a->vni);
953 /* create lisp iface and populate tunnel tables */
954 hi = create_lisp_gpe_iface (lgm, a->vni, bd_index,
955 &l2_lisp_gpe_device_class, &lgm->l2_ifaces);
957 /* add iface to l2 bridge domain */
958 set_int_l2_mode (lgm->vlib_main, vnm, MODE_L2_BRIDGE, hi->sw_if_index,
961 /* set egress arcs */
962 #define _(sym,str) vlib_node_add_named_next_with_slot (vnm->vlib_main, \
963 hi->tx_node_index, str, L2_LISP_GPE_TX_NEXT_##sym);
964 foreach_l2_lisp_gpe_tx_next
967 /* enable interface */
968 vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
969 VNET_SW_INTERFACE_FLAG_ADMIN_UP);
970 vnet_hw_interface_set_flags (vnm, hi->hw_if_index,
971 VNET_HW_INTERFACE_FLAG_LINK_UP);
/* Delete leg: interface must exist for the bridge domain being removed. */
977 clib_warning("The interface for bridge domain %d doesn't exist",
981 remove_lisp_gpe_iface (lgm, hip[0], bd_index, &lgm->l2_ifaces);
/* Public entry point: add/remove a lisp-gpe interface, dispatching to the
 * L2 or L3 implementation based on a->is_l2. Fails with
 * VNET_API_ERROR_LISP_DISABLED when LISP is not enabled. */
988 vnet_lisp_gpe_add_del_iface (vnet_lisp_gpe_add_del_iface_args_t * a,
991 lisp_gpe_main_t * lgm = &lisp_gpe_main;
993 if (vnet_lisp_gpe_enable_disable_status() == 0)
995 clib_warning ("LISP is disabled!");
996 return VNET_API_ERROR_LISP_DISABLED;
1000 return lisp_gpe_add_del_l3_iface (lgm, a);
1002 return lisp_gpe_add_del_l2_iface (lgm, a);
1005 static clib_error_t *
/* CLI handler for "lisp gpe iface": parses add/del, vni, and exactly one
 * of vrf (L3) or bd (L2), then calls vnet_lisp_gpe_add_del_iface.
 * NOTE(review): assignments setting is_add/vrf_is_set/vni_is_set and the
 * a->vni/is_add field setup lines are missing from this extract. */
1006 lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input,
1007 vlib_cli_command_t * cmd)
1009 unformat_input_t _line_input, * line_input = &_line_input;
1011 clib_error_t * error = 0;
1013 u32 table_id, vni, bd_id;
1014 u8 vni_is_set = 0, vrf_is_set = 0, bd_index_is_set = 0;
1016 vnet_lisp_gpe_add_del_iface_args_t _a, * a = &_a;
1018 /* Get a line of input. */
1019 if (! unformat_user (input, unformat_line_input, line_input))
1022 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
1024 if (unformat (line_input, "add"))
1026 else if (unformat (line_input, "del"))
1028 else if (unformat (line_input, "vrf %d", &table_id))
1032 else if (unformat (line_input, "vni %d", &vni))
1036 else if (unformat (line_input, "bd %d", &bd_id))
1038 bd_index_is_set = 1;
1042 return clib_error_return (0, "parse error: '%U'",
1043 format_unformat_error, line_input);
/* Argument validation: vni plus exactly one of vrf/bd is required.
 * NOTE(review): "brdige" typo in the user-visible message below — a
 * runtime string, so not changed in this documentation-only pass. */
1047 if (vrf_is_set && bd_index_is_set)
1048 return clib_error_return(0, "Cannot set both vrf and brdige domain index!");
1051 return clib_error_return(0, "vni must be set!");
1053 if (!vrf_is_set && !bd_index_is_set)
1054 return clib_error_return(0, "vrf or bridge domain index must be set!");
1057 a->dp_table = vrf_is_set ? table_id : bd_id;
1059 a->is_l2 = bd_index_is_set;
1061 rv = vnet_lisp_gpe_add_del_iface (a, 0);
1064 error = clib_error_return(0, "failed to %s gpe iface!",
1065 is_add ? "add" : "delete");
/* CLI command registration for "lisp gpe iface".
 * NOTE(review): the closing brace of this initializer is not visible in
 * this extract. */
1071 VLIB_CLI_COMMAND (add_del_lisp_gpe_iface_command, static) = {
1072 .path = "lisp gpe iface",
1073 .short_help = "lisp gpe iface add/del vni <vni> vrf <vrf>",
1074 .function = lisp_gpe_add_del_iface_command_fn,