2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
18 * @brief Common utility functions for LISP-GPE interfaces.
22 #include <vppinfra/error.h>
23 #include <vppinfra/hash.h>
24 #include <vnet/vnet.h>
25 #include <vnet/ip/ip.h>
26 #include <vnet/ip/udp.h>
27 #include <vnet/ethernet/ethernet.h>
28 #include <vnet/lisp-gpe/lisp_gpe.h>
/* TX next-node dispatch set for the L3 LISP-GPE interface output node. */
30 #define foreach_lisp_gpe_tx_next \
31 _(DROP, "error-drop") \
32 _(IP4_LOOKUP, "ip4-lookup") \
33 _(IP6_LOOKUP, "ip6-lookup")
/* Expands the list into LISP_GPE_TX_NEXT_* enum members.
 * NOTE(review): the enum/struct opening lines are not visible in this chunk. */
37 #define _(sym,str) LISP_GPE_TX_NEXT_##sym,
38 foreach_lisp_gpe_tx_next
46 } lisp_gpe_tx_trace_t;
/* Trace formatter for the L3 LISP-GPE TX node: prints the tunnel index
 * recorded in the per-packet trace record. */
49 format_lisp_gpe_tx_trace (u8 * s, va_list * args)
51 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
52 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
53 lisp_gpe_tx_trace_t *t = va_arg (*args, lisp_gpe_tx_trace_t *);
55 s = format (s, "LISP-GPE-TX: tunnel %d", t->tunnel_index);
/* Resolve the LISP-GPE tunnel for buffer b0.
 * The buffer's TX adjacency (set by the SD FIB lookup) carries the tunnel
 * index in its if_address_index field; is_v4 selects the v4 or v6 lookup
 * main from which to fetch the adjacency. Result is written to t0[0]. */
60 get_one_tunnel_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0,
61 lisp_gpe_tunnel_t ** t0, u8 is_v4)
63 u32 adj_index0, tunnel_index0;
66 /* Get adjacency and from it the tunnel_index */
67 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
70 adj0 = ip_get_adjacency (lgm->lm4, adj_index0);
72 adj0 = ip_get_adjacency (lgm->lm6, adj_index0);
74 tunnel_index0 = adj0->if_address_index;
75 t0[0] = pool_elt_at_index (lgm->tunnels, tunnel_index0);
/* Encapsulate one buffer with the tunnel's (sub-tunnel) rewrite string.
 * The sub-tunnel is chosen by flow hash modulo the load-balance vector
 * length; 36/56 are the ip4/ip6 udp-lisp-gpe header sizes asserted below.
 * next0[0] is set to the matching ipX-lookup next node. */
81 encap_one_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0,
82 lisp_gpe_tunnel_t * t0, u32 * next0)
84 ASSERT (sizeof (ip4_udp_lisp_gpe_header_t) == 36);
85 ASSERT (sizeof (ip6_udp_lisp_gpe_header_t) == 56);
87 lisp_gpe_sub_tunnel_t *st0;
/* Pick sub-tunnel index from the load-balance vector using the flow hash. */
90 sti0 = vec_elt_at_index (t0->sub_tunnels_lbv,
91 vnet_buffer (b0)->ip.flow_hash %
92 t0->sub_tunnels_lbv_count);
93 st0 = vec_elt_at_index (t0->sub_tunnels, sti0[0]);
/* ip4 underlay: prepend 36-byte rewrite; ip6 underlay: 56-byte rewrite. */
96 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 36, 1);
97 next0[0] = LISP_GPE_TX_NEXT_IP4_LOOKUP;
101 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 56, 0);
102 next0[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP;
105 /* Reset to look up tunnel partner in the configured FIB */
106 vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
/* Dual-buffer variant of get_one_tunnel_inline: resolves tunnels for b0 and
 * b1 from their TX adjacencies. Both buffers must share the same address
 * family (is_v4) — callers fall back to two get_one calls otherwise. */
110 get_two_tunnels_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0,
111 vlib_buffer_t * b1, lisp_gpe_tunnel_t ** t0,
112 lisp_gpe_tunnel_t ** t1, u8 is_v4)
114 u32 adj_index0, adj_index1, tunnel_index0, tunnel_index1;
115 ip_adjacency_t *adj0, *adj1;
117 /* Get adjacency and from it the tunnel_index */
118 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
119 adj_index1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];
123 adj0 = ip_get_adjacency (lgm->lm4, adj_index0);
124 adj1 = ip_get_adjacency (lgm->lm4, adj_index1);
128 adj0 = ip_get_adjacency (lgm->lm6, adj_index0);
129 adj1 = ip_get_adjacency (lgm->lm6, adj_index1);
/* Tunnel index is carried in the adjacency's if_address_index field. */
132 tunnel_index0 = adj0->if_address_index;
133 tunnel_index1 = adj1->if_address_index;
135 t0[0] = pool_elt_at_index (lgm->tunnels, tunnel_index0);
136 t1[0] = pool_elt_at_index (lgm->tunnels, tunnel_index1);
/* Dual-buffer encap: selects a sub-tunnel per buffer by flow hash and
 * prepends the rewrite. Fast path when both sub-tunnels share an address
 * family; otherwise each buffer is encapsulated for its own family. */
143 encap_two_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0,
144 vlib_buffer_t * b1, lisp_gpe_tunnel_t * t0,
145 lisp_gpe_tunnel_t * t1, u32 * next0, u32 * next1)
147 ASSERT (sizeof (ip4_udp_lisp_gpe_header_t) == 36);
148 ASSERT (sizeof (ip6_udp_lisp_gpe_header_t) == 56);
150 lisp_gpe_sub_tunnel_t *st0, *st1;
/* Sub-tunnel selection: flow hash modulo load-balance vector length. */
152 sti0 = vec_elt_at_index (t0->sub_tunnels_lbv,
153 vnet_buffer (b0)->ip.flow_hash %
154 t0->sub_tunnels_lbv_count);
156 vec_elt_at_index (t1->sub_tunnels_lbv,
157 vnet_buffer (b1)->ip.flow_hash %
158 t1->sub_tunnels_lbv_count);
159 st0 = vec_elt_at_index (t0->sub_tunnels, sti0[0]);
160 st1 = vec_elt_at_index (t1->sub_tunnels, sti1[0]);
/* Same address family for both: encap both as ip4 or both as ip6. */
162 if (PREDICT_TRUE (st0->is_ip4 == st1->is_ip4))
166 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 36, 1);
167 ip_udp_encap_one (lgm->vlib_main, b1, st1->rewrite, 36, 1);
168 next0[0] = next1[0] = LISP_GPE_TX_NEXT_IP4_LOOKUP;
172 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 56, 0);
173 ip_udp_encap_one (lgm->vlib_main, b1, st1->rewrite, 56, 0);
174 next0[0] = next1[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP;
/* Mixed families: encap each buffer independently.
 * NOTE(review): the 56-byte (ip6) rewrites below pass is_ip4 == 1, unlike
 * the ip6 paths above which pass 0 — looks like a latent bug; verify
 * against ip_udp_encap_one's is_ip4 semantics. */
181 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 36, 1);
182 ip_udp_encap_one (lgm->vlib_main, b1, st1->rewrite, 56, 1);
183 next0[0] = LISP_GPE_TX_NEXT_IP4_LOOKUP;
184 next1[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP;
188 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 56, 1);
189 ip_udp_encap_one (lgm->vlib_main, b1, st1->rewrite, 36, 1);
190 next0[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP;
191 next1[0] = LISP_GPE_TX_NEXT_IP4_LOOKUP;
195 /* Reset to look up tunnel partner in the configured FIB */
196 vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
197 vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
/* True iff the IP version nibble of the header at _h is 4 (IPv4 packet).
 * Fully parenthesized: the original expansion lacked outer parentheses, so
 * e.g. `!is_v4_packet(h)` parsed as `(!(x & 0xF0)) == 0x40` — always false
 * for non-zero nibbles — instead of negating the comparison. */
#define is_v4_packet(_h) (((*(u8 *) _h) & 0xF0) == 0x40)
203 * @brief LISP-GPE interface TX (encap) function.
204 * @node lisp_gpe_interface_tx
206 * The LISP-GPE interface TX (encap) function.
208 * Looks up the associated tunnel based on the adjacency hit in the SD FIB
209 * and if the tunnel is multihomed it uses the flow hash to determine
210 * sub-tunnel, and rewrite string, to be used to encapsulate the packet.
212 * @param[in] vm vlib_main_t corresponding to the current thread.
213 * @param[in] node vlib_node_runtime_t data for this node.
214 * @param[in] frame vlib_frame_t whose contents should be dispatched.
216 * @return number of vectors in frame.
/* L3 LISP-GPE interface TX node function: for each packet, resolves the
 * tunnel from the TX adjacency, encapsulates, and enqueues to ip4/ip6
 * lookup. Standard VPP dual-loop + single-loop structure. */
219 lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
220 vlib_frame_t * from_frame)
222 u32 n_left_from, next_index, *from, *to_next;
223 lisp_gpe_main_t *lgm = &lisp_gpe_main;
225 from = vlib_frame_vector_args (from_frame);
226 n_left_from = from_frame->n_vectors;
228 next_index = node->cached_next_index;
230 while (n_left_from > 0)
234 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* Dual loop: process two packets per iteration while prefetching the
 * next two buffers' headers and data. */
236 while (n_left_from >= 4 && n_left_to_next >= 2)
239 vlib_buffer_t *b0, *b1;
241 lisp_gpe_tunnel_t *t0 = 0, *t1 = 0;
242 u8 is_v4_eid0, is_v4_eid1;
244 next0 = next1 = LISP_GPE_TX_NEXT_IP4_LOOKUP;
246 /* Prefetch next iteration. */
248 vlib_buffer_t *p2, *p3;
250 p2 = vlib_get_buffer (vm, from[2]);
251 p3 = vlib_get_buffer (vm, from[3]);
253 vlib_prefetch_buffer_header (p2, LOAD);
254 vlib_prefetch_buffer_header (p3, LOAD);
256 CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
257 CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
269 b0 = vlib_get_buffer (vm, bi0);
270 b1 = vlib_get_buffer (vm, bi1);
/* Classify the inner (EID) packets by IP version nibble. */
272 is_v4_eid0 = is_v4_packet (vlib_buffer_get_current (b0));
273 is_v4_eid1 = is_v4_packet (vlib_buffer_get_current (b1));
275 if (PREDICT_TRUE (is_v4_eid0 == is_v4_eid1))
277 get_two_tunnels_inline (lgm, b0, b1, &t0, &t1,
282 get_one_tunnel_inline (lgm, b0, &t0, is_v4_eid0 ? 1 : 0);
283 get_one_tunnel_inline (lgm, b1, &t1, is_v4_eid1 ? 1 : 0);
286 encap_two_inline (lgm, b0, b1, t0, t1, &next0, &next1);
288 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
290 lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
292 tr->tunnel_index = t0 - lgm->tunnels;
294 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
296 lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b1,
298 tr->tunnel_index = t1 - lgm->tunnels;
301 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
302 n_left_to_next, bi0, bi1, next0,
/* Single loop: handles the remaining packets one at a time. */
306 while (n_left_from > 0 && n_left_to_next > 0)
309 u32 bi0, next0 = LISP_GPE_TX_NEXT_IP4_LOOKUP;
310 lisp_gpe_tunnel_t *t0 = 0;
320 b0 = vlib_get_buffer (vm, bi0);
322 is_v4_0 = is_v4_packet (vlib_buffer_get_current (b0));
323 get_one_tunnel_inline (lgm, b0, &t0, is_v4_0 ? 1 : 0);
325 encap_one_inline (lgm, b0, t0, &next0);
327 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
329 lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
331 tr->tunnel_index = t0 - lgm->tunnels;
333 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
334 n_left_to_next, bi0, next0);
337 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
340 return from_frame->n_vectors;
/* Device-name formatter: renders "lisp_gpe<dev_instance>". */
344 format_lisp_gpe_name (u8 * s, va_list * args)
346 u32 dev_instance = va_arg (*args, u32);
347 return format (s, "lisp_gpe%d", dev_instance);
/* Device class for the L3 LISP-GPE interface: name/trace formatters and the
 * TX (encap) function. NOTE(review): the .name member line is not visible
 * in this chunk. */
351 VNET_DEVICE_CLASS (lisp_gpe_device_class,static) = {
353 .format_device_name = format_lisp_gpe_name,
354 .format_tx_trace = format_lisp_gpe_tx_trace,
355 .tx_function = lisp_gpe_interface_tx,
356 .no_flatten_output_chains = 1,
/* Placeholder set_rewrite callback for the lisp_gpe HW interface class.
 * NOTE(review): body is not visible in this chunk — presumably a no-op. */
361 dummy_set_rewrite (vnet_main_t * vnm, u32 sw_if_index, u32 l3_type,
362 void *dst_address, void *rewrite, uword max_rewrite_bytes)
/* Formats a LISP-GPE header for display: flag bits, version/reserved
 * fields, next protocol and instance ID (printed in decimal and hex).
 * Truncates the output if max_header_bytes is exceeded. */
368 format_lisp_gpe_header_with_length (u8 * s, va_list * args)
370 lisp_gpe_header_t *h = va_arg (*args, lisp_gpe_header_t *);
371 u32 max_header_bytes = va_arg (*args, u32);
374 header_bytes = sizeof (h[0]);
375 if (max_header_bytes != 0 && header_bytes > max_header_bytes)
376 return format (s, "lisp-gpe header truncated");
/* Print the name of each flag bit that is set. */
378 s = format (s, "flags: ");
379 #define _(n,v) if (h->flags & v) s = format (s, "%s ", #n);
380 foreach_lisp_gpe_flag_bit;
383 s = format (s, "\n ver_res %d res %d next_protocol %d iid %d(%x)",
384 h->ver_res, h->res, h->next_protocol,
385 clib_net_to_host_u32 (h->iid), clib_net_to_host_u32 (h->iid));
/* HW interface class shared by LISP-GPE interfaces: header formatter and
 * the no-op rewrite hook. */
390 VNET_HW_INTERFACE_CLASS (lisp_gpe_hw_class) = {
392 .format_header = format_lisp_gpe_header_with_length,
393 .set_rewrite = dummy_set_rewrite,
/* Add or delete an ip4/ip6 route for dst_prefix in table_id using add_adj.
 * On add, looks the route back up to return its adjacency index via
 * adj_index; warns if the insert cannot be found afterwards. */
398 add_del_ip_prefix_route (ip_prefix_t * dst_prefix, u32 table_id,
399 ip_adjacency_t * add_adj, u8 is_add, u32 * adj_index)
403 if (ip_prefix_version (dst_prefix) == IP4)
405 ip4_main_t *im4 = &ip4_main;
406 ip4_add_del_route_args_t a;
407 ip4_address_t addr = ip_prefix_v4 (dst_prefix);
409 memset (&a, 0, sizeof (a));
410 a.flags = IP4_ROUTE_FLAG_TABLE_ID;
411 a.table_index_or_table_id = table_id;
413 a.dst_address_length = ip_prefix_len (dst_prefix);
414 a.dst_address = addr;
415 a.flags |= is_add ? IP4_ROUTE_FLAG_ADD : IP4_ROUTE_FLAG_DEL;
417 a.n_add_adj = is_add ? 1 : 0;
419 ip4_add_del_route (im4, &a);
/* On add, read back the route to recover the adjacency index. */
423 p = ip4_get_route (im4, table_id, 0, addr.as_u8,
424 ip_prefix_len (dst_prefix));
427 clib_warning ("Failed to insert route for eid %U!",
428 format_ip4_address_and_length, addr.as_u8,
429 ip_prefix_len (dst_prefix));
/* IPv6 branch: mirrors the v4 logic with the ip6 route API. */
437 ip6_main_t *im6 = &ip6_main;
438 ip6_add_del_route_args_t a;
439 ip6_address_t addr = ip_prefix_v6 (dst_prefix);
441 memset (&a, 0, sizeof (a));
442 a.flags = IP6_ROUTE_FLAG_TABLE_ID;
443 a.table_index_or_table_id = table_id;
445 a.dst_address_length = ip_prefix_len (dst_prefix);
446 a.dst_address = addr;
447 a.flags |= is_add ? IP6_ROUTE_FLAG_ADD : IP6_ROUTE_FLAG_DEL;
449 a.n_add_adj = is_add ? 1 : 0;
451 ip6_add_del_route (im6, &a);
455 adj_index[0] = ip6_get_route (im6, table_id, 0, &addr,
456 ip_prefix_len (dst_prefix));
457 if (adj_index[0] == 0)
459 clib_warning ("Failed to insert route for eid %U!",
460 format_ip6_address_and_length, addr.as_u8,
461 ip_prefix_len (dst_prefix));
/* Install (or remove) a 0/0 default route in table_id that steers all
 * otherwise-unmatched traffic to lgpe-ipX-lookup. The adjacency uses
 * sw_if_index ~0 as the "default route" marker. */
470 add_del_lisp_gpe_default_route (u32 table_id, u8 is_v4, u8 is_add)
472 lisp_gpe_main_t *lgm = &lisp_gpe_main;
477 /* setup adjacency */
478 memset (&adj, 0, sizeof (adj));
481 adj.explicit_fib_index = ~0;
482 adj.lookup_next_index = is_v4 ? lgm->ip4_lookup_next_lgpe_ip4_lookup :
483 lgm->ip6_lookup_next_lgpe_ip6_lookup;
484 /* default route has tunnel_index ~0 */
485 adj.rewrite_header.sw_if_index = ~0;
487 /* set prefix to 0/0 */
488 memset (&prefix, 0, sizeof (prefix));
489 ip_prefix_version (&prefix) = is_v4 ? IP4 : IP6;
491 /* add/delete route for prefix */
492 add_del_ip_prefix_route (&prefix, table_id, &adj, is_add, &adj_index);
/* Bind sw_if_index into the v4 or v6 FIB identified by table_id; the FIB
 * is created on demand by the find_*_by_table_index_or_id helpers. */
496 lisp_gpe_iface_set_table (u32 sw_if_index, u32 table_id, u8 is_ip4)
500 ip4_main_t *im4 = &ip4_main;
502 fib = find_ip4_fib_by_table_index_or_id (im4, table_id,
503 IP4_ROUTE_FLAG_TABLE_ID);
505 /* fib's created if it doesn't exist */
508 vec_validate (im4->fib_index_by_sw_if_index, sw_if_index);
509 im4->fib_index_by_sw_if_index[sw_if_index] = fib->index;
/* IPv6: same binding via the ip6 FIB table. */
513 ip6_main_t *im6 = &ip6_main;
515 fib = find_ip6_fib_by_table_index_or_id (im6, table_id,
516 IP6_ROUTE_FLAG_TABLE_ID);
518 /* fib's created if it doesn't exist */
521 vec_validate (im6->fib_index_by_sw_if_index, sw_if_index);
522 im6->fib_index_by_sw_if_index[sw_if_index] = fib->index;
/* TX next-node dispatch set for the L2 LISP-GPE interface; adds a
 * lisp-cp-lookup next for packets with no matching tunnel. */
526 #define foreach_l2_lisp_gpe_tx_next \
527 _(DROP, "error-drop") \
528 _(IP4_LOOKUP, "ip4-lookup") \
529 _(IP6_LOOKUP, "ip6-lookup") \
530 _(LISP_CP_LOOKUP, "lisp-cp-lookup")
/* Expand into L2_LISP_GPE_TX_NEXT_* enum members.
 * NOTE(review): enum/struct opening lines are not visible in this chunk. */
534 #define _(sym,str) L2_LISP_GPE_TX_NEXT_##sym,
535 foreach_l2_lisp_gpe_tx_next
537 L2_LISP_GPE_TX_N_NEXT,
538 } l2_lisp_gpe_tx_next_t;
543 } l2_lisp_gpe_tx_trace_t;
/* Trace formatter for the L2 LISP-GPE TX node: prints the tunnel index. */
546 format_l2_lisp_gpe_tx_trace (u8 * s, va_list * args)
548 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
549 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
550 l2_lisp_gpe_tx_trace_t *t = va_arg (*args, l2_lisp_gpe_tx_trace_t *);
552 s = format (s, "L2-LISP-GPE-TX: tunnel %d", t->tunnel_index);
/* Apply a negative-mapping action: LISP_SEND_MAP_REQUEST punts the packet
 * to the control plane (tagged MAC AFI); any other action drops it. */
557 l2_process_tunnel_action (vlib_buffer_t * b0, u8 action, u32 * next0)
559 if (LISP_SEND_MAP_REQUEST == action)
561 next0[0] = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
562 vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC;
/* Any other action: drop the packet. */
566 next0[0] = L2_LISP_GPE_TX_NEXT_DROP;
/* Compute a flow hash over the IP header at data, dispatching on the
 * version nibble to the v4 or v6 hash helper. */
571 ip_flow_hash (void *data)
573 ip4_header_t *iph = (ip4_header_t *) data;
575 if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
576 return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
578 return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
/* Flow hash for an L2 frame: if the ethertype is IPv4/IPv6 the inner IP
 * header is hashed, then the result is mixed with the source and
 * destination MACs via hash_mix64. */
582 l2_flow_hash (vlib_buffer_t * b0)
584 ethernet_header_t *eh;
586 uword is_ip, eh_size;
589 eh = vlib_buffer_get_current (b0);
590 eh_type = clib_net_to_host_u16 (eh->type);
591 eh_size = ethernet_buffer_header_size (b0);
593 is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);
595 /* since we have 2 cache lines, use them */
/* Hash the inner IP header located just past the ethernet header. */
597 a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
601 b = mac_to_u64 ((u8 *) eh->dst_address);
602 c = mac_to_u64 ((u8 *) eh->src_address);
603 hash_mix64 (a, b, c);
/* Process one L2 packet for tunnel ti0: if the mapping carries no negative
 * action, compute the flow hash (only needed when multihomed) and encap;
 * otherwise apply the tunnel action (punt to CP or drop). */
609 l2_process_one (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, u32 ti0,
612 lisp_gpe_tunnel_t *t0;
614 t0 = pool_elt_at_index (lgm->tunnels, ti0);
617 if (PREDICT_TRUE (LISP_NO_ACTION == t0->action))
619 /* compute 'flow' hash */
620 if (PREDICT_TRUE (t0->sub_tunnels_lbv_count > 1))
621 vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
622 encap_one_inline (lgm, b0, t0, next0);
626 l2_process_tunnel_action (b0, t0->action, next0);
/* Dual-buffer variant of l2_process_one. Fast path when neither tunnel has
 * a negative action; otherwise falls back to per-buffer handling, applying
 * the action to whichever buffer's tunnel requires it. */
631 l2_process_two (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, vlib_buffer_t * b1,
632 u32 ti0, u32 ti1, u32 * next0, u32 * next1)
634 lisp_gpe_tunnel_t *t0, *t1;
636 t0 = pool_elt_at_index (lgm->tunnels, ti0);
637 t1 = pool_elt_at_index (lgm->tunnels, ti1);
639 ASSERT (0 != t0 && 0 != t1);
641 if (PREDICT_TRUE (LISP_NO_ACTION == t0->action
642 && LISP_NO_ACTION == t1->action))
/* Flow hash only matters when the tunnel is multihomed. */
644 if (PREDICT_TRUE (t0->sub_tunnels_lbv_count > 1))
645 vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
646 if (PREDICT_TRUE (t1->sub_tunnels_lbv_count > 1))
647 vnet_buffer (b1)->ip.flow_hash = l2_flow_hash (b1);
648 encap_two_inline (lgm, b0, b1, t0, t1, next0, next1);
/* Mixed: encap the action-free buffer, apply the action to the other. */
652 if (LISP_NO_ACTION == t0->action)
654 if (PREDICT_TRUE (t0->sub_tunnels_lbv_count > 1))
655 vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
656 encap_one_inline (lgm, b0, t0, next0);
657 l2_process_tunnel_action (b1, t1->action, next1);
659 else if (LISP_NO_ACTION == t1->action)
661 if (PREDICT_TRUE (t1->sub_tunnels_lbv_count > 1))
662 vnet_buffer (b1)->ip.flow_hash = l2_flow_hash (b1);
663 encap_one_inline (lgm, b1, t1, next1);
664 l2_process_tunnel_action (b0, t0->action, next0);
/* Both tunnels carry actions. */
668 l2_process_tunnel_action (b0, t0->action, next0);
669 l2_process_tunnel_action (b1, t1->action, next1);
675 * @brief LISP-GPE interface TX (encap) function for L2 overlays.
676 * @node l2_lisp_gpe_interface_tx
678 * The L2 LISP-GPE interface TX (encap) function.
680 * Uses bridge domain index, source and destination ethernet addresses to
681 lookup tunnel. If the tunnel is multihomed a flow hash is used to determine
682 * the sub-tunnel and therefore the rewrite string to be used to encapsulate
685 * @param[in] vm vlib_main_t corresponding to the current thread.
686 * @param[in] node vlib_node_runtime_t data for this node.
687 * @param[in] frame vlib_frame_t whose contents should be dispatched.
689 * @return number of vectors in frame.
/* L2 LISP-GPE interface TX node function: looks up the tunnel by bridge
 * domain + src/dst MAC, encapsulates matched packets, and punts unmatched
 * packets to lisp-cp-lookup. Standard VPP dual-loop + single-loop. */
692 l2_lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
693 vlib_frame_t * from_frame)
695 u32 n_left_from, next_index, *from, *to_next;
696 lisp_gpe_main_t *lgm = &lisp_gpe_main;
698 from = vlib_frame_vector_args (from_frame);
699 n_left_from = from_frame->n_vectors;
701 next_index = node->cached_next_index;
703 while (n_left_from > 0)
707 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
709 while (n_left_from >= 4 && n_left_to_next >= 2)
712 vlib_buffer_t *b0, *b1;
713 u32 next0, next1, ti0, ti1;
714 lisp_gpe_tunnel_t *t0 = 0, *t1 = 0;
715 ethernet_header_t *e0, *e1;
717 next0 = next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
719 /* Prefetch next iteration. */
721 vlib_buffer_t *p2, *p3;
723 p2 = vlib_get_buffer (vm, from[2]);
724 p3 = vlib_get_buffer (vm, from[3]);
726 vlib_prefetch_buffer_header (p2, LOAD);
727 vlib_prefetch_buffer_header (p3, LOAD);
729 CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
730 CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
742 b0 = vlib_get_buffer (vm, bi0);
743 b1 = vlib_get_buffer (vm, bi1);
745 e0 = vlib_buffer_get_current (b0);
746 e1 = vlib_buffer_get_current (b1);
748 /* lookup dst + src mac */
749 ti0 = lisp_l2_fib_lookup (lgm, vnet_buffer (b0)->l2.bd_index,
750 e0->src_address, e0->dst_address);
751 ti1 = lisp_l2_fib_lookup (lgm, vnet_buffer (b1)->l2.bd_index,
752 e1->src_address, e1->dst_address);
754 if (PREDICT_TRUE ((u32) ~ 0 != ti0) && (u32) ~ 0 != ti1)
756 /* process both tunnels */
757 l2_process_two (lgm, b0, b1, ti0, ti1, &next0, &next1);
761 if ((u32) ~ 0 != ti0)
763 /* process tunnel for b0 */
764 l2_process_one (lgm, b0, ti0, &next0);
766 /* no tunnel found for b1, send to control plane */
767 next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
768 vnet_buffer (b1)->lisp.overlay_afi = LISP_AFI_MAC;
770 else if ((u32) ~ 0 != ti1)
772 /* process tunnel for b1 */
773 l2_process_one (lgm, b1, ti1, &next1);
775 /* no tunnel found b0, send to control plane */
776 next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
777 vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC;
781 /* no tunnels found */
782 next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
783 vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC;
784 next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
785 vnet_buffer (b1)->lisp.overlay_afi = LISP_AFI_MAC;
/* NOTE(review): t0/t1 are never assigned in this loop (l2_process_*
 * resolve tunnels locally), so these trace indices look bogus — verify. */
789 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
791 l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
793 tr->tunnel_index = t0 - lgm->tunnels;
795 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
797 l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b1,
799 tr->tunnel_index = t1 - lgm->tunnels;
802 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
803 n_left_to_next, bi0, bi1, next0,
/* Single loop: remaining packets one at a time. */
807 while (n_left_from > 0 && n_left_to_next > 0)
810 u32 bi0, ti0, next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
811 ethernet_header_t *e0;
820 b0 = vlib_get_buffer (vm, bi0);
821 e0 = vlib_buffer_get_current (b0);
823 /* lookup dst + src mac */
824 ti0 = lisp_l2_fib_lookup (lgm, vnet_buffer (b0)->l2.bd_index,
825 e0->src_address, e0->dst_address);
827 if (PREDICT_TRUE ((u32) ~ 0 != ti0))
829 l2_process_one (lgm, b0, ti0, &next0);
833 /* no tunnel found send to control plane */
834 next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
835 vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC;
838 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
840 l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
/* NOTE(review): `ti0 ? ti0 : ~0` treats valid tunnel index 0 as "no
 * tunnel"; the not-found sentinel above is ~0 — verify intent. */
842 tr->tunnel_index = ti0 ? ti0 : ~0;
844 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
845 n_left_to_next, bi0, next0);
848 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
851 return from_frame->n_vectors;
/* Device-name formatter: renders "l2_lisp_gpe<dev_instance>". */
855 format_l2_lisp_gpe_name (u8 * s, va_list * args)
857 u32 dev_instance = va_arg (*args, u32);
858 return format (s, "l2_lisp_gpe%d", dev_instance);
/* Device class for the L2 LISP-GPE interface.
 * Fix: use the L2-specific trace formatter so traces print
 * "L2-LISP-GPE-TX" — the original wired in the L3 formatter
 * (format_lisp_gpe_tx_trace) while format_l2_lisp_gpe_tx_trace,
 * defined above for l2_lisp_gpe_tx_trace_t, went unused. */
862 VNET_DEVICE_CLASS (l2_lisp_gpe_device_class,static) = {
863 .name = "L2_LISP_GPE",
864 .format_device_name = format_l2_lisp_gpe_name,
865 .format_tx_trace = format_l2_lisp_gpe_tx_trace,
866 .tx_function = l2_lisp_gpe_interface_tx,
867 .no_flatten_output_chains = 1,
/* Create (or reuse from the free list) a lisp_gpeX/l2_lisp_gpeX HW
 * interface for vni, record it in the tunnel lookup tables keyed by
 * dp_table (vrf or bd index), and return the hw interface. Reused
 * interfaces are renamed and have their counters zeroed. */
static vnet_hw_interface_t *
872 create_lisp_gpe_iface (lisp_gpe_main_t * lgm, u32 vni, u32 dp_table,
873 vnet_device_class_t * dev_class,
874 tunnel_lookup_t * tuns)
877 u32 hw_if_index = ~0;
879 vnet_hw_interface_t *hi;
880 vnet_main_t *vnm = lgm->vnet_main;
882 /* create hw lisp_gpeX iface if needed, otherwise reuse existing */
883 flen = vec_len (lgm->free_tunnel_hw_if_indices);
/* Pop an interface off the free list and rename it for this vni. */
886 hw_if_index = lgm->free_tunnel_hw_if_indices[flen - 1];
887 _vec_len (lgm->free_tunnel_hw_if_indices) -= 1;
889 hi = vnet_get_hw_interface (vnm, hw_if_index);
891 /* rename interface */
892 new_name = format (0, "%U", dev_class->format_device_name, vni);
894 vec_add1 (new_name, 0);
895 vnet_rename_interface (vnm, hw_if_index, (char *) new_name);
898 /* clear old stats of freed interface before reuse */
899 vnet_interface_main_t *im = &vnm->interface_main;
900 vnet_interface_counter_lock (im);
901 vlib_zero_combined_counter (&im->combined_sw_if_counters
902 [VNET_INTERFACE_COUNTER_TX],
904 vlib_zero_combined_counter (&im->combined_sw_if_counters
905 [VNET_INTERFACE_COUNTER_RX],
907 vlib_zero_simple_counter (&im->sw_if_counters
908 [VNET_INTERFACE_COUNTER_DROP],
910 vnet_interface_counter_unlock (im);
/* No free interface available: register a brand new one. */
914 hw_if_index = vnet_register_interface (vnm, dev_class->index, vni,
915 lisp_gpe_hw_class.index, 0);
916 hi = vnet_get_hw_interface (vnm, hw_if_index);
919 hash_set (tuns->hw_if_index_by_dp_table, dp_table, hw_if_index);
921 /* set tunnel termination: post decap, packets are tagged as having been
922 * originated by lisp-gpe interface */
923 hash_set (tuns->sw_if_index_by_vni, vni, hi->sw_if_index);
924 hash_set (tuns->vni_by_sw_if_index, hi->sw_if_index, vni);
/* Tear down a LISP-GPE interface: bring it down, unmap it from dp_table,
 * push it onto the free list for reuse, and remove the vni<->sw_if_index
 * bindings from the tunnel lookup tables. */
930 remove_lisp_gpe_iface (lisp_gpe_main_t * lgm, u32 hi_index, u32 dp_table,
931 tunnel_lookup_t * tuns)
933 vnet_main_t *vnm = lgm->vnet_main;
934 vnet_hw_interface_t *hi;
937 hi = vnet_get_hw_interface (vnm, hi_index);
939 /* disable interface */
940 vnet_sw_interface_set_flags (vnm, hi->sw_if_index, 0 /* down */ );
941 vnet_hw_interface_set_flags (vnm, hi->hw_if_index, 0 /* down */ );
942 hash_unset (tuns->hw_if_index_by_dp_table, dp_table);
943 vec_add1 (lgm->free_tunnel_hw_if_indices, hi->hw_if_index);
945 /* clean tunnel termination and vni to sw_if_index binding */
946 vnip = hash_get (tuns->vni_by_sw_if_index, hi->sw_if_index);
949 clib_warning ("No vni associated to interface %d", hi->sw_if_index);
952 hash_unset (tuns->sw_if_index_by_vni, vnip[0]);
953 hash_unset (tuns->vni_by_sw_if_index, hi->sw_if_index);
957 * @brief Add/del LISP-GPE L3 interface.
959 * Creates LISP-GPE interface, sets ingress arcs from lisp_gpeX_lookup,
960 * installs default routes that attract all traffic with no more specific
961 * routes to lgpe-ipx-lookup, set egress arcs to ipx-lookup, sets
962 * the interface in the right vrf and enables it.
964 * @param[in] lgm Reference to @ref lisp_gpe_main_t.
965 * @param[in] a Parameters to create interface.
967 * @return 0 on success; non-zero error code otherwise.
/* Add/del the L3 LISP-GPE interface for a (vni, table_id) pair: create the
 * interface, wire ingress arcs from lgpe-ipX-lookup, install 0/0 default
 * routes, set egress arcs to ipX-lookup, bind the interface into the vrf's
 * FIBs, and bring it up. On delete, reverse all of the above. */
970 lisp_gpe_add_del_l3_iface (lisp_gpe_main_t * lgm,
971 vnet_lisp_gpe_add_del_iface_args_t * a)
973 vnet_main_t *vnm = lgm->vnet_main;
974 tunnel_lookup_t *l3_ifaces = &lgm->l3_ifaces;
975 vnet_hw_interface_t *hi;
976 u32 lookup_next_index4, lookup_next_index6;
/* Reject duplicate vrf or vni mappings. */
979 hip = hash_get (l3_ifaces->hw_if_index_by_dp_table, a->table_id);
985 clib_warning ("vrf %d already mapped to a vni", a->table_id);
989 si = hash_get (l3_ifaces->sw_if_index_by_vni, a->vni);
992 clib_warning ("Interface for vni %d already exists", a->vni);
996 /* create lisp iface and populate tunnel tables */
997 hi = create_lisp_gpe_iface (lgm, a->vni, a->table_id,
998 &lisp_gpe_device_class, l3_ifaces);
1000 /* set ingress arc from lgpe_ipX_lookup */
1001 lookup_next_index4 = vlib_node_add_next (lgm->vlib_main,
1002 lgpe_ip4_lookup_node.index,
1003 hi->output_node_index);
1004 lookup_next_index6 = vlib_node_add_next (lgm->vlib_main,
1005 lgpe_ip6_lookup_node.index,
1006 hi->output_node_index);
1007 hash_set (lgm->lgpe_ip4_lookup_next_index_by_table_id, a->table_id,
1008 lookup_next_index4);
1009 hash_set (lgm->lgpe_ip6_lookup_next_index_by_table_id, a->table_id,
1010 lookup_next_index6);
1012 /* insert default routes that point to lgpe-ipx-lookup */
1013 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */ 1, 1);
1014 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */ 0, 1);
1016 /* set egress arcs */
1017 #define _(sym,str) vlib_node_add_named_next_with_slot (vnm->vlib_main, \
1018 hi->tx_node_index, str, LISP_GPE_TX_NEXT_##sym);
1019 foreach_lisp_gpe_tx_next
1021 /* set interface in appropriate v4 and v6 FIBs */
1022 lisp_gpe_iface_set_table (hi->sw_if_index, a->table_id, 1);
1023 lisp_gpe_iface_set_table (hi->sw_if_index, a->table_id, 0);
1025 /* enable interface */
1026 vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
1027 VNET_SW_INTERFACE_FLAG_ADMIN_UP);
1028 vnet_hw_interface_set_flags (vnm, hi->hw_if_index,
1029 VNET_HW_INTERFACE_FLAG_LINK_UP);
/* Delete path: remove the interface and the default routes. */
1035 clib_warning ("The interface for vrf %d doesn't exist",
1040 remove_lisp_gpe_iface (lgm, hip[0], a->table_id, &lgm->l3_ifaces);
1042 /* unset default routes */
1043 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */ 1, 0);
1044 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */ 0, 0);
1051 * @brief Add/del LISP-GPE L2 interface.
1053 * Creates LISP-GPE interface, sets it in L2 mode in the appropriate
1054 * bridge domain, sets egress arcs and enables it.
1056 * @param[in] lgm Reference to @ref lisp_gpe_main_t.
1057 * @param[in] a Parameters to create interface.
1059 * @return 0 on success; non-zero error code otherwise.
/* Add/del the L2 LISP-GPE interface for a (vni, bd_id) pair: create the
 * interface, put it in L2 bridge mode in the bridge domain, set egress
 * arcs, and bring it up. On delete, reverse the above. */
1062 lisp_gpe_add_del_l2_iface (lisp_gpe_main_t * lgm,
1063 vnet_lisp_gpe_add_del_iface_args_t * a)
1065 vnet_main_t *vnm = lgm->vnet_main;
1066 tunnel_lookup_t *l2_ifaces = &lgm->l2_ifaces;
1067 vnet_hw_interface_t *hi;
/* Find-or-create the bridge domain, then reject duplicate mappings. */
1071 bd_index = bd_find_or_add_bd_index (&bd_main, a->bd_id);
1072 hip = hash_get (l2_ifaces->hw_if_index_by_dp_table, bd_index);
1078 clib_warning ("bridge domain %d already mapped to a vni", a->bd_id);
1082 si = hash_get (l2_ifaces->sw_if_index_by_vni, a->vni);
1085 clib_warning ("Interface for vni %d already exists", a->vni);
1089 /* create lisp iface and populate tunnel tables */
1090 hi = create_lisp_gpe_iface (lgm, a->vni, bd_index,
1091 &l2_lisp_gpe_device_class, &lgm->l2_ifaces);
1093 /* add iface to l2 bridge domain */
1094 set_int_l2_mode (lgm->vlib_main, vnm, MODE_L2_BRIDGE, hi->sw_if_index,
1097 /* set egress arcs */
1098 #define _(sym,str) vlib_node_add_named_next_with_slot (vnm->vlib_main, \
1099 hi->tx_node_index, str, L2_LISP_GPE_TX_NEXT_##sym);
1100 foreach_l2_lisp_gpe_tx_next
1102 /* enable interface */
1103 vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
1104 VNET_SW_INTERFACE_FLAG_ADMIN_UP);
1105 vnet_hw_interface_set_flags (vnm, hi->hw_if_index,
1106 VNET_HW_INTERFACE_FLAG_LINK_UP);
/* Delete path. */
1112 clib_warning ("The interface for bridge domain %d doesn't exist",
1116 remove_lisp_gpe_iface (lgm, hip[0], bd_index, &lgm->l2_ifaces);
/** Add/del L2 or L3 LISP-GPE interface.
 * Dispatches to the L2 or L3 handler based on a->is_l2; refuses to act
 * while LISP is globally disabled. */
1124 vnet_lisp_gpe_add_del_iface (vnet_lisp_gpe_add_del_iface_args_t * a,
1127 lisp_gpe_main_t *lgm = &lisp_gpe_main;
1129 if (vnet_lisp_gpe_enable_disable_status () == 0)
1131 clib_warning ("LISP is disabled!");
1132 return VNET_API_ERROR_LISP_DISABLED;
1136 return lisp_gpe_add_del_l3_iface (lgm, a);
1138 return lisp_gpe_add_del_l2_iface (lgm, a);
/* CLI handler for "lisp gpe iface": parses add/del, vrf|bd and vni, then
 * calls vnet_lisp_gpe_add_del_iface. vrf and bd are mutually exclusive;
 * vni is mandatory.
 * Fix: corrected "brdige" -> "bridge" typo in the user-facing error. */
1141 static clib_error_t *
1142 lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input,
1143 vlib_cli_command_t * cmd)
1145 unformat_input_t _line_input, *line_input = &_line_input;
1147 clib_error_t *error = 0;
1149 u32 table_id, vni, bd_id;
1150 u8 vni_is_set = 0, vrf_is_set = 0, bd_index_is_set = 0;
1152 vnet_lisp_gpe_add_del_iface_args_t _a, *a = &_a;
1154 /* Get a line of input. */
1155 if (!unformat_user (input, unformat_line_input, line_input))
1158 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
1160 if (unformat (line_input, "add"))
1162 else if (unformat (line_input, "del"))
1164 else if (unformat (line_input, "vrf %d", &table_id))
1168 else if (unformat (line_input, "vni %d", &vni))
1172 else if (unformat (line_input, "bd %d", &bd_id))
1174 bd_index_is_set = 1;
1178 return clib_error_return (0, "parse error: '%U'",
1179 format_unformat_error, line_input);
/* Validate the mutually exclusive / mandatory argument combinations. */
1183 if (vrf_is_set && bd_index_is_set)
1184 return clib_error_return (0,
1185 "Cannot set both vrf and bridge domain index!");
1188 return clib_error_return (0, "vni must be set!");
1190 if (!vrf_is_set && !bd_index_is_set)
1191 return clib_error_return (0, "vrf or bridge domain index must be set!");
1194 a->dp_table = vrf_is_set ? table_id : bd_id;
1196 a->is_l2 = bd_index_is_set;
1198 rv = vnet_lisp_gpe_add_del_iface (a, 0);
1201 error = clib_error_return (0, "failed to %s gpe iface!",
1202 is_add ? "add" : "delete");
/* CLI registration: "lisp gpe iface add/del vni <vni> vrf <vrf>". */
1209 VLIB_CLI_COMMAND (add_del_lisp_gpe_iface_command, static) = {
1210 .path = "lisp gpe iface",
1211 .short_help = "lisp gpe iface add/del vni <vni> vrf <vrf>",
1212 .function = lisp_gpe_add_del_iface_command_fn,
1217 * fd.io coding-style-patch-verification: ON
1220 * eval: (c-set-style "gnu")