2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vppinfra/error.h>
17 #include <vppinfra/hash.h>
18 #include <vnet/vnet.h>
19 #include <vnet/ip/ip.h>
20 #include <vnet/ip/udp.h>
21 #include <vnet/ethernet/ethernet.h>
22 #include <vnet/lisp-gpe/lisp_gpe.h>
/* NOTE(review): this dump bakes original line numbers (24, 25, ...) into each
 * line and elides blank/brace lines; the numeric prefixes are not code. */
/* X-macro list of the lisp-gpe tx node's next nodes. */
24 #define foreach_lisp_gpe_tx_next \
25 _(DROP, "error-drop") \
26 _(IP4_LOOKUP, "ip4-lookup") \
27 _(IP6_LOOKUP, "ip6-lookup")
/* Expands the list above into LISP_GPE_TX_NEXT_* enum members
 * (enum header elided from this view). */
31 #define _(sym,str) LISP_GPE_TX_NEXT_##sym,
32 foreach_lisp_gpe_tx_next
/* Per-packet trace record for the tx node; body elided, tail shown.
 * Holds at least tunnel_index (see format_lisp_gpe_tx_trace). */
40 } lisp_gpe_tx_trace_t;
/* Formatter for lisp_gpe_tx_trace_t records: consumes the standard
 * (vm, node, trace) va_list triple and appends the tunnel index to s.
 * (Opening brace / return elided from this view.) */
43 format_lisp_gpe_tx_trace (u8 * s, va_list * args)
45 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
46 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
47 lisp_gpe_tx_trace_t *t = va_arg (*args, lisp_gpe_tx_trace_t *);
49 s = format (s, "LISP-GPE-TX: tunnel %d", t->tunnel_index);
/* Resolve the lisp-gpe tunnel for one buffer: read the TX adjacency index
 * stored in the buffer, fetch the adjacency from the v4 or v6 lookup main
 * (is_v4 selects lm4 vs lm6 — the if/else lines are elided here), and use
 * the adjacency's if_address_index as the tunnel pool index.
 * Result is written through t0. */
54 get_one_tunnel_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0,
55 lisp_gpe_tunnel_t ** t0, u8 is_v4)
57 u32 adj_index0, tunnel_index0;
60 /* Get adjacency and from it the tunnel_index */
61 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
64 adj0 = ip_get_adjacency (lgm->lm4, adj_index0);
66 adj0 = ip_get_adjacency (lgm->lm6, adj_index0);
/* if_address_index is overloaded to carry the tunnel pool index here. */
68 tunnel_index0 = adj0->if_address_index;
69 t0[0] = pool_elt_at_index (lgm->tunnels, tunnel_index0);
/* Encapsulate one buffer with the tunnel's ip-udp-lisp-gpe rewrite.
 * Picks a sub-tunnel by flow hash modulo the load-balance vector length,
 * prepends the 36-byte (v4) or 56-byte (v6) rewrite — the is_ip4 branch
 * lines are elided — and sets next0 to the matching ipX-lookup node. */
75 encap_one_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0,
76 lisp_gpe_tunnel_t * t0, u32 * next0)
/* Rewrite sizes below are hard-coded; these asserts pin them to the
 * actual header layouts. */
78 ASSERT (sizeof (ip4_udp_lisp_gpe_header_t) == 36);
79 ASSERT (sizeof (ip6_udp_lisp_gpe_header_t) == 56);
81 lisp_gpe_sub_tunnel_t *st0;
84 sti0 = vec_elt_at_index (t0->sub_tunnels_lbv,
85 vnet_buffer (b0)->ip.flow_hash %
86 t0->sub_tunnels_lbv_count);
87 st0 = vec_elt_at_index (t0->sub_tunnels, sti0[0]);
90 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 36, 1);
91 next0[0] = LISP_GPE_TX_NEXT_IP4_LOOKUP;
95 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 56, 0);
96 next0[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP;
99 /* Reset to look up tunnel partner in the configured FIB */
100 vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
/* Dual-buffer variant of get_one_tunnel_inline: both buffers must use the
 * same address family (is_v4 applies to both).  Resolves each buffer's TX
 * adjacency to a tunnel pool index and writes the tunnels through t0/t1. */
104 get_two_tunnels_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0,
105 vlib_buffer_t * b1, lisp_gpe_tunnel_t ** t0,
106 lisp_gpe_tunnel_t ** t1, u8 is_v4)
108 u32 adj_index0, adj_index1, tunnel_index0, tunnel_index1;
109 ip_adjacency_t *adj0, *adj1;
111 /* Get adjacency and from it the tunnel_index */
112 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
113 adj_index1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];
117 adj0 = ip_get_adjacency (lgm->lm4, adj_index0);
118 adj1 = ip_get_adjacency (lgm->lm4, adj_index1);
122 adj0 = ip_get_adjacency (lgm->lm6, adj_index0);
123 adj1 = ip_get_adjacency (lgm->lm6, adj_index1);
126 tunnel_index0 = adj0->if_address_index;
127 tunnel_index1 = adj1->if_address_index;
129 t0[0] = pool_elt_at_index (lgm->tunnels, tunnel_index0);
130 t1[0] = pool_elt_at_index (lgm->tunnels, tunnel_index1);
/* Dual-buffer encap: pick a sub-tunnel per buffer by flow hash, prepend the
 * appropriate v4 (36B) or v6 (56B) rewrite and set next0/next1.  The fast
 * path handles both buffers having the same underlay AF; the elided else
 * branch handles the mixed case one buffer at a time. */
137 encap_two_inline (lisp_gpe_main_t * lgm, vlib_buffer_t * b0,
138 vlib_buffer_t * b1, lisp_gpe_tunnel_t * t0,
139 lisp_gpe_tunnel_t * t1, u32 * next0, u32 * next1)
141 ASSERT (sizeof (ip4_udp_lisp_gpe_header_t) == 36);
142 ASSERT (sizeof (ip6_udp_lisp_gpe_header_t) == 56);
144 lisp_gpe_sub_tunnel_t *st0, *st1;
146 sti0 = vec_elt_at_index (t0->sub_tunnels_lbv,
147 vnet_buffer (b0)->ip.flow_hash %
148 t0->sub_tunnels_lbv_count);
150 vec_elt_at_index (t1->sub_tunnels_lbv,
151 vnet_buffer (b1)->ip.flow_hash %
152 t1->sub_tunnels_lbv_count);
153 st0 = vec_elt_at_index (t0->sub_tunnels, sti0[0]);
154 st1 = vec_elt_at_index (t1->sub_tunnels, sti1[0]);
156 if (PREDICT_TRUE (st0->is_ip4 == st1->is_ip4))
160 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 36, 1);
161 ip_udp_encap_one (lgm->vlib_main, b1, st1->rewrite, 36, 1);
162 next0[0] = next1[0] = LISP_GPE_TX_NEXT_IP4_LOOKUP;
166 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 56, 0);
167 ip_udp_encap_one (lgm->vlib_main, b1, st1->rewrite, 56, 0);
168 next0[0] = next1[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP;
/* NOTE(review): in the mixed-AF branches below the 56-byte (IPv6) rewrites
 * are applied with the is_ip4 argument set to 1, whereas the same-AF v6
 * path above passes 0 — this looks like a copy/paste bug; confirm against
 * ip_udp_encap_one's is_ip4 semantics. */
175 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 36, 1);
176 ip_udp_encap_one (lgm->vlib_main, b1, st1->rewrite, 56, 1);
177 next0[0] = LISP_GPE_TX_NEXT_IP4_LOOKUP;
178 next1[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP;
182 ip_udp_encap_one (lgm->vlib_main, b0, st0->rewrite, 56, 1);
183 ip_udp_encap_one (lgm->vlib_main, b1, st1->rewrite, 36, 1);
184 next0[0] = LISP_GPE_TX_NEXT_IP6_LOOKUP;
185 next1[0] = LISP_GPE_TX_NEXT_IP4_LOOKUP;
189 /* Reset to look up tunnel partner in the configured FIB */
190 vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
191 vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
/* True iff the first byte pointed to by _h has version nibble 4, i.e. the
 * buffer starts with an IPv4 header.  Fix: the original expansion
 * ((*(u8*) _h) & 0xF0) == 0x40 left both the argument and the whole
 * expansion unparenthesized, so e.g. !is_v4_packet(h) or
 * is_v4_packet(p + off) would parse incorrectly.  Fully parenthesized per
 * CERT PRE01-C / PRE02-C. */
#define is_v4_packet(_h) ((((*(u8 *) (_h)) & 0xF0) == 0x40))
/* TX function of the L3 lisp-gpe device: standard vlib dual/single-loop.
 * For each packet, classify the inner (EID) packet as v4/v6 by its first
 * nibble, resolve the tunnel from the TX adjacency, encapsulate, trace if
 * requested, and enqueue to ip4/ip6-lookup.  Dequeue/enqueue bookkeeping
 * lines are elided from this view. */
197 lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
198 vlib_frame_t * from_frame)
200 u32 n_left_from, next_index, *from, *to_next;
201 lisp_gpe_main_t *lgm = &lisp_gpe_main;
203 from = vlib_frame_vector_args (from_frame);
204 n_left_from = from_frame->n_vectors;
206 next_index = node->cached_next_index;
208 while (n_left_from > 0)
212 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* Dual loop: needs 2 packets to process + 2 to prefetch. */
214 while (n_left_from >= 4 && n_left_to_next >= 2)
217 vlib_buffer_t *b0, *b1;
219 lisp_gpe_tunnel_t *t0 = 0, *t1 = 0;
220 u8 is_v4_eid0, is_v4_eid1;
222 next0 = next1 = LISP_GPE_TX_NEXT_IP4_LOOKUP;
224 /* Prefetch next iteration. */
226 vlib_buffer_t *p2, *p3;
228 p2 = vlib_get_buffer (vm, from[2]);
229 p3 = vlib_get_buffer (vm, from[3]);
231 vlib_prefetch_buffer_header (p2, LOAD);
232 vlib_prefetch_buffer_header (p3, LOAD);
234 CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
235 CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
247 b0 = vlib_get_buffer (vm, bi0);
248 b1 = vlib_get_buffer (vm, bi1);
250 is_v4_eid0 = is_v4_packet (vlib_buffer_get_current (b0));
251 is_v4_eid1 = is_v4_packet (vlib_buffer_get_current (b1));
/* Same inner AF: resolve both tunnels in one call; otherwise fall
 * back to two single resolutions (else line elided). */
253 if (PREDICT_TRUE (is_v4_eid0 == is_v4_eid1))
255 get_two_tunnels_inline (lgm, b0, b1, &t0, &t1,
260 get_one_tunnel_inline (lgm, b0, &t0, is_v4_eid0 ? 1 : 0);
261 get_one_tunnel_inline (lgm, b1, &t1, is_v4_eid1 ? 1 : 0);
264 encap_two_inline (lgm, b0, b1, t0, t1, &next0, &next1);
266 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
268 lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
270 tr->tunnel_index = t0 - lgm->tunnels;
272 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
274 lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b1,
276 tr->tunnel_index = t1 - lgm->tunnels;
279 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
280 n_left_to_next, bi0, bi1, next0,
/* Single loop: one packet at a time, same steps as above. */
284 while (n_left_from > 0 && n_left_to_next > 0)
287 u32 bi0, next0 = LISP_GPE_TX_NEXT_IP4_LOOKUP;
288 lisp_gpe_tunnel_t *t0 = 0;
298 b0 = vlib_get_buffer (vm, bi0);
300 is_v4_0 = is_v4_packet (vlib_buffer_get_current (b0));
301 get_one_tunnel_inline (lgm, b0, &t0, is_v4_0 ? 1 : 0);
303 encap_one_inline (lgm, b0, t0, &next0);
305 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
307 lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
309 tr->tunnel_index = t0 - lgm->tunnels;
311 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
312 n_left_to_next, bi0, next0);
315 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
318 return from_frame->n_vectors;
/* Device-name formatter for the L3 device class: "lisp_gpe<instance>". */
322 format_lisp_gpe_name (u8 * s, va_list * args)
324 u32 dev_instance = va_arg (*args, u32);
325 return format (s, "lisp_gpe%d", dev_instance);
/* L3 lisp-gpe device class registration (.name line elided in this view).
 * Wires the tx node, device-name and tx-trace formatters. */
329 VNET_DEVICE_CLASS (lisp_gpe_device_class,static) = {
331 .format_device_name = format_lisp_gpe_name,
332 .format_tx_trace = format_lisp_gpe_tx_trace,
333 .tx_function = lisp_gpe_interface_tx,
334 .no_flatten_output_chains = 1,
/* Placeholder set_rewrite callback for the hw interface class; body elided
 * from this view — presumably a no-op returning 0.  TODO(review): confirm. */
339 dummy_set_rewrite (vnet_main_t * vnm, u32 sw_if_index, u32 l3_type,
340 void *dst_address, void *rewrite, uword max_rewrite_bytes)
/* Pretty-print a lisp-gpe header: flag bits (via foreach_lisp_gpe_flag_bit),
 * then ver_res/res/next_protocol/iid.  Truncates gracefully when fewer than
 * sizeof(lisp_gpe_header_t) bytes are available. */
346 format_lisp_gpe_header_with_length (u8 * s, va_list * args)
348 lisp_gpe_header_t *h = va_arg (*args, lisp_gpe_header_t *);
349 u32 max_header_bytes = va_arg (*args, u32);
352 header_bytes = sizeof (h[0]);
353 if (max_header_bytes != 0 && header_bytes > max_header_bytes)
354 return format (s, "lisp-gpe header truncated");
356 s = format (s, "flags: ");
357 #define _(n,v) if (h->flags & v) s = format (s, "%s ", #n);
358 foreach_lisp_gpe_flag_bit;
/* iid printed both in decimal and hex. */
361 s = format (s, "\n ver_res %d res %d next_protocol %d iid %d(%x)",
362 h->ver_res, h->res, h->next_protocol,
363 clib_net_to_host_u32 (h->iid), clib_net_to_host_u32 (h->iid));
/* Hardware interface class for lisp-gpe interfaces (.name line elided). */
368 VNET_HW_INTERFACE_CLASS (lisp_gpe_hw_class) = {
370 .format_header = format_lisp_gpe_header_with_length,
371 .set_rewrite = dummy_set_rewrite,
/* Add or delete a route for dst_prefix in the given table, v4 or v6
 * depending on the prefix version.  On add, looks the route back up to
 * recover and return (via adj_index) the adjacency index; warns if the
 * just-inserted route cannot be found. */
376 add_del_ip_prefix_route (ip_prefix_t * dst_prefix, u32 table_id,
377 ip_adjacency_t * add_adj, u8 is_add, u32 * adj_index)
381 if (ip_prefix_version (dst_prefix) == IP4)
383 ip4_main_t *im4 = &ip4_main;
384 ip4_add_del_route_args_t a;
385 ip4_address_t addr = ip_prefix_v4 (dst_prefix);
387 memset (&a, 0, sizeof (a));
388 a.flags = IP4_ROUTE_FLAG_TABLE_ID;
389 a.table_index_or_table_id = table_id;
391 a.dst_address_length = ip_prefix_len (dst_prefix);
392 a.dst_address = addr;
393 a.flags |= is_add ? IP4_ROUTE_FLAG_ADD : IP4_ROUTE_FLAG_DEL;
395 a.n_add_adj = is_add ? 1 : 0;
397 ip4_add_del_route (im4, &a);
/* Look the route back up; failure check on p is on an elided line.
 * NOTE(review): the v4 path checks a hash lookup result while the v6
 * path below checks adj_index[0] == 0 — verify both failure tests. */
401 p = ip4_get_route (im4, table_id, 0, addr.as_u8,
402 ip_prefix_len (dst_prefix));
405 clib_warning ("Failed to insert route for eid %U!",
406 format_ip4_address_and_length, addr.as_u8,
407 ip_prefix_len (dst_prefix));
/* IPv6 branch (else line elided). */
415 ip6_main_t *im6 = &ip6_main;
416 ip6_add_del_route_args_t a;
417 ip6_address_t addr = ip_prefix_v6 (dst_prefix);
419 memset (&a, 0, sizeof (a));
420 a.flags = IP6_ROUTE_FLAG_TABLE_ID;
421 a.table_index_or_table_id = table_id;
423 a.dst_address_length = ip_prefix_len (dst_prefix);
424 a.dst_address = addr;
425 a.flags |= is_add ? IP6_ROUTE_FLAG_ADD : IP6_ROUTE_FLAG_DEL;
427 a.n_add_adj = is_add ? 1 : 0;
429 ip6_add_del_route (im6, &a);
433 adj_index[0] = ip6_get_route (im6, table_id, 0, &addr,
434 ip_prefix_len (dst_prefix));
435 if (adj_index[0] == 0)
437 clib_warning ("Failed to insert route for eid %U!",
438 format_ip6_address_and_length, addr.as_u8,
439 ip_prefix_len (dst_prefix));
/* Install or remove a 0/0 default route in table_id whose adjacency steers
 * packets to the lgpe-ipX-lookup node (v4 or v6 per is_v4).  The sentinel
 * sw_if_index ~0 marks the default-route adjacency (tunnel_index ~0). */
448 add_del_lisp_gpe_default_route (u32 table_id, u8 is_v4, u8 is_add)
450 lisp_gpe_main_t *lgm = &lisp_gpe_main;
455 /* setup adjacency */
456 memset (&adj, 0, sizeof (adj));
459 adj.explicit_fib_index = ~0;
460 adj.lookup_next_index = is_v4 ? lgm->ip4_lookup_next_lgpe_ip4_lookup :
461 lgm->ip6_lookup_next_lgpe_ip6_lookup;
462 /* default route has tunnel_index ~0 */
463 adj.rewrite_header.sw_if_index = ~0;
465 /* set prefix to 0/0 */
466 memset (&prefix, 0, sizeof (prefix));
467 ip_prefix_version (&prefix) = is_v4 ? IP4 : IP6;
469 /* add/delete route for prefix */
470 add_del_ip_prefix_route (&prefix, table_id, &adj, is_add, &adj_index);
/* Bind sw_if_index to the v4 or v6 FIB identified by table_id.  The FIB is
 * created on demand by find_ipX_fib_by_table_index_or_id; an ASSERT on the
 * fib pointer appears on elided lines. */
474 lisp_gpe_iface_set_table (u32 sw_if_index, u32 table_id, u8 is_ip4)
478 ip4_main_t *im4 = &ip4_main;
480 fib = find_ip4_fib_by_table_index_or_id (im4, table_id,
481 IP4_ROUTE_FLAG_TABLE_ID);
483 /* fib's created if it doesn't exist */
486 vec_validate (im4->fib_index_by_sw_if_index, sw_if_index);
487 im4->fib_index_by_sw_if_index[sw_if_index] = fib->index;
/* IPv6 branch (else line elided). */
491 ip6_main_t *im6 = &ip6_main;
493 fib = find_ip6_fib_by_table_index_or_id (im6, table_id,
494 IP6_ROUTE_FLAG_TABLE_ID);
496 /* fib's created if it doesn't exist */
499 vec_validate (im6->fib_index_by_sw_if_index, sw_if_index);
500 im6->fib_index_by_sw_if_index[sw_if_index] = fib->index;
/* X-macro list of the L2 lisp-gpe tx node's next nodes; adds the
 * lisp-cp-lookup punt path on top of the L3 list. */
504 #define foreach_l2_lisp_gpe_tx_next \
505 _(DROP, "error-drop") \
506 _(IP4_LOOKUP, "ip4-lookup") \
507 _(IP6_LOOKUP, "ip6-lookup") \
508 _(LISP_CP_LOOKUP, "lisp-cp-lookup")
/* L2_LISP_GPE_TX_NEXT_* enum (header line elided). */
512 #define _(sym,str) L2_LISP_GPE_TX_NEXT_##sym,
513 foreach_l2_lisp_gpe_tx_next
515 L2_LISP_GPE_TX_N_NEXT,
516 } l2_lisp_gpe_tx_next_t;
/* Per-packet trace record for the L2 tx node; body elided, holds at least
 * tunnel_index (see format_l2_lisp_gpe_tx_trace). */
521 } l2_lisp_gpe_tx_trace_t;
/* Trace formatter for l2_lisp_gpe_tx_trace_t records. */
524 format_l2_lisp_gpe_tx_trace (u8 * s, va_list * args)
526 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
527 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
528 l2_lisp_gpe_tx_trace_t *t = va_arg (*args, l2_lisp_gpe_tx_trace_t *);
530 s = format (s, "L2-LISP-GPE-TX: tunnel %d", t->tunnel_index);
/* Apply a tunnel's negative-mapping action to a buffer: punt to the LISP
 * control plane for LISP_SEND_MAP_REQUEST, otherwise drop. */
535 l2_process_tunnel_action (vlib_buffer_t * b0, u8 action, u32 * next0)
537 if (LISP_SEND_MAP_REQUEST == action)
539 next0[0] = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
/* Tag the punt with the MAC overlay AFI so the CP resolves an L2 EID. */
540 vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC;
544 next0[0] = L2_LISP_GPE_TX_NEXT_DROP;
/* Compute a flow hash for the IP packet at data, dispatching on the
 * version nibble of the first byte (v4 vs v6). */
549 ip_flow_hash (void *data)
551 ip4_header_t *iph = (ip4_header_t *) data;
553 if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
554 return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
556 return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
/* Flow hash for an L2 frame: hash the inner IP header when the ethertype
 * is IPv4/IPv6 (the is_ip branch line is elided), then mix in dst/src MACs
 * with hash_mix64.  Return statement elided from this view. */
560 l2_flow_hash (vlib_buffer_t * b0)
562 ethernet_header_t *eh;
564 uword is_ip, eh_size;
567 eh = vlib_buffer_get_current (b0);
568 eh_type = clib_net_to_host_u16 (eh->type);
569 eh_size = ethernet_buffer_header_size (b0);
571 is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);
573 /* since we have 2 cache lines, use them */
575 a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
579 b = mac_to_u64 ((u8 *) eh->dst_address);
580 c = mac_to_u64 ((u8 *) eh->src_address);
581 hash_mix64 (a, b, c);
/* Process one L2 buffer against tunnel ti0: if the mapping is positive
 * (LISP_NO_ACTION), compute a flow hash when load-balancing across more
 * than one sub-tunnel and encapsulate; otherwise apply the tunnel action
 * (punt to CP or drop). */
587 l2_process_one (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, u32 ti0,
590 lisp_gpe_tunnel_t *t0;
592 t0 = pool_elt_at_index (lgm->tunnels, ti0);
595 if (PREDICT_TRUE (LISP_NO_ACTION == t0->action))
597 /* compute 'flow' hash */
598 if (PREDICT_TRUE (t0->sub_tunnels_lbv_count > 1))
599 vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
600 encap_one_inline (lgm, b0, t0, next0);
604 l2_process_tunnel_action (b0, t0->action, next0);
/* Dual-buffer variant of l2_process_one.  Fast path: both tunnels positive
 * => hash (when load-balanced) and encap both in one call.  Otherwise
 * handle whichever buffer has a positive mapping individually and apply
 * the action to the other; final else handles both-negative. */
609 l2_process_two (lisp_gpe_main_t * lgm, vlib_buffer_t * b0, vlib_buffer_t * b1,
610 u32 ti0, u32 ti1, u32 * next0, u32 * next1)
612 lisp_gpe_tunnel_t *t0, *t1;
614 t0 = pool_elt_at_index (lgm->tunnels, ti0);
615 t1 = pool_elt_at_index (lgm->tunnels, ti1);
617 ASSERT (0 != t0 && 0 != t1);
619 if (PREDICT_TRUE (LISP_NO_ACTION == t0->action
620 && LISP_NO_ACTION == t1->action))
622 if (PREDICT_TRUE (t0->sub_tunnels_lbv_count > 1))
623 vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
624 if (PREDICT_TRUE (t1->sub_tunnels_lbv_count > 1))
625 vnet_buffer (b1)->ip.flow_hash = l2_flow_hash (b1);
626 encap_two_inline (lgm, b0, b1, t0, t1, next0, next1);
630 if (LISP_NO_ACTION == t0->action)
632 if (PREDICT_TRUE (t0->sub_tunnels_lbv_count > 1))
633 vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0);
634 encap_one_inline (lgm, b0, t0, next0);
635 l2_process_tunnel_action (b1, t1->action, next1);
637 else if (LISP_NO_ACTION == t1->action)
639 if (PREDICT_TRUE (t1->sub_tunnels_lbv_count > 1))
640 vnet_buffer (b1)->ip.flow_hash = l2_flow_hash (b1);
641 encap_one_inline (lgm, b1, t1, next1);
642 l2_process_tunnel_action (b0, t0->action, next0);
/* Both mappings negative (else line elided). */
646 l2_process_tunnel_action (b0, t0->action, next0);
647 l2_process_tunnel_action (b1, t1->action, next1);
/* TX function of the L2 lisp-gpe device: standard vlib dual/single-loop.
 * Looks up the tunnel by (bd_index, dst MAC, src MAC) in the L2 FIB;
 * found tunnels are processed (encap or action), misses are punted to
 * lisp-cp-lookup tagged with the MAC AFI.  Dequeue/enqueue bookkeeping
 * lines are elided from this view. */
653 l2_lisp_gpe_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
654 vlib_frame_t * from_frame)
656 u32 n_left_from, next_index, *from, *to_next;
657 lisp_gpe_main_t *lgm = &lisp_gpe_main;
659 from = vlib_frame_vector_args (from_frame);
660 n_left_from = from_frame->n_vectors;
662 next_index = node->cached_next_index;
664 while (n_left_from > 0)
668 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
670 while (n_left_from >= 4 && n_left_to_next >= 2)
673 vlib_buffer_t *b0, *b1;
674 u32 next0, next1, ti0, ti1;
675 lisp_gpe_tunnel_t *t0 = 0, *t1 = 0;
676 ethernet_header_t *e0, *e1;
678 next0 = next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
680 /* Prefetch next iteration. */
682 vlib_buffer_t *p2, *p3;
684 p2 = vlib_get_buffer (vm, from[2]);
685 p3 = vlib_get_buffer (vm, from[3]);
687 vlib_prefetch_buffer_header (p2, LOAD);
688 vlib_prefetch_buffer_header (p3, LOAD);
690 CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
691 CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
703 b0 = vlib_get_buffer (vm, bi0);
704 b1 = vlib_get_buffer (vm, bi1);
706 e0 = vlib_buffer_get_current (b0);
707 e1 = vlib_buffer_get_current (b1);
709 /* lookup dst + src mac */
710 ti0 = lisp_l2_fib_lookup (lgm, vnet_buffer (b0)->l2.bd_index,
711 e0->src_address, e0->dst_address);
712 ti1 = lisp_l2_fib_lookup (lgm, vnet_buffer (b1)->l2.bd_index,
713 e1->src_address, e1->dst_address);
/* NOTE(review): the PREDICT_TRUE here wraps only the ti0 test;
 * harmless but probably meant to cover the whole conjunction. */
715 if (PREDICT_TRUE ((u32) ~ 0 != ti0) && (u32) ~ 0 != ti1)
717 /* process both tunnels */
718 l2_process_two (lgm, b0, b1, ti0, ti1, &next0, &next1);
722 if ((u32) ~ 0 != ti0)
724 /* process tunnel for b0 */
725 l2_process_one (lgm, b0, ti0, &next0);
727 /* no tunnel found for b1, send to control plane */
728 next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
729 vnet_buffer (b1)->lisp.overlay_afi = LISP_AFI_MAC;
731 else if ((u32) ~ 0 != ti1)
733 /* process tunnel for b1 */
734 l2_process_one (lgm, b1, ti1, &next1);
736 /* no tunnel found b0, send to control plane */
737 next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
738 vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC;
742 /* no tunnels found */
743 next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
744 vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC;
745 next1 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
746 vnet_buffer (b1)->lisp.overlay_afi = LISP_AFI_MAC;
/* NOTE(review): t0/t1 are only ever 0 in this loop (never assigned),
 * so the traced tunnel_index below is always -lgm->tunnels; the ti0/ti1
 * values are what should be recorded. */
750 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
752 l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
754 tr->tunnel_index = t0 - lgm->tunnels;
756 if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
758 l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b1,
760 tr->tunnel_index = t1 - lgm->tunnels;
763 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
764 n_left_to_next, bi0, bi1, next0,
768 while (n_left_from > 0 && n_left_to_next > 0)
771 u32 bi0, ti0, next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
772 ethernet_header_t *e0;
781 b0 = vlib_get_buffer (vm, bi0);
782 e0 = vlib_buffer_get_current (b0);
784 /* lookup dst + src mac */
785 ti0 = lisp_l2_fib_lookup (lgm, vnet_buffer (b0)->l2.bd_index,
786 e0->src_address, e0->dst_address);
788 if (PREDICT_TRUE ((u32) ~ 0 != ti0))
790 l2_process_one (lgm, b0, ti0, &next0);
794 /* no tunnel found send to control plane */
795 next0 = L2_LISP_GPE_TX_NEXT_LISP_CP_LOOKUP;
796 vnet_buffer (b0)->lisp.overlay_afi = LISP_AFI_MAC;
799 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
801 l2_lisp_gpe_tx_trace_t *tr = vlib_add_trace (vm, node, b0,
/* NOTE(review): a valid tunnel index of 0 is misreported as ~0 here;
 * the test should be against (u32) ~0, not truthiness. */
803 tr->tunnel_index = ti0 ? ti0 : ~0;
805 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
806 n_left_to_next, bi0, next0);
809 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
812 return from_frame->n_vectors;
/* Device-name formatter for the L2 device class: "l2_lisp_gpe<instance>". */
816 format_l2_lisp_gpe_name (u8 * s, va_list * args)
818 u32 dev_instance = va_arg (*args, u32);
819 return format (s, "l2_lisp_gpe%d", dev_instance);
/* L2 lisp-gpe device class registration.
 * Fix: .format_tx_trace previously pointed at the L3 formatter
 * (format_lisp_gpe_tx_trace) although l2_lisp_gpe_interface_tx records
 * l2_lisp_gpe_tx_trace_t traces; wire the matching L2 formatter so traces
 * are labeled "L2-LISP-GPE-TX" and interpret the right trace struct. */
823 VNET_DEVICE_CLASS (l2_lisp_gpe_device_class,static) = {
824 .name = "L2_LISP_GPE",
825 .format_device_name = format_l2_lisp_gpe_name,
826 .format_tx_trace = format_l2_lisp_gpe_tx_trace,
827 .tx_function = l2_lisp_gpe_interface_tx,
828 .no_flatten_output_chains = 1,
/* Create (or reuse from the free list) a lisp_gpeX/l2_lisp_gpeX hw
 * interface for vni, record it in the tunnel lookup tables keyed by
 * dp_table (vrf or bd_index) and vni, and return the hw interface.
 * Reused interfaces are renamed and their counters zeroed. */
832 static vnet_hw_interface_t *
833 create_lisp_gpe_iface (lisp_gpe_main_t * lgm, u32 vni, u32 dp_table,
834 vnet_device_class_t * dev_class,
835 tunnel_lookup_t * tuns)
838 u32 hw_if_index = ~0;
840 vnet_hw_interface_t *hi;
841 vnet_main_t *vnm = lgm->vnet_main;
843 /* create hw lisp_gpeX iface if needed, otherwise reuse existing */
844 flen = vec_len (lgm->free_tunnel_hw_if_indices);
/* Reuse path: pop the last freed hw_if_index. */
847 hw_if_index = lgm->free_tunnel_hw_if_indices[flen - 1];
848 _vec_len (lgm->free_tunnel_hw_if_indices) -= 1;
850 hi = vnet_get_hw_interface (vnm, hw_if_index);
852 /* rename interface */
853 new_name = format (0, "%U", dev_class->format_device_name, vni);
855 vec_add1 (new_name, 0);
856 vnet_rename_interface (vnm, hw_if_index, (char *) new_name);
/* NOTE(review): new_name appears not to be freed on the visible lines;
 * confirm vec_free on an elided line or this leaks per reuse. */
859 /* clear old stats of freed interface before reuse */
860 vnet_interface_main_t *im = &vnm->interface_main;
861 vnet_interface_counter_lock (im);
862 vlib_zero_combined_counter (&im->combined_sw_if_counters
863 [VNET_INTERFACE_COUNTER_TX],
865 vlib_zero_combined_counter (&im->combined_sw_if_counters
866 [VNET_INTERFACE_COUNTER_RX],
868 vlib_zero_simple_counter (&im->sw_if_counters
869 [VNET_INTERFACE_COUNTER_DROP],
871 vnet_interface_counter_unlock (im);
/* Fresh-create path (else line elided): register a new interface. */
875 hw_if_index = vnet_register_interface (vnm, dev_class->index, vni,
876 lisp_gpe_hw_class.index, 0);
877 hi = vnet_get_hw_interface (vnm, hw_if_index);
880 hash_set (tuns->hw_if_index_by_dp_table, dp_table, hw_if_index);
882 /* set tunnel termination: post decap, packets are tagged as having been
883 * originated by lisp-gpe interface */
884 hash_set (tuns->sw_if_index_by_vni, vni, hi->sw_if_index);
885 hash_set (tuns->vni_by_sw_if_index, hi->sw_if_index, vni);
/* Tear down a lisp-gpe interface: bring it admin/link down, unmap it from
 * dp_table, park its hw_if_index on the free list for reuse, and remove
 * the vni<->sw_if_index bindings. */
891 remove_lisp_gpe_iface (lisp_gpe_main_t * lgm, u32 hi_index, u32 dp_table,
892 tunnel_lookup_t * tuns)
894 vnet_main_t *vnm = lgm->vnet_main;
895 vnet_hw_interface_t *hi;
898 hi = vnet_get_hw_interface (vnm, hi_index);
900 /* disable interface */
901 vnet_sw_interface_set_flags (vnm, hi->sw_if_index, 0 /* down */ );
902 vnet_hw_interface_set_flags (vnm, hi->hw_if_index, 0 /* down */ );
903 hash_unset (tuns->hw_if_index_by_dp_table, dp_table);
904 vec_add1 (lgm->free_tunnel_hw_if_indices, hi->hw_if_index);
906 /* clean tunnel termination and vni to sw_if_index binding */
907 vnip = hash_get (tuns->vni_by_sw_if_index, hi->sw_if_index);
/* Missing vni binding is warned about but otherwise tolerated (the
 * early-return/guard line is elided). */
910 clib_warning ("No vni associated to interface %d", hi->sw_if_index);
913 hash_unset (tuns->sw_if_index_by_vni, vnip[0]);
914 hash_unset (tuns->vni_by_sw_if_index, hi->sw_if_index);
/* Add/del an L3 lisp-gpe interface for (vni, table_id): create the
 * interface, wire ingress arcs from lgpe-ipX-lookup and egress arcs to the
 * tx next nodes, install 0/0 default routes pointing at lgpe-ipX-lookup,
 * bind the interface into the v4/v6 FIBs and bring it up.  The del path
 * (add/del test line elided) reverses all of this. */
918 lisp_gpe_add_del_l3_iface (lisp_gpe_main_t * lgm,
919 vnet_lisp_gpe_add_del_iface_args_t * a)
921 vnet_main_t *vnm = lgm->vnet_main;
922 tunnel_lookup_t *l3_ifaces = &lgm->l3_ifaces;
923 vnet_hw_interface_t *hi;
924 u32 lookup_next_index4, lookup_next_index6;
927 hip = hash_get (l3_ifaces->hw_if_index_by_dp_table, a->table_id);
/* Duplicate-mapping guards (surrounding if lines elided). */
933 clib_warning ("vrf %d already mapped to a vni", a->table_id);
937 si = hash_get (l3_ifaces->sw_if_index_by_vni, a->vni);
940 clib_warning ("Interface for vni %d already exists", a->vni);
944 /* create lisp iface and populate tunnel tables */
945 hi = create_lisp_gpe_iface (lgm, a->vni, a->table_id,
946 &lisp_gpe_device_class, l3_ifaces);
948 /* set ingress arc from lgpe_ipX_lookup */
949 lookup_next_index4 = vlib_node_add_next (lgm->vlib_main,
950 lgpe_ip4_lookup_node.index,
951 hi->output_node_index);
952 lookup_next_index6 = vlib_node_add_next (lgm->vlib_main,
953 lgpe_ip6_lookup_node.index,
954 hi->output_node_index);
955 hash_set (lgm->lgpe_ip4_lookup_next_index_by_table_id, a->table_id,
957 hash_set (lgm->lgpe_ip6_lookup_next_index_by_table_id, a->table_id,
960 /* insert default routes that point to lgpe-ipx-lookup */
961 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */ 1, 1);
962 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */ 0, 1);
964 /* set egress arcs */
965 #define _(sym,str) vlib_node_add_named_next_with_slot (vnm->vlib_main, \
966 hi->tx_node_index, str, LISP_GPE_TX_NEXT_##sym);
967 foreach_lisp_gpe_tx_next
969 /* set interface in appropriate v4 and v6 FIBs */
970 lisp_gpe_iface_set_table (hi->sw_if_index, a->table_id, 1);
971 lisp_gpe_iface_set_table (hi->sw_if_index, a->table_id, 0);
973 /* enable interface */
974 vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
975 VNET_SW_INTERFACE_FLAG_ADMIN_UP);
976 vnet_hw_interface_set_flags (vnm, hi->hw_if_index,
977 VNET_HW_INTERFACE_FLAG_LINK_UP);
/* Delete path (else branch lines elided): warn if missing, otherwise
 * remove the interface and the default routes. */
983 clib_warning ("The interface for vrf %d doesn't exist",
988 remove_lisp_gpe_iface (lgm, hip[0], a->table_id, &lgm->l3_ifaces);
990 /* unset default routes */
991 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */ 1, 0);
992 add_del_lisp_gpe_default_route (a->table_id, /* is_v4 */ 0, 0);
/* Add/del an L2 lisp-gpe interface for (vni, bd_id): find-or-create the
 * bridge domain, create the interface, add it to the bridge, wire egress
 * arcs and bring it up.  Del path reverses (add/del test lines elided). */
999 lisp_gpe_add_del_l2_iface (lisp_gpe_main_t * lgm,
1000 vnet_lisp_gpe_add_del_iface_args_t * a)
1002 vnet_main_t *vnm = lgm->vnet_main;
1003 tunnel_lookup_t *l2_ifaces = &lgm->l2_ifaces;
1004 vnet_hw_interface_t *hi;
1008 bd_index = bd_find_or_add_bd_index (&bd_main, a->bd_id);
1009 hip = hash_get (l2_ifaces->hw_if_index_by_dp_table, bd_index);
/* Duplicate-mapping guards (surrounding if lines elided). */
1015 clib_warning ("bridge domain %d already mapped to a vni", a->bd_id);
1019 si = hash_get (l2_ifaces->sw_if_index_by_vni, a->vni);
1022 clib_warning ("Interface for vni %d already exists", a->vni);
1026 /* create lisp iface and populate tunnel tables */
1027 hi = create_lisp_gpe_iface (lgm, a->vni, bd_index,
1028 &l2_lisp_gpe_device_class, &lgm->l2_ifaces);
1030 /* add iface to l2 bridge domain */
1031 set_int_l2_mode (lgm->vlib_main, vnm, MODE_L2_BRIDGE, hi->sw_if_index,
1034 /* set egress arcs */
1035 #define _(sym,str) vlib_node_add_named_next_with_slot (vnm->vlib_main, \
1036 hi->tx_node_index, str, L2_LISP_GPE_TX_NEXT_##sym);
1037 foreach_l2_lisp_gpe_tx_next
1039 /* enable interface */
1040 vnet_sw_interface_set_flags (vnm, hi->sw_if_index,
1041 VNET_SW_INTERFACE_FLAG_ADMIN_UP);
1042 vnet_hw_interface_set_flags (vnm, hi->hw_if_index,
1043 VNET_HW_INTERFACE_FLAG_LINK_UP);
/* Delete path (else branch lines elided). */
1049 clib_warning ("The interface for bridge domain %d doesn't exist",
1053 remove_lisp_gpe_iface (lgm, hip[0], bd_index, &lgm->l2_ifaces);
/* Public entry point: add/del a lisp-gpe data-plane interface.  Refuses to
 * act while LISP is disabled; dispatches to the L2 or L3 variant on a->is_l2
 * (the dispatch condition line is elided). */
1060 vnet_lisp_gpe_add_del_iface (vnet_lisp_gpe_add_del_iface_args_t * a,
1063 lisp_gpe_main_t *lgm = &lisp_gpe_main;
1065 if (vnet_lisp_gpe_enable_disable_status () == 0)
1067 clib_warning ("LISP is disabled!");
1068 return VNET_API_ERROR_LISP_DISABLED;
1072 return lisp_gpe_add_del_l3_iface (lgm, a);
1074 return lisp_gpe_add_del_l2_iface (lgm, a);
/* CLI handler for "lisp gpe iface": parses add/del, vrf|bd and vni, checks
 * that exactly one of vrf/bd is given along with vni, then calls
 * vnet_lisp_gpe_add_del_iface. */
1077 static clib_error_t *
1078 lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input,
1079 vlib_cli_command_t * cmd)
1081 unformat_input_t _line_input, *line_input = &_line_input;
1083 clib_error_t *error = 0;
1085 u32 table_id, vni, bd_id;
1086 u8 vni_is_set = 0, vrf_is_set = 0, bd_index_is_set = 0;
1088 vnet_lisp_gpe_add_del_iface_args_t _a, *a = &_a;
1090 /* Get a line of input. */
1091 if (!unformat_user (input, unformat_line_input, line_input))
1094 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
1096 if (unformat (line_input, "add"))
1098 else if (unformat (line_input, "del"))
1100 else if (unformat (line_input, "vrf %d", &table_id))
1104 else if (unformat (line_input, "vni %d", &vni))
1108 else if (unformat (line_input, "bd %d", &bd_id))
1110 bd_index_is_set = 1;
/* Unrecognized token: report a parse error (else line elided). */
1114 return clib_error_return (0, "parse error: '%U'",
1115 format_unformat_error, line_input);
1119 if (vrf_is_set && bd_index_is_set)
1120 return clib_error_return (0,
/* NOTE(review): user-visible typo "brdige" -> "bridge" in the string
 * below; fix would change a runtime string so it is only flagged here. */
1121 "Cannot set both vrf and brdige domain index!");
1124 return clib_error_return (0, "vni must be set!");
1126 if (!vrf_is_set && !bd_index_is_set)
1127 return clib_error_return (0, "vrf or bridge domain index must be set!");
1130 a->dp_table = vrf_is_set ? table_id : bd_id;
1132 a->is_l2 = bd_index_is_set;
1134 rv = vnet_lisp_gpe_add_del_iface (a, 0);
/* rv check line elided; on failure build a CLI error. */
1137 error = clib_error_return (0, "failed to %s gpe iface!",
1138 is_add ? "add" : "delete");
/* CLI registration for "lisp gpe iface" (closing brace elided). */
1145 VLIB_CLI_COMMAND (add_del_lisp_gpe_iface_command, static) = {
1146 .path = "lisp gpe iface",
1147 .short_help = "lisp gpe iface add/del vni <vni> vrf <vrf>",
1148 .function = lisp_gpe_add_del_iface_command_fn,
1153 * fd.io coding-style-patch-verification: ON
1156 * eval: (c-set-style "gnu")