/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vnet/ip/ip.h>
17 #include <vnet/dpo/lookup_dpo.h>
18 #include <vnet/dpo/load_balance_map.h>
19 #include <vnet/mpls/mpls_lookup.h>
20 #include <vnet/fib/fib_table.h>
21 #include <vnet/fib/ip4_fib.h>
22 #include <vnet/fib/ip6_fib.h>
23 #include <vnet/fib/mpls_fib.h>
24 #include <vnet/mfib/mfib_table.h>
25 #include <vnet/mfib/ip4_mfib.h>
26 #include <vnet/mfib/ip6_mfib.h>
28 static const char *const lookup_input_names[] = LOOKUP_INPUTS;
29 static const char *const lookup_cast_names[] = LOOKUP_CASTS;
32 * @brief Enumeration of the lookup subtypes
34 typedef enum lookup_sub_type_t_
38 LOOKUP_SUB_TYPE_DST_MCAST,
39 LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE,
41 #define LOOKUP_SUB_TYPE_NUM (LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE+1)
43 #define FOR_EACH_LOOKUP_SUB_TYPE(_st) \
44 for (_st = LOOKUP_SUB_TYPE_IP4_SRC; _st < LOOKUP_SUB_TYPE_NUM; _st++)
47 * @brief pool of all MPLS Label DPOs
49 lookup_dpo_t *lookup_dpo_pool;
52 * @brief An array of registered DPO type values for the sub-types
54 static dpo_type_t lookup_dpo_sub_types[LOOKUP_SUB_TYPE_NUM];
57 lookup_dpo_alloc (void)
61 pool_get_aligned(lookup_dpo_pool, lkd, CLIB_CACHE_LINE_BYTES);
67 lookup_dpo_get_index (lookup_dpo_t *lkd)
69 return (lkd - lookup_dpo_pool);
73 lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
77 lookup_table_t table_config,
83 lkd = lookup_dpo_alloc();
84 lkd->lkd_fib_index = fib_index;
85 lkd->lkd_proto = proto;
86 lkd->lkd_input = input;
87 lkd->lkd_table = table_config;
91 * use the input type to select the lookup sub-type
97 case LOOKUP_INPUT_SRC_ADDR:
98 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC];
100 case LOOKUP_INPUT_DST_ADDR:
101 switch (table_config)
103 case LOOKUP_TABLE_FROM_INPUT_INTERFACE:
104 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE];
106 case LOOKUP_TABLE_FROM_CONFIG:
107 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST];
110 if (LOOKUP_MULTICAST == cast)
112 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST];
122 dpo_set(dpo, type, proto, lookup_dpo_get_index(lkd));
127 lookup_dpo_add_or_lock_w_fib_index (fib_node_index_t fib_index,
130 lookup_input_t input,
131 lookup_table_t table_config,
134 if (LOOKUP_TABLE_FROM_CONFIG == table_config)
136 if (LOOKUP_UNICAST == cast)
138 fib_table_lock(fib_index,
139 dpo_proto_to_fib(proto),
144 mfib_table_lock(fib_index,
145 dpo_proto_to_fib(proto),
149 lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
153 lookup_dpo_add_or_lock_w_table_id (u32 table_id,
156 lookup_input_t input,
157 lookup_table_t table_config,
160 fib_node_index_t fib_index = FIB_NODE_INDEX_INVALID;
162 if (LOOKUP_TABLE_FROM_CONFIG == table_config)
164 if (LOOKUP_UNICAST == cast)
167 fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
174 mfib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
180 ASSERT(FIB_NODE_INDEX_INVALID != fib_index);
181 lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
185 format_lookup_dpo (u8 *s, va_list *args)
187 index_t index = va_arg (*args, index_t);
190 lkd = lookup_dpo_get(index);
192 if (LOOKUP_TABLE_FROM_INPUT_INTERFACE == lkd->lkd_table)
194 s = format(s, "%s,%s lookup in interface's %U table",
195 lookup_input_names[lkd->lkd_input],
196 lookup_cast_names[lkd->lkd_cast],
197 format_dpo_proto, lkd->lkd_proto);
201 if (LOOKUP_UNICAST == lkd->lkd_cast)
203 s = format(s, "%s,%s lookup in %U",
204 lookup_input_names[lkd->lkd_input],
205 lookup_cast_names[lkd->lkd_cast],
206 format_fib_table_name, lkd->lkd_fib_index,
207 dpo_proto_to_fib(lkd->lkd_proto));
211 s = format(s, "%s,%s lookup in %U",
212 lookup_input_names[lkd->lkd_input],
213 lookup_cast_names[lkd->lkd_cast],
214 format_mfib_table_name, lkd->lkd_fib_index,
215 dpo_proto_to_fib(lkd->lkd_proto));
222 lookup_dpo_lock (dpo_id_t *dpo)
226 lkd = lookup_dpo_get(dpo->dpoi_index);
232 lookup_dpo_unlock (dpo_id_t *dpo)
236 lkd = lookup_dpo_get(dpo->dpoi_index);
240 if (0 == lkd->lkd_locks)
242 if (LOOKUP_TABLE_FROM_CONFIG == lkd->lkd_table)
244 if (LOOKUP_UNICAST == lkd->lkd_cast)
246 fib_table_unlock(lkd->lkd_fib_index,
247 dpo_proto_to_fib(lkd->lkd_proto),
252 mfib_table_unlock(lkd->lkd_fib_index,
253 dpo_proto_to_fib(lkd->lkd_proto),
257 pool_put(lookup_dpo_pool, lkd);
262 ip4_src_fib_lookup_one (u32 src_fib_index0,
263 const ip4_address_t * addr0,
264 u32 * src_adj_index0)
266 ip4_fib_mtrie_leaf_t leaf0;
267 ip4_fib_mtrie_t * mtrie0;
269 mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
271 leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0);
272 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
273 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
275 src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
279 ip4_src_fib_lookup_two (u32 src_fib_index0,
281 const ip4_address_t * addr0,
282 const ip4_address_t * addr1,
283 u32 * src_adj_index0,
284 u32 * src_adj_index1)
286 ip4_fib_mtrie_leaf_t leaf0, leaf1;
287 ip4_fib_mtrie_t * mtrie0, * mtrie1;
289 mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
290 mtrie1 = &ip4_fib_get (src_fib_index1)->mtrie;
292 leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0);
293 leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, addr1);
295 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
296 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 2);
298 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
299 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 3);
301 src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
302 src_adj_index1[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
306 * @brief Lookup trace data
308 typedef struct lookup_trace_t_
312 mpls_unicast_header_t hdr;
314 fib_node_index_t fib_index;
320 lookup_dpo_ip4_inline (vlib_main_t * vm,
321 vlib_node_runtime_t * node,
322 vlib_frame_t * from_frame,
324 int table_from_interface)
326 u32 n_left_from, next_index, * from, * to_next;
327 u32 thread_index = vlib_get_thread_index();
328 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
330 from = vlib_frame_vector_args (from_frame);
331 n_left_from = from_frame->n_vectors;
333 next_index = node->cached_next_index;
335 while (n_left_from > 0)
339 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
341 while (n_left_from >= 4 && n_left_to_next > 2)
343 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
344 flow_hash_config_t flow_hash_config0;
345 const ip4_address_t *input_addr0;
346 const load_balance_t *lb0;
347 const lookup_dpo_t * lkd0;
348 const ip4_header_t * ip0;
349 const dpo_id_t *dpo0;
351 u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
352 flow_hash_config_t flow_hash_config1;
353 const ip4_address_t *input_addr1;
354 const load_balance_t *lb1;
355 const lookup_dpo_t * lkd1;
356 const ip4_header_t * ip1;
357 const dpo_id_t *dpo1;
360 /* Prefetch next iteration. */
362 vlib_buffer_t * p2, * p3;
364 p2 = vlib_get_buffer (vm, from[2]);
365 p3 = vlib_get_buffer (vm, from[3]);
367 vlib_prefetch_buffer_header (p2, LOAD);
368 vlib_prefetch_buffer_header (p3, LOAD);
370 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
371 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
383 b0 = vlib_get_buffer (vm, bi0);
384 ip0 = vlib_buffer_get_current (b0);
385 b1 = vlib_get_buffer (vm, bi1);
386 ip1 = vlib_buffer_get_current (b1);
388 /* dst lookup was done by ip4 lookup */
389 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
390 lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
391 lkd0 = lookup_dpo_get(lkdi0);
392 lkd1 = lookup_dpo_get(lkdi1);
395 * choose between a lookup using the fib index in the DPO
396 * or getting the FIB index from the interface.
398 if (table_from_interface)
401 ip4_fib_table_get_index_for_sw_if_index(
402 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
404 ip4_fib_table_get_index_for_sw_if_index(
405 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
409 fib_index0 = lkd0->lkd_fib_index;
410 fib_index1 = lkd1->lkd_fib_index;
414 * choose between a source or destination address lookup in the table
418 input_addr0 = &ip0->src_address;
419 input_addr1 = &ip1->src_address;
423 input_addr0 = &ip0->dst_address;
424 input_addr1 = &ip1->dst_address;
428 ip4_src_fib_lookup_two (fib_index0, fib_index1,
429 input_addr0, input_addr1,
431 lb0 = load_balance_get(lbi0);
432 lb1 = load_balance_get(lbi1);
434 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
435 vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
437 /* Use flow hash to compute multipath adjacency. */
438 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
439 hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
441 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
443 flow_hash_config0 = lb0->lb_hash_config;
444 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
445 ip4_compute_flow_hash (ip0, flow_hash_config0);
448 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
450 flow_hash_config1 = lb1->lb_hash_config;
451 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
452 ip4_compute_flow_hash (ip1, flow_hash_config1);
455 dpo0 = load_balance_get_bucket_i(lb0,
457 (lb0->lb_n_buckets_minus_1)));
458 dpo1 = load_balance_get_bucket_i(lb1,
460 (lb1->lb_n_buckets_minus_1)));
462 next0 = dpo0->dpoi_next_node;
463 next1 = dpo1->dpoi_next_node;
464 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
465 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
467 vlib_increment_combined_counter
468 (cm, thread_index, lbi0, 1,
469 vlib_buffer_length_in_chain (vm, b0));
470 vlib_increment_combined_counter
471 (cm, thread_index, lbi1, 1,
472 vlib_buffer_length_in_chain (vm, b1));
474 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
476 lookup_trace_t *tr = vlib_add_trace (vm, node,
478 tr->fib_index = fib_index0;
480 tr->addr.ip4 = *input_addr0;
482 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
484 lookup_trace_t *tr = vlib_add_trace (vm, node,
486 tr->fib_index = fib_index1;
488 tr->addr.ip4 = *input_addr1;
491 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
492 to_next, n_left_to_next,
493 bi0, bi1, next0, next1);
496 while (n_left_from > 0 && n_left_to_next > 0)
498 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
499 flow_hash_config_t flow_hash_config0;
500 const ip4_address_t *input_addr;
501 const load_balance_t *lb0;
502 const lookup_dpo_t * lkd0;
503 const ip4_header_t * ip0;
504 const dpo_id_t *dpo0;
514 b0 = vlib_get_buffer (vm, bi0);
515 ip0 = vlib_buffer_get_current (b0);
517 /* dst lookup was done by ip4 lookup */
518 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
519 lkd0 = lookup_dpo_get(lkdi0);
522 * choose between a lookup using the fib index in the DPO
523 * or getting the FIB index from the interface.
525 if (table_from_interface)
528 ip4_fib_table_get_index_for_sw_if_index(
529 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
533 fib_index0 = lkd0->lkd_fib_index;
537 * choose between a source or destination address lookup in the table
541 input_addr = &ip0->src_address;
545 input_addr = &ip0->dst_address;
549 ip4_src_fib_lookup_one (fib_index0, input_addr, &lbi0);
550 lb0 = load_balance_get(lbi0);
552 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
554 /* Use flow hash to compute multipath adjacency. */
555 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
557 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
559 flow_hash_config0 = lb0->lb_hash_config;
560 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
561 ip4_compute_flow_hash (ip0, flow_hash_config0);
564 dpo0 = load_balance_get_bucket_i(lb0,
566 (lb0->lb_n_buckets_minus_1)));
568 next0 = dpo0->dpoi_next_node;
569 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
571 vlib_increment_combined_counter
572 (cm, thread_index, lbi0, 1,
573 vlib_buffer_length_in_chain (vm, b0));
575 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
577 lookup_trace_t *tr = vlib_add_trace (vm, node,
579 tr->fib_index = fib_index0;
581 tr->addr.ip4 = *input_addr;
584 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
585 n_left_to_next, bi0, next0);
587 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
589 return from_frame->n_vectors;
593 format_lookup_trace (u8 * s, va_list * args)
595 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
596 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
597 lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
598 u32 indent = format_get_indent (s);
599 s = format (s, "%U fib-index:%d addr:%U load-balance:%d",
600 format_white_space, indent,
602 format_ip46_address, &t->addr, IP46_TYPE_ANY,
608 lookup_ip4_dst (vlib_main_t * vm,
609 vlib_node_runtime_t * node,
610 vlib_frame_t * from_frame)
612 return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 0));
615 VLIB_REGISTER_NODE (lookup_ip4_dst_node) = {
616 .function = lookup_ip4_dst,
617 .name = "lookup-ip4-dst",
618 .vector_size = sizeof (u32),
619 .sibling_of = "ip4-lookup",
620 .format_trace = format_lookup_trace,
622 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_node, lookup_ip4_dst)
625 lookup_ip4_dst_itf (vlib_main_t * vm,
626 vlib_node_runtime_t * node,
627 vlib_frame_t * from_frame)
629 return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 1));
632 VLIB_REGISTER_NODE (lookup_ip4_dst_itf_node) = {
633 .function = lookup_ip4_dst_itf,
634 .name = "lookup-ip4-dst-itf",
635 .vector_size = sizeof (u32),
636 .sibling_of = "ip4-lookup",
637 .format_trace = format_lookup_trace,
639 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_itf_node, lookup_ip4_dst_itf)
642 lookup_ip4_src (vlib_main_t * vm,
643 vlib_node_runtime_t * node,
644 vlib_frame_t * from_frame)
646 return (lookup_dpo_ip4_inline(vm, node, from_frame, 1, 0));
649 VLIB_REGISTER_NODE (lookup_ip4_src_node) = {
650 .function = lookup_ip4_src,
651 .name = "lookup-ip4-src",
652 .vector_size = sizeof (u32),
653 .format_trace = format_lookup_trace,
654 .sibling_of = "ip4-lookup",
656 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_src_node, lookup_ip4_src)
659 lookup_dpo_ip6_inline (vlib_main_t * vm,
660 vlib_node_runtime_t * node,
661 vlib_frame_t * from_frame,
663 int table_from_interface)
665 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
666 u32 n_left_from, next_index, * from, * to_next;
667 u32 thread_index = vlib_get_thread_index();
669 from = vlib_frame_vector_args (from_frame);
670 n_left_from = from_frame->n_vectors;
672 next_index = node->cached_next_index;
674 while (n_left_from > 0)
678 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
680 while (n_left_from >= 4 && n_left_to_next > 2)
682 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
683 flow_hash_config_t flow_hash_config0;
684 const ip6_address_t *input_addr0;
685 const load_balance_t *lb0;
686 const lookup_dpo_t * lkd0;
687 const ip6_header_t * ip0;
688 const dpo_id_t *dpo0;
690 u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
691 flow_hash_config_t flow_hash_config1;
692 const ip6_address_t *input_addr1;
693 const load_balance_t *lb1;
694 const lookup_dpo_t * lkd1;
695 const ip6_header_t * ip1;
696 const dpo_id_t *dpo1;
699 /* Prefetch next iteration. */
701 vlib_buffer_t * p2, * p3;
703 p2 = vlib_get_buffer (vm, from[2]);
704 p3 = vlib_get_buffer (vm, from[3]);
706 vlib_prefetch_buffer_header (p2, LOAD);
707 vlib_prefetch_buffer_header (p3, LOAD);
709 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
710 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
722 b0 = vlib_get_buffer (vm, bi0);
723 ip0 = vlib_buffer_get_current (b0);
724 b1 = vlib_get_buffer (vm, bi1);
725 ip1 = vlib_buffer_get_current (b1);
727 /* dst lookup was done by ip6 lookup */
728 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
729 lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
730 lkd0 = lookup_dpo_get(lkdi0);
731 lkd1 = lookup_dpo_get(lkdi1);
734 * choose between a lookup using the fib index in the DPO
735 * or getting the FIB index from the interface.
737 if (table_from_interface)
740 ip6_fib_table_get_index_for_sw_if_index(
741 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
743 ip6_fib_table_get_index_for_sw_if_index(
744 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
748 fib_index0 = lkd0->lkd_fib_index;
749 fib_index1 = lkd1->lkd_fib_index;
753 * choose between a source or destination address lookup in the table
757 input_addr0 = &ip0->src_address;
758 input_addr1 = &ip1->src_address;
762 input_addr0 = &ip0->dst_address;
763 input_addr1 = &ip1->dst_address;
767 lbi0 = ip6_fib_table_fwding_lookup(&ip6_main,
770 lbi1 = ip6_fib_table_fwding_lookup(&ip6_main,
773 lb0 = load_balance_get(lbi0);
774 lb1 = load_balance_get(lbi1);
776 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
777 vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
779 /* Use flow hash to compute multipath adjacency. */
780 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
781 hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
783 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
785 flow_hash_config0 = lb0->lb_hash_config;
786 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
787 ip6_compute_flow_hash (ip0, flow_hash_config0);
790 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
792 flow_hash_config1 = lb1->lb_hash_config;
793 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
794 ip6_compute_flow_hash (ip1, flow_hash_config1);
797 dpo0 = load_balance_get_bucket_i(lb0,
799 (lb0->lb_n_buckets_minus_1)));
800 dpo1 = load_balance_get_bucket_i(lb1,
802 (lb1->lb_n_buckets_minus_1)));
804 next0 = dpo0->dpoi_next_node;
805 next1 = dpo1->dpoi_next_node;
806 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
807 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
809 vlib_increment_combined_counter
810 (cm, thread_index, lbi0, 1,
811 vlib_buffer_length_in_chain (vm, b0));
812 vlib_increment_combined_counter
813 (cm, thread_index, lbi1, 1,
814 vlib_buffer_length_in_chain (vm, b1));
816 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
818 lookup_trace_t *tr = vlib_add_trace (vm, node,
820 tr->fib_index = fib_index0;
822 tr->addr.ip6 = *input_addr0;
824 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
826 lookup_trace_t *tr = vlib_add_trace (vm, node,
828 tr->fib_index = fib_index1;
830 tr->addr.ip6 = *input_addr1;
832 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
833 n_left_to_next, bi0, bi1,
836 while (n_left_from > 0 && n_left_to_next > 0)
838 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
839 flow_hash_config_t flow_hash_config0;
840 const ip6_address_t *input_addr0;
841 const load_balance_t *lb0;
842 const lookup_dpo_t * lkd0;
843 const ip6_header_t * ip0;
844 const dpo_id_t *dpo0;
854 b0 = vlib_get_buffer (vm, bi0);
855 ip0 = vlib_buffer_get_current (b0);
857 /* dst lookup was done by ip6 lookup */
858 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
859 lkd0 = lookup_dpo_get(lkdi0);
862 * choose between a lookup using the fib index in the DPO
863 * or getting the FIB index from the interface.
865 if (table_from_interface)
868 ip6_fib_table_get_index_for_sw_if_index(
869 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
873 fib_index0 = lkd0->lkd_fib_index;
877 * choose between a source or destination address lookup in the table
881 input_addr0 = &ip0->src_address;
885 input_addr0 = &ip0->dst_address;
889 lbi0 = ip6_fib_table_fwding_lookup(&ip6_main,
892 lb0 = load_balance_get(lbi0);
894 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
896 /* Use flow hash to compute multipath adjacency. */
897 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
899 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
901 flow_hash_config0 = lb0->lb_hash_config;
902 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
903 ip6_compute_flow_hash (ip0, flow_hash_config0);
906 dpo0 = load_balance_get_bucket_i(lb0,
908 (lb0->lb_n_buckets_minus_1)));
910 next0 = dpo0->dpoi_next_node;
911 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
913 vlib_increment_combined_counter
914 (cm, thread_index, lbi0, 1,
915 vlib_buffer_length_in_chain (vm, b0));
917 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
919 lookup_trace_t *tr = vlib_add_trace (vm, node,
921 tr->fib_index = fib_index0;
923 tr->addr.ip6 = *input_addr0;
925 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
926 n_left_to_next, bi0, next0);
928 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
930 return from_frame->n_vectors;
934 lookup_ip6_dst (vlib_main_t * vm,
935 vlib_node_runtime_t * node,
936 vlib_frame_t * from_frame)
938 return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /*use src*/, 0));
941 VLIB_REGISTER_NODE (lookup_ip6_dst_node) = {
942 .function = lookup_ip6_dst,
943 .name = "lookup-ip6-dst",
944 .vector_size = sizeof (u32),
945 .format_trace = format_lookup_trace,
946 .sibling_of = "ip6-lookup",
948 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_node, lookup_ip6_dst)
951 lookup_ip6_dst_itf (vlib_main_t * vm,
952 vlib_node_runtime_t * node,
953 vlib_frame_t * from_frame)
955 return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /*use src*/, 1));
958 VLIB_REGISTER_NODE (lookup_ip6_dst_itf_node) = {
959 .function = lookup_ip6_dst_itf,
960 .name = "lookup-ip6-dst-itf",
961 .vector_size = sizeof (u32),
962 .format_trace = format_lookup_trace,
963 .sibling_of = "ip6-lookup",
965 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_itf_node, lookup_ip6_dst_itf)
968 lookup_ip6_src (vlib_main_t * vm,
969 vlib_node_runtime_t * node,
970 vlib_frame_t * from_frame)
972 return (lookup_dpo_ip6_inline(vm, node, from_frame, 1, 0));
975 VLIB_REGISTER_NODE (lookup_ip6_src_node) = {
976 .function = lookup_ip6_src,
977 .name = "lookup-ip6-src",
978 .vector_size = sizeof (u32),
979 .format_trace = format_lookup_trace,
980 .sibling_of = "ip6-lookup",
982 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_src_node, lookup_ip6_src)
985 lookup_dpo_mpls_inline (vlib_main_t * vm,
986 vlib_node_runtime_t * node,
987 vlib_frame_t * from_frame,
988 int table_from_interface)
990 u32 n_left_from, next_index, * from, * to_next;
991 u32 thread_index = vlib_get_thread_index();
992 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
994 from = vlib_frame_vector_args (from_frame);
995 n_left_from = from_frame->n_vectors;
997 next_index = node->cached_next_index;
999 while (n_left_from > 0)
1003 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
1005 /* while (n_left_from >= 4 && n_left_to_next >= 2) */
1008 while (n_left_from > 0 && n_left_to_next > 0)
1010 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash0;
1011 const mpls_unicast_header_t * hdr0;
1012 const load_balance_t *lb0;
1013 const lookup_dpo_t * lkd0;
1014 const dpo_id_t *dpo0;
1022 n_left_to_next -= 1;
1024 b0 = vlib_get_buffer (vm, bi0);
1025 hdr0 = vlib_buffer_get_current (b0);
1027 /* dst lookup was done by mpls lookup */
1028 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
1029 lkd0 = lookup_dpo_get(lkdi0);
1032 * choose between a lookup using the fib index in the DPO
1033 * or getting the FIB index from the interface.
1035 if (table_from_interface)
1038 mpls_fib_table_get_index_for_sw_if_index(
1039 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
1043 fib_index0 = lkd0->lkd_fib_index;
1047 lbi0 = mpls_fib_table_forwarding_lookup (fib_index0, hdr0);
1048 lb0 = load_balance_get(lbi0);
1049 dpo0 = load_balance_get_bucket_i(lb0, 0);
1051 next0 = dpo0->dpoi_next_node;
1052 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
1055 if (MPLS_IS_REPLICATE & lbi0)
1057 next0 = mpls_lookup_to_replicate_edge;
1058 vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
1059 (lbi0 & ~MPLS_IS_REPLICATE);
1063 lb0 = load_balance_get(lbi0);
1064 ASSERT (lb0->lb_n_buckets > 0);
1065 ASSERT (is_pow2 (lb0->lb_n_buckets));
1067 if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
1069 hash0 = vnet_buffer (b0)->ip.flow_hash =
1070 mpls_compute_flow_hash(hdr0, lb0->lb_hash_config);
1071 dpo0 = load_balance_get_fwd_bucket
1073 (hash0 & (lb0->lb_n_buckets_minus_1)));
1077 dpo0 = load_balance_get_bucket_i (lb0, 0);
1079 next0 = dpo0->dpoi_next_node;
1081 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
1083 vlib_increment_combined_counter
1084 (cm, thread_index, lbi0, 1,
1085 vlib_buffer_length_in_chain (vm, b0));
1088 vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
1089 vnet_buffer (b0)->mpls.exp = (((char*)hdr0)[2] & 0xe) >> 1;
1090 vnet_buffer (b0)->mpls.first = 1;
1091 vlib_buffer_advance(b0, sizeof(*hdr0));
1093 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1095 lookup_trace_t *tr = vlib_add_trace (vm, node,
1097 tr->fib_index = fib_index0;
1102 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
1103 n_left_to_next, bi0, next0);
1105 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1107 return from_frame->n_vectors;
1111 format_lookup_mpls_trace (u8 * s, va_list * args)
1113 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1114 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1115 lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
1116 u32 indent = format_get_indent (s);
1117 mpls_unicast_header_t hdr;
1119 hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);
1121 s = format (s, "%U fib-index:%d hdr:%U load-balance:%d",
1122 format_white_space, indent,
1124 format_mpls_header, hdr,
1130 lookup_mpls_dst (vlib_main_t * vm,
1131 vlib_node_runtime_t * node,
1132 vlib_frame_t * from_frame)
1134 return (lookup_dpo_mpls_inline(vm, node, from_frame, 0));
1137 VLIB_REGISTER_NODE (lookup_mpls_dst_node) = {
1138 .function = lookup_mpls_dst,
1139 .name = "lookup-mpls-dst",
1140 .vector_size = sizeof (u32),
1141 .sibling_of = "mpls-lookup",
1142 .format_trace = format_lookup_mpls_trace,
1145 VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_node, lookup_mpls_dst)
1148 lookup_mpls_dst_itf (vlib_main_t * vm,
1149 vlib_node_runtime_t * node,
1150 vlib_frame_t * from_frame)
1152 return (lookup_dpo_mpls_inline(vm, node, from_frame, 1));
1155 VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
1156 .function = lookup_mpls_dst_itf,
1157 .name = "lookup-mpls-dst-itf",
1158 .vector_size = sizeof (u32),
1159 .sibling_of = "mpls-lookup",
1160 .format_trace = format_lookup_mpls_trace,
1163 VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_itf_node, lookup_mpls_dst_itf)
1165 typedef enum lookup_ip_dst_mcast_next_t_ {
1166 LOOKUP_IP_DST_MCAST_NEXT_RPF,
1167 LOOKUP_IP_DST_MCAST_N_NEXT,
1168 } mfib_forward_lookup_next_t;
1171 lookup_dpo_ip_dst_mcast_inline (vlib_main_t * vm,
1172 vlib_node_runtime_t * node,
1173 vlib_frame_t * from_frame,
1176 u32 n_left_from, next_index, * from, * to_next;
1178 from = vlib_frame_vector_args (from_frame);
1179 n_left_from = from_frame->n_vectors;
1181 next_index = LOOKUP_IP_DST_MCAST_NEXT_RPF;
1183 while (n_left_from > 0)
1187 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
1189 /* while (n_left_from >= 4 && n_left_to_next >= 2) */
1192 while (n_left_from > 0 && n_left_to_next > 0)
1194 u32 bi0, lkdi0, fib_index0, next0;
1195 const lookup_dpo_t * lkd0;
1196 fib_node_index_t mfei0;
1204 n_left_to_next -= 1;
1206 b0 = vlib_get_buffer (vm, bi0);
1208 /* dst lookup was done by mpls lookup */
1209 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
1210 lkd0 = lookup_dpo_get(lkdi0);
1211 fib_index0 = lkd0->lkd_fib_index;
1212 next0 = LOOKUP_IP_DST_MCAST_NEXT_RPF;
1218 ip0 = vlib_buffer_get_current (b0);
1219 mfei0 = ip4_mfib_table_lookup(ip4_mfib_get(fib_index0),
1223 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1225 lookup_trace_t *tr = vlib_add_trace (vm, node,
1227 tr->fib_index = fib_index0;
1229 tr->addr.ip4 = ip0->dst_address;
1236 ip0 = vlib_buffer_get_current (b0);
1237 mfei0 = ip6_mfib_table_lookup2(ip6_mfib_get(fib_index0),
1240 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1242 lookup_trace_t *tr = vlib_add_trace (vm, node,
1244 tr->fib_index = fib_index0;
1246 tr->addr.ip6 = ip0->dst_address;
1250 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = mfei0;
1252 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
1253 n_left_to_next, bi0, next0);
1255 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1257 return from_frame->n_vectors;
1261 lookup_ip4_dst_mcast (vlib_main_t * vm,
1262 vlib_node_runtime_t * node,
1263 vlib_frame_t * from_frame)
1265 return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 1));
1268 VLIB_REGISTER_NODE (lookup_ip4_dst_mcast_node) = {
1269 .function = lookup_ip4_dst_mcast,
1270 .name = "lookup-ip4-dst-mcast",
1271 .vector_size = sizeof (u32),
1273 .format_trace = format_lookup_trace,
1274 .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
1276 [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip4-mfib-forward-rpf",
1279 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_mcast_node,
1280 lookup_ip4_dst_mcast)
1283 lookup_ip6_dst_mcast (vlib_main_t * vm,
1284 vlib_node_runtime_t * node,
1285 vlib_frame_t * from_frame)
1287 return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 0));
1290 VLIB_REGISTER_NODE (lookup_ip6_dst_mcast_node) = {
1291 .function = lookup_ip6_dst_mcast,
1292 .name = "lookup-ip6-dst-mcast",
1293 .vector_size = sizeof (u32),
1295 .format_trace = format_lookup_trace,
1296 .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
1298 [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip6-mfib-forward-rpf",
1301 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_mcast_node,
1302 lookup_ip6_dst_mcast)
1305 lookup_dpo_mem_show (void)
1307 fib_show_memory_usage("Lookup",
1308 pool_elts(lookup_dpo_pool),
1309 pool_len(lookup_dpo_pool),
1310 sizeof(lookup_dpo_t));
1313 const static dpo_vft_t lkd_vft = {
1314 .dv_lock = lookup_dpo_lock,
1315 .dv_unlock = lookup_dpo_unlock,
1316 .dv_format = format_lookup_dpo,
1318 const static dpo_vft_t lkd_vft_w_mem_show = {
1319 .dv_lock = lookup_dpo_lock,
1320 .dv_unlock = lookup_dpo_unlock,
1321 .dv_format = format_lookup_dpo,
1322 .dv_mem_show = lookup_dpo_mem_show,
1325 const static char* const lookup_src_ip4_nodes[] =
1330 const static char* const lookup_src_ip6_nodes[] =
1335 const static char* const * const lookup_src_nodes[DPO_PROTO_NUM] =
1337 [DPO_PROTO_IP4] = lookup_src_ip4_nodes,
1338 [DPO_PROTO_IP6] = lookup_src_ip6_nodes,
1339 [DPO_PROTO_MPLS] = NULL,
1342 const static char* const lookup_dst_ip4_nodes[] =
1347 const static char* const lookup_dst_ip6_nodes[] =
1352 const static char* const lookup_dst_mpls_nodes[] =
1357 const static char* const * const lookup_dst_nodes[DPO_PROTO_NUM] =
1359 [DPO_PROTO_IP4] = lookup_dst_ip4_nodes,
1360 [DPO_PROTO_IP6] = lookup_dst_ip6_nodes,
1361 [DPO_PROTO_MPLS] = lookup_dst_mpls_nodes,
1364 const static char* const lookup_dst_mcast_ip4_nodes[] =
1366 "lookup-ip4-dst-mcast",
1369 const static char* const lookup_dst_mcast_ip6_nodes[] =
1371 "lookup-ip6-dst-mcast",
1374 const static char* const * const lookup_dst_mcast_nodes[DPO_PROTO_NUM] =
1376 [DPO_PROTO_IP4] = lookup_dst_mcast_ip4_nodes,
1377 [DPO_PROTO_IP6] = lookup_dst_mcast_ip6_nodes,
1380 const static char* const lookup_dst_from_interface_ip4_nodes[] =
1382 "lookup-ip4-dst-itf",
1385 const static char* const lookup_dst_from_interface_ip6_nodes[] =
1387 "lookup-ip6-dst-itf",
1390 const static char* const lookup_dst_from_interface_mpls_nodes[] =
1392 "lookup-mpls-dst-itf",
1395 const static char* const * const lookup_dst_from_interface_nodes[DPO_PROTO_NUM] =
1397 [DPO_PROTO_IP4] = lookup_dst_from_interface_ip4_nodes,
1398 [DPO_PROTO_IP6] = lookup_dst_from_interface_ip6_nodes,
1399 [DPO_PROTO_MPLS] = lookup_dst_from_interface_mpls_nodes,
1404 lookup_dpo_module_init (void)
1406 dpo_register(DPO_LOOKUP, &lkd_vft_w_mem_show, NULL);
1409 * There are various sorts of lookup; src or dst addr v4 /v6 etc.
1410 * there isn't an object type for each (there is only the lookup_dpo_t),
1411 * but, for performance reasons, there is a data plane function, and hence
1412 * VLIB node for each. VLIB graph node construction is based on DPO types
1413 * so we create sub-types.
1415 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC] =
1416 dpo_register_new_type(&lkd_vft, lookup_src_nodes);
1417 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST] =
1418 dpo_register_new_type(&lkd_vft, lookup_dst_nodes);
1419 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST] =
1420 dpo_register_new_type(&lkd_vft, lookup_dst_mcast_nodes);
1421 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE] =
1422 dpo_register_new_type(&lkd_vft, lookup_dst_from_interface_nodes);