2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/ip/ip.h>
17 #include <vnet/dpo/lookup_dpo.h>
18 #include <vnet/dpo/load_balance.h>
19 #include <vnet/mpls/mpls.h>
20 #include <vnet/fib/fib_table.h>
21 #include <vnet/fib/ip4_fib.h>
22 #include <vnet/fib/ip6_fib.h>
23 #include <vnet/fib/mpls_fib.h>
/* Human-readable names for each LOOKUP_INPUT_* value; indexed by
 * lkd_input when formatting a lookup DPO. */
25 static const char *const lookup_input_names[] = LOOKUP_INPUTS;
28 * @brief Enumeration of the lookup subtypes
/* One sub-type per data-plane lookup variant (src addr, dst addr,
 * dst addr with table from RX interface); each gets its own registered
 * DPO type so it can bind to its own VLIB node. */
30 typedef enum lookup_sub_type_t_
34 LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE,
/**
 * @brief Total number of lookup sub-types (last enum member + 1).
 */
#define LOOKUP_SUB_TYPE_NUM (LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE+1)

/**
 * @brief Iterate over every lookup sub-type.
 *
 * Bug fix: the loop previously started at LOOKUP_SUB_TYPE_IP4_SRC, an
 * identifier that is not a member of lookup_sub_type_t_ — every other
 * reference in this file uses LOOKUP_SUB_TYPE_SRC. Because the macro is
 * currently unused it was never expanded, so the typo was latent; any
 * future use would have failed to compile.
 */
#define FOR_EACH_LOOKUP_SUB_TYPE(_st) \
    for (_st = LOOKUP_SUB_TYPE_SRC; _st < LOOKUP_SUB_TYPE_NUM; _st++)
42 * @brief pool of all lookup DPOs
/* NOTE(review): the original doxygen said "MPLS Label DPOs" — that text
 * appears copy-pasted from mpls_label_dpo.c; this is the lookup_dpo_t pool. */
44 lookup_dpo_t *lookup_dpo_pool;
47 * @brief An array of registered DPO type values for the sub-types
/* Filled in by lookup_dpo_module_init() via dpo_register_new_type(). */
49 static dpo_type_t lookup_dpo_sub_types[LOOKUP_SUB_TYPE_NUM];
/* Allocate one lookup DPO from the pool, cache-line aligned.
 * (return type / braces elided in this view) */
52 lookup_dpo_alloc (void)
56 pool_get_aligned(lookup_dpo_pool, lkd, CLIB_CACHE_LINE_BYTES);
/* Map a lookup DPO object back to its index in the pool
 * (pointer arithmetic against the pool base). */
62 lookup_dpo_get_index (lookup_dpo_t *lkd)
64 return (lkd - lookup_dpo_pool);
/* Internal constructor: allocate and fill a lookup DPO, pick the
 * registered sub-type that matches (input, table_config) — which decides
 * the VLIB node packets will visit — and bind it into 'dpo'. */
68 lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
71 lookup_table_t table_config,
77 lkd = lookup_dpo_alloc();
78 lkd->lkd_fib_index = fib_index;
79 lkd->lkd_proto = proto;
80 lkd->lkd_input = input;
81 lkd->lkd_table = table_config;
84 * use the input type to select the lookup sub-type
90 case LOOKUP_INPUT_SRC_ADDR:
91 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC];
/* dst-address lookups split further on where the FIB table comes from */
93 case LOOKUP_INPUT_DST_ADDR:
96 case LOOKUP_TABLE_FROM_INPUT_INTERFACE:
97 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE];
99 case LOOKUP_TABLE_FROM_CONFIG:
100 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST];
111 dpo_set(dpo, type, proto, lookup_dpo_get_index(lkd));
/* Public constructor taking an explicit FIB index. For a config-sourced
 * table, take a lock on that FIB so it cannot be deleted while this DPO
 * references it; the matching unlock is in lookup_dpo_unlock(). */
116 lookup_dpo_add_or_lock_w_fib_index (fib_node_index_t fib_index,
118 lookup_input_t input,
119 lookup_table_t table_config,
122 if (LOOKUP_TABLE_FROM_CONFIG == table_config)
124 fib_table_lock(fib_index, dpo_proto_to_fib(proto));
126 lookup_dpo_add_or_lock_i(fib_index, proto, input, table_config, dpo);
/* Public constructor taking a table-ID: find-or-create (and lock) the
 * FIB with that ID, then delegate to the internal constructor. */
130 lookup_dpo_add_or_lock_w_table_id (u32 table_id,
132 lookup_input_t input,
133 lookup_table_t table_config,
136 fib_node_index_t fib_index = FIB_NODE_INDEX_INVALID;
138 if (LOOKUP_TABLE_FROM_CONFIG == table_config)
141 fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
/* NOTE(review): if called with a non-FROM_CONFIG table_config the index
 * stays INVALID and this ASSERT fires — callers appear expected to pass
 * LOOKUP_TABLE_FROM_CONFIG here; confirm against call sites. */
145 ASSERT(FIB_NODE_INDEX_INVALID != fib_index);
146 lookup_dpo_add_or_lock_i(fib_index, proto, input, table_config, dpo);
/* format() callback for a lookup DPO: "<input> lookup in <table>". */
150 format_lookup_dpo (u8 *s, va_list *args)
152 index_t index = va_arg (*args, index_t);
155 lkd = lookup_dpo_get(index);
157 if (LOOKUP_TABLE_FROM_INPUT_INTERFACE == lkd->lkd_table)
/* table is chosen per-packet from the RX interface, so there is no
 * single FIB name to print */
159 s = format(s, "%s lookup in interface's %U table",
160 lookup_input_names[lkd->lkd_input],
161 format_dpo_proto, lkd->lkd_proto);
/* else: fixed table configured on the DPO — print its name */
165 s = format(s, "%s lookup in %U",
166 lookup_input_names[lkd->lkd_input],
167 format_fib_table_name, lkd->lkd_fib_index,
168 dpo_proto_to_fib(lkd->lkd_proto));
/* DPO vft lock callback; fetches the object so its reference count can
 * be bumped (the increment itself is elided in this view). */
174 lookup_dpo_lock (dpo_id_t *dpo)
178 lkd = lookup_dpo_get(dpo->dpoi_index);
/* DPO vft unlock callback. When the last reference goes: release the
 * FIB table lock taken at construction (config-sourced tables only) and
 * return the object to the pool. */
184 lookup_dpo_unlock (dpo_id_t *dpo)
188 lkd = lookup_dpo_get(dpo->dpoi_index);
192 if (0 == lkd->lkd_locks)
194 if (LOOKUP_TABLE_FROM_CONFIG == lkd->lkd_table)
196 fib_table_unlock(lkd->lkd_fib_index,
197 dpo_proto_to_fib(lkd->lkd_proto));
199 pool_put(lookup_dpo_pool, lkd);
204 ip4_src_fib_lookup_one (u32 src_fib_index0,
205 const ip4_address_t * addr0,
206 u32 * src_adj_index0)
208 ip4_fib_mtrie_leaf_t leaf0;
209 ip4_fib_mtrie_t * mtrie0;
211 mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
213 leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0);
214 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
215 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
217 src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
221 ip4_src_fib_lookup_two (u32 src_fib_index0,
223 const ip4_address_t * addr0,
224 const ip4_address_t * addr1,
225 u32 * src_adj_index0,
226 u32 * src_adj_index1)
228 ip4_fib_mtrie_leaf_t leaf0, leaf1;
229 ip4_fib_mtrie_t * mtrie0, * mtrie1;
231 mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
232 mtrie1 = &ip4_fib_get (src_fib_index1)->mtrie;
234 leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0);
235 leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, addr1);
237 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
238 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 2);
240 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
241 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 3);
243 src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
244 src_adj_index1[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
248 * @brief Lookup trace data
/* Shared by the ip4/ip6/mpls lookup nodes; the address union member used
 * by tr->addr.ip4 / tr->addr.ip6 below is elided in this view. */
250 typedef struct lookup_trace_t_
/* copy of the MPLS header, for the MPLS flavour of the trace */
254 mpls_unicast_header_t hdr;
/* FIB table in which the lookup was performed */
256 fib_node_index_t fib_index;
/* Data-plane worker for the ip4 lookup DPO nodes: for each packet, pick
 * the FIB (from the DPO or the RX interface), look up either the source
 * or destination address, flow-hash across the load-balance buckets and
 * forward to the chosen DPO's next node. Dual-buffer fast loop followed
 * by a single-buffer cleanup loop. */
262 lookup_dpo_ip4_inline (vlib_main_t * vm,
263 vlib_node_runtime_t * node,
264 vlib_frame_t * from_frame,
266 int table_from_interface)
268 u32 n_left_from, next_index, * from, * to_next;
/* per-thread slot for the combined counters below */
269 u32 cpu_index = os_get_cpu_number();
270 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
272 from = vlib_frame_vector_args (from_frame);
273 n_left_from = from_frame->n_vectors;
275 next_index = node->cached_next_index;
277 while (n_left_from > 0)
281 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
283 while (n_left_from >= 4 && n_left_to_next > 2)
285 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
286 flow_hash_config_t flow_hash_config0;
287 const ip4_address_t *input_addr0;
288 const load_balance_t *lb0;
289 const lookup_dpo_t * lkd0;
290 const ip4_header_t * ip0;
291 const dpo_id_t *dpo0;
293 u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
294 flow_hash_config_t flow_hash_config1;
295 const ip4_address_t *input_addr1;
296 const load_balance_t *lb1;
297 const lookup_dpo_t * lkd1;
298 const ip4_header_t * ip1;
299 const dpo_id_t *dpo1;
302 /* Prefetch next iteration. */
304 vlib_buffer_t * p2, * p3;
306 p2 = vlib_get_buffer (vm, from[2]);
307 p3 = vlib_get_buffer (vm, from[3]);
309 vlib_prefetch_buffer_header (p2, LOAD);
310 vlib_prefetch_buffer_header (p3, LOAD);
312 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
313 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
325 b0 = vlib_get_buffer (vm, bi0);
326 ip0 = vlib_buffer_get_current (b0);
327 b1 = vlib_get_buffer (vm, bi1);
328 ip1 = vlib_buffer_get_current (b1);
330 /* dst lookup was done by ip4 lookup */
/* ip4-lookup left this DPO's index in adj_index[VLIB_TX] */
331 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
332 lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
333 lkd0 = lookup_dpo_get(lkdi0);
334 lkd1 = lookup_dpo_get(lkdi1);
337 * choose between a lookup using the fib index in the DPO
338 * or getting the FIB index from the interface.
340 if (table_from_interface)
343 ip4_fib_table_get_index_for_sw_if_index(
344 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
346 ip4_fib_table_get_index_for_sw_if_index(
347 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
351 fib_index0 = lkd0->lkd_fib_index;
352 fib_index1 = lkd1->lkd_fib_index;
356 * choose between a source or destination address lookup in the table
360 input_addr0 = &ip0->src_address;
361 input_addr1 = &ip1->src_address;
365 input_addr0 = &ip0->dst_address;
366 input_addr1 = &ip1->dst_address;
370 ip4_src_fib_lookup_two (fib_index0, fib_index1,
371 input_addr0, input_addr1,
373 lb0 = load_balance_get(lbi0);
374 lb1 = load_balance_get(lbi1);
/* carry the FIB index forward in sw_if_index[VLIB_TX] */
376 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
377 vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
379 /* Use flow hash to compute multipath adjacency. */
380 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
381 hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
/* only pay for the hash when there is more than one bucket */
383 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
385 flow_hash_config0 = lb0->lb_hash_config;
386 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
387 ip4_compute_flow_hash (ip0, flow_hash_config0);
390 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
392 flow_hash_config1 = lb1->lb_hash_config;
393 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
394 ip4_compute_flow_hash (ip1, flow_hash_config1);
397 dpo0 = load_balance_get_bucket_i(lb0,
399 (lb0->lb_n_buckets_minus_1)));
400 dpo1 = load_balance_get_bucket_i(lb1,
402 (lb1->lb_n_buckets_minus_1)));
404 next0 = dpo0->dpoi_next_node;
405 next1 = dpo1->dpoi_next_node;
406 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
407 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
/* account the packet against the chosen load-balance object */
409 vlib_increment_combined_counter
410 (cm, cpu_index, lbi0, 1,
411 vlib_buffer_length_in_chain (vm, b0));
412 vlib_increment_combined_counter
413 (cm, cpu_index, lbi1, 1,
414 vlib_buffer_length_in_chain (vm, b1));
416 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
418 lookup_trace_t *tr = vlib_add_trace (vm, node,
420 tr->fib_index = fib_index0;
422 tr->addr.ip4 = *input_addr0;
424 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
426 lookup_trace_t *tr = vlib_add_trace (vm, node,
428 tr->fib_index = fib_index1;
430 tr->addr.ip4 = *input_addr1;
433 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
434 to_next, n_left_to_next,
435 bi0, bi1, next0, next1);
/* single-buffer cleanup loop: same logic as above for one packet */
438 while (n_left_from > 0 && n_left_to_next > 0)
440 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
441 flow_hash_config_t flow_hash_config0;
442 const ip4_address_t *input_addr;
443 const load_balance_t *lb0;
444 const lookup_dpo_t * lkd0;
445 const ip4_header_t * ip0;
446 const dpo_id_t *dpo0;
456 b0 = vlib_get_buffer (vm, bi0);
457 ip0 = vlib_buffer_get_current (b0);
459 /* dst lookup was done by ip4 lookup */
460 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
461 lkd0 = lookup_dpo_get(lkdi0);
464 * choose between a lookup using the fib index in the DPO
465 * or getting the FIB index from the interface.
467 if (table_from_interface)
470 ip4_fib_table_get_index_for_sw_if_index(
471 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
475 fib_index0 = lkd0->lkd_fib_index;
479 * choose between a source or destination address lookup in the table
483 input_addr = &ip0->src_address;
487 input_addr = &ip0->dst_address;
491 ip4_src_fib_lookup_one (fib_index0, input_addr, &lbi0);
492 lb0 = load_balance_get(lbi0);
494 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
496 /* Use flow hash to compute multipath adjacency. */
497 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
499 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
501 flow_hash_config0 = lb0->lb_hash_config;
502 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
503 ip4_compute_flow_hash (ip0, flow_hash_config0);
506 dpo0 = load_balance_get_bucket_i(lb0,
508 (lb0->lb_n_buckets_minus_1)));
510 next0 = dpo0->dpoi_next_node;
511 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
513 vlib_increment_combined_counter
514 (cm, cpu_index, lbi0, 1,
515 vlib_buffer_length_in_chain (vm, b0));
517 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
519 lookup_trace_t *tr = vlib_add_trace (vm, node,
521 tr->fib_index = fib_index0;
523 tr->addr.ip4 = *input_addr;
526 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
527 n_left_to_next, bi0, next0);
529 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
531 return from_frame->n_vectors;
/* Trace formatter for the ip4/ip6 lookup nodes: prints the FIB index,
 * the looked-up address and the resulting load-balance index. */
535 format_lookup_trace (u8 * s, va_list * args)
537 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
538 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
539 lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
540 uword indent = format_get_indent (s);
541 s = format (s, "%U fib-index:%d addr:%U load-balance:%d",
542 format_white_space, indent,
544 format_ip46_address, &t->addr, IP46_TYPE_ANY,
/* Node entry points: thin wrappers that select the inline's flags.
 * Trailing args are (input_src_addr, table_from_interface). */
/* dst-address lookup, FIB taken from the DPO */
550 lookup_ip4_dst (vlib_main_t * vm,
551 vlib_node_runtime_t * node,
552 vlib_frame_t * from_frame)
554 return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 0));
557 VLIB_REGISTER_NODE (lookup_ip4_dst_node) = {
558 .function = lookup_ip4_dst,
559 .name = "lookup-ip4-dst",
560 .vector_size = sizeof (u32),
561 .sibling_of = "ip4-lookup",
562 .format_trace = format_lookup_trace,
564 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_node, lookup_ip4_dst)
/* dst-address lookup, FIB taken from the RX interface */
567 lookup_ip4_dst_itf (vlib_main_t * vm,
568 vlib_node_runtime_t * node,
569 vlib_frame_t * from_frame)
571 return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 1));
574 VLIB_REGISTER_NODE (lookup_ip4_dst_itf_node) = {
575 .function = lookup_ip4_dst_itf,
576 .name = "lookup-ip4-dst-itf",
577 .vector_size = sizeof (u32),
578 .sibling_of = "ip4-lookup",
579 .format_trace = format_lookup_trace,
581 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_itf_node, lookup_ip4_dst_itf)
/* src-address lookup, FIB taken from the DPO */
584 lookup_ip4_src (vlib_main_t * vm,
585 vlib_node_runtime_t * node,
586 vlib_frame_t * from_frame)
588 return (lookup_dpo_ip4_inline(vm, node, from_frame, 1, 0));
591 VLIB_REGISTER_NODE (lookup_ip4_src_node) = {
592 .function = lookup_ip4_src,
593 .name = "lookup-ip4-src",
594 .vector_size = sizeof (u32),
595 .format_trace = format_lookup_trace,
596 .sibling_of = "ip4-lookup",
598 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_src_node, lookup_ip4_src)
/* Data-plane worker for the ip6 lookup DPO nodes; same structure as the
 * ip4 variant but uses ip6_fib_table_fwding_lookup() instead of the
 * ip4 mtrie helpers. */
601 lookup_dpo_ip6_inline (vlib_main_t * vm,
602 vlib_node_runtime_t * node,
603 vlib_frame_t * from_frame,
605 int table_from_interface)
607 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
608 u32 n_left_from, next_index, * from, * to_next;
/* per-thread slot for the combined counters below */
609 u32 cpu_index = os_get_cpu_number();
611 from = vlib_frame_vector_args (from_frame);
612 n_left_from = from_frame->n_vectors;
614 next_index = node->cached_next_index;
616 while (n_left_from > 0)
620 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
622 while (n_left_from >= 4 && n_left_to_next > 2)
624 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
625 flow_hash_config_t flow_hash_config0;
626 const ip6_address_t *input_addr0;
627 const load_balance_t *lb0;
628 const lookup_dpo_t * lkd0;
629 const ip6_header_t * ip0;
630 const dpo_id_t *dpo0;
632 u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
633 flow_hash_config_t flow_hash_config1;
634 const ip6_address_t *input_addr1;
635 const load_balance_t *lb1;
636 const lookup_dpo_t * lkd1;
637 const ip6_header_t * ip1;
638 const dpo_id_t *dpo1;
641 /* Prefetch next iteration. */
643 vlib_buffer_t * p2, * p3;
645 p2 = vlib_get_buffer (vm, from[2]);
646 p3 = vlib_get_buffer (vm, from[3]);
648 vlib_prefetch_buffer_header (p2, LOAD);
649 vlib_prefetch_buffer_header (p3, LOAD);
651 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
652 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
664 b0 = vlib_get_buffer (vm, bi0);
665 ip0 = vlib_buffer_get_current (b0);
666 b1 = vlib_get_buffer (vm, bi1);
667 ip1 = vlib_buffer_get_current (b1);
669 /* dst lookup was done by ip6 lookup */
/* ip6-lookup left this DPO's index in adj_index[VLIB_TX] */
670 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
671 lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
672 lkd0 = lookup_dpo_get(lkdi0);
673 lkd1 = lookup_dpo_get(lkdi1);
676 * choose between a lookup using the fib index in the DPO
677 * or getting the FIB index from the interface.
679 if (table_from_interface)
682 ip6_fib_table_get_index_for_sw_if_index(
683 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
685 ip6_fib_table_get_index_for_sw_if_index(
686 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
690 fib_index0 = lkd0->lkd_fib_index;
691 fib_index1 = lkd1->lkd_fib_index;
695 * choose between a source or destination address lookup in the table
699 input_addr0 = &ip0->src_address;
700 input_addr1 = &ip1->src_address;
704 input_addr0 = &ip0->dst_address;
705 input_addr1 = &ip1->dst_address;
709 lbi0 = ip6_fib_table_fwding_lookup(&ip6_main,
712 lbi1 = ip6_fib_table_fwding_lookup(&ip6_main,
715 lb0 = load_balance_get(lbi0);
716 lb1 = load_balance_get(lbi1);
/* carry the FIB index forward in sw_if_index[VLIB_TX] */
718 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
719 vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
721 /* Use flow hash to compute multipath adjacency. */
722 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
723 hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
/* only pay for the hash when there is more than one bucket */
725 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
727 flow_hash_config0 = lb0->lb_hash_config;
728 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
729 ip6_compute_flow_hash (ip0, flow_hash_config0);
732 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
734 flow_hash_config1 = lb1->lb_hash_config;
735 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
736 ip6_compute_flow_hash (ip1, flow_hash_config1);
739 dpo0 = load_balance_get_bucket_i(lb0,
741 (lb0->lb_n_buckets_minus_1)));
742 dpo1 = load_balance_get_bucket_i(lb1,
744 (lb1->lb_n_buckets_minus_1)));
746 next0 = dpo0->dpoi_next_node;
747 next1 = dpo1->dpoi_next_node;
748 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
749 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
/* account the packet against the chosen load-balance object */
751 vlib_increment_combined_counter
752 (cm, cpu_index, lbi0, 1,
753 vlib_buffer_length_in_chain (vm, b0));
754 vlib_increment_combined_counter
755 (cm, cpu_index, lbi1, 1,
756 vlib_buffer_length_in_chain (vm, b1));
758 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
760 lookup_trace_t *tr = vlib_add_trace (vm, node,
762 tr->fib_index = fib_index0;
764 tr->addr.ip6 = *input_addr0;
766 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
768 lookup_trace_t *tr = vlib_add_trace (vm, node,
770 tr->fib_index = fib_index1;
772 tr->addr.ip6 = *input_addr1;
774 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
775 n_left_to_next, bi0, bi1,
/* single-buffer cleanup loop: same logic as above for one packet */
778 while (n_left_from > 0 && n_left_to_next > 0)
780 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
781 flow_hash_config_t flow_hash_config0;
782 const ip6_address_t *input_addr0;
783 const load_balance_t *lb0;
784 const lookup_dpo_t * lkd0;
785 const ip6_header_t * ip0;
786 const dpo_id_t *dpo0;
796 b0 = vlib_get_buffer (vm, bi0);
797 ip0 = vlib_buffer_get_current (b0);
799 /* dst lookup was done by ip6 lookup */
800 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
801 lkd0 = lookup_dpo_get(lkdi0);
804 * choose between a lookup using the fib index in the DPO
805 * or getting the FIB index from the interface.
807 if (table_from_interface)
810 ip6_fib_table_get_index_for_sw_if_index(
811 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
815 fib_index0 = lkd0->lkd_fib_index;
819 * choose between a source or destination address lookup in the table
823 input_addr0 = &ip0->src_address;
827 input_addr0 = &ip0->dst_address;
831 lbi0 = ip6_fib_table_fwding_lookup(&ip6_main,
834 lb0 = load_balance_get(lbi0);
836 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
838 /* Use flow hash to compute multipath adjacency. */
839 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
841 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
843 flow_hash_config0 = lb0->lb_hash_config;
844 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
845 ip6_compute_flow_hash (ip0, flow_hash_config0);
848 dpo0 = load_balance_get_bucket_i(lb0,
850 (lb0->lb_n_buckets_minus_1)));
852 next0 = dpo0->dpoi_next_node;
853 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
855 vlib_increment_combined_counter
856 (cm, cpu_index, lbi0, 1,
857 vlib_buffer_length_in_chain (vm, b0));
859 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
861 lookup_trace_t *tr = vlib_add_trace (vm, node,
863 tr->fib_index = fib_index0;
865 tr->addr.ip6 = *input_addr0;
867 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
868 n_left_to_next, bi0, next0);
870 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
872 return from_frame->n_vectors;
/* Node entry points for ip6; trailing args are
 * (input_src_addr, table_from_interface). */
/* dst-address lookup, FIB taken from the DPO */
876 lookup_ip6_dst (vlib_main_t * vm,
877 vlib_node_runtime_t * node,
878 vlib_frame_t * from_frame)
880 return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* use dst address */, 0));
883 VLIB_REGISTER_NODE (lookup_ip6_dst_node) = {
884 .function = lookup_ip6_dst,
885 .name = "lookup-ip6-dst",
886 .vector_size = sizeof (u32),
887 .format_trace = format_lookup_trace,
888 .sibling_of = "ip6-lookup",
890 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_node, lookup_ip6_dst)
/* dst-address lookup, FIB taken from the RX interface */
893 lookup_ip6_dst_itf (vlib_main_t * vm,
894 vlib_node_runtime_t * node,
895 vlib_frame_t * from_frame)
897 return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* use dst address */, 1));
900 VLIB_REGISTER_NODE (lookup_ip6_dst_itf_node) = {
901 .function = lookup_ip6_dst_itf,
902 .name = "lookup-ip6-dst-itf",
903 .vector_size = sizeof (u32),
904 .format_trace = format_lookup_trace,
905 .sibling_of = "ip6-lookup",
907 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_itf_node, lookup_ip6_dst_itf)
/* src-address lookup, FIB taken from the DPO */
910 lookup_ip6_src (vlib_main_t * vm,
911 vlib_node_runtime_t * node,
912 vlib_frame_t * from_frame)
914 return (lookup_dpo_ip6_inline(vm, node, from_frame, 1, 0));
917 VLIB_REGISTER_NODE (lookup_ip6_src_node) = {
918 .function = lookup_ip6_src,
919 .name = "lookup-ip6-src",
920 .vector_size = sizeof (u32),
921 .format_trace = format_lookup_trace,
922 .sibling_of = "ip6-lookup",
924 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_src_node, lookup_ip6_src)
/* Data-plane worker for the MPLS lookup DPO nodes: re-lookup the label
 * in the FIB named by the DPO (or the RX interface). Single-buffer loop
 * only — the dual loop is still a TODO (see commented condition below). */
927 lookup_dpo_mpls_inline (vlib_main_t * vm,
928 vlib_node_runtime_t * node,
929 vlib_frame_t * from_frame,
930 int table_from_interface)
932 u32 n_left_from, next_index, * from, * to_next;
/* per-thread slot for the combined counters below */
933 u32 cpu_index = os_get_cpu_number();
934 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
936 from = vlib_frame_vector_args (from_frame);
937 n_left_from = from_frame->n_vectors;
939 next_index = node->cached_next_index;
941 while (n_left_from > 0)
945 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
947 /* while (n_left_from >= 4 && n_left_to_next >= 2) */
950 while (n_left_from > 0 && n_left_to_next > 0)
952 u32 bi0, lkdi0, lbi0, fib_index0, next0;
953 const mpls_unicast_header_t * hdr0;
954 const load_balance_t *lb0;
955 const lookup_dpo_t * lkd0;
956 const dpo_id_t *dpo0;
966 b0 = vlib_get_buffer (vm, bi0);
967 hdr0 = vlib_buffer_get_current (b0);
969 /* dst lookup was done by mpls lookup */
970 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
971 lkd0 = lookup_dpo_get(lkdi0);
974 * choose between a lookup using the fib index in the DPO
975 * or getting the FIB index from the interface.
977 if (table_from_interface)
980 mpls_fib_table_get_index_for_sw_if_index(
981 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
985 fib_index0 = lkd0->lkd_fib_index;
989 lbi0 = mpls_fib_table_forwarding_lookup (fib_index0, hdr0);
990 lb0 = load_balance_get(lbi0);
/* NOTE(review): bucket 0 is always chosen — unlike the ip4/ip6 paths
 * there is no flow hash here, so MPLS multipath is not spread. */
991 dpo0 = load_balance_get_bucket_i(lb0, 0);
993 next0 = dpo0->dpoi_next_node;
994 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
996 vlib_increment_combined_counter
997 (cm, cpu_index, lbi0, 1,
998 vlib_buffer_length_in_chain (vm, b0));
1000 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1002 lookup_trace_t *tr = vlib_add_trace (vm, node,
1004 tr->fib_index = fib_index0;
1009 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
1010 n_left_to_next, bi0, next0);
1012 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1014 return from_frame->n_vectors;
/* Trace formatter for the MPLS lookup nodes: byte-swap the captured
 * label header back to host order, then print it with the FIB index and
 * resulting load-balance. */
1018 format_lookup_mpls_trace (u8 * s, va_list * args)
1020 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1021 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1022 lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
1023 uword indent = format_get_indent (s);
1024 mpls_unicast_header_t hdr;
/* the trace stored the header in network byte order */
1026 hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);
1028 s = format (s, "%U fib-index:%d hdr:%U load-balance:%d",
1029 format_white_space, indent,
1031 format_mpls_header, hdr,
/* Node entry points for MPLS; trailing arg is table_from_interface. */
/* label lookup in the FIB configured on the DPO */
1037 lookup_mpls_dst (vlib_main_t * vm,
1038 vlib_node_runtime_t * node,
1039 vlib_frame_t * from_frame)
1041 return (lookup_dpo_mpls_inline(vm, node, from_frame, 0));
1044 VLIB_REGISTER_NODE (lookup_mpls_dst_node) = {
1045 .function = lookup_mpls_dst,
1046 .name = "lookup-mpls-dst",
1047 .vector_size = sizeof (u32),
1048 .sibling_of = "mpls-lookup",
1049 .format_trace = format_lookup_mpls_trace,
1052 VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_node, lookup_mpls_dst)
/* label lookup in the RX interface's MPLS FIB */
1055 lookup_mpls_dst_itf (vlib_main_t * vm,
1056 vlib_node_runtime_t * node,
1057 vlib_frame_t * from_frame)
1059 return (lookup_dpo_mpls_inline(vm, node, from_frame, 1));
1062 VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
1063 .function = lookup_mpls_dst_itf,
1064 .name = "lookup-mpls-dst-itf",
1065 .vector_size = sizeof (u32),
1066 .sibling_of = "mpls-lookup",
1067 .format_trace = format_lookup_mpls_trace,
1070 VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_itf_node, lookup_mpls_dst_itf)
/* dv_mem_show callback: report pool occupancy and element size for the
 * "show memory" style CLI. */
1073 lookup_dpo_mem_show (void)
1075 fib_show_memory_usage("Lookup",
1076 pool_elts(lookup_dpo_pool),
1077 pool_len(lookup_dpo_pool),
1078 sizeof(lookup_dpo_t));
/* Virtual function table used for each registered sub-type. */
1081 const static dpo_vft_t lkd_vft = {
1082 .dv_lock = lookup_dpo_lock,
1083 .dv_unlock = lookup_dpo_unlock,
1084 .dv_format = format_lookup_dpo,
/* Variant with the memory-show hook; registered once for DPO_LOOKUP so
 * the pool is only reported a single time. */
1086 const static dpo_vft_t lkd_vft_w_mem_show = {
1087 .dv_lock = lookup_dpo_lock,
1088 .dv_unlock = lookup_dpo_unlock,
1089 .dv_format = format_lookup_dpo,
1090 .dv_mem_show = lookup_dpo_mem_show,
/* Per-protocol node-name tables handed to dpo_register_new_type();
 * they bind each sub-type to its VLIB graph node(s). */
1093 const static char* const lookup_src_ip4_nodes[] =
1098 const static char* const lookup_src_ip6_nodes[] =
/* no src-address lookup node exists for MPLS */
1103 const static char* const * const lookup_src_nodes[DPO_PROTO_NUM] =
1105 [DPO_PROTO_IP4] = lookup_src_ip4_nodes,
1106 [DPO_PROTO_IP6] = lookup_src_ip6_nodes,
1107 [DPO_PROTO_MPLS] = NULL,
1110 const static char* const lookup_dst_ip4_nodes[] =
1115 const static char* const lookup_dst_ip6_nodes[] =
1120 const static char* const lookup_dst_mpls_nodes[] =
1125 const static char* const * const lookup_dst_nodes[DPO_PROTO_NUM] =
1127 [DPO_PROTO_IP4] = lookup_dst_ip4_nodes,
1128 [DPO_PROTO_IP6] = lookup_dst_ip6_nodes,
1129 [DPO_PROTO_MPLS] = lookup_dst_mpls_nodes,
1132 const static char* const lookup_dst_from_interface_ip4_nodes[] =
1134 "lookup-ip4-dst-itf",
1137 const static char* const lookup_dst_from_interface_ip6_nodes[] =
1139 "lookup-ip6-dst-itf",
1142 const static char* const lookup_dst_from_interface_mpls_nodes[] =
1144 "lookup-mpls-dst-itf",
1147 const static char* const * const lookup_dst_from_interface_nodes[DPO_PROTO_NUM] =
1149 [DPO_PROTO_IP4] = lookup_dst_from_interface_ip4_nodes,
1150 [DPO_PROTO_IP6] = lookup_dst_from_interface_ip6_nodes,
1151 [DPO_PROTO_MPLS] = lookup_dst_from_interface_mpls_nodes,
/* One-time module init: register the base DPO_LOOKUP type (with the
 * mem-show hook) and one new DPO type per lookup sub-type, recording the
 * returned type values for lookup_dpo_add_or_lock_i() to select from. */
1156 lookup_dpo_module_init (void)
1158 dpo_register(DPO_LOOKUP, &lkd_vft_w_mem_show, NULL);
1161 * There are various sorts of lookup; src or dst addr v4 /v6 etc.
1162 * there isn't an object type for each (there is only the lookup_dpo_t),
1163 * but, for performance reasons, there is a data plane function, and hence
1164 * VLIB node for each. VLIB graph node construction is based on DPO types
1165 * so we create sub-types.
1167 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC] =
1168 dpo_register_new_type(&lkd_vft, lookup_src_nodes);
1169 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST] =
1170 dpo_register_new_type(&lkd_vft, lookup_dst_nodes);
1171 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE] =
1172 dpo_register_new_type(&lkd_vft, lookup_dst_from_interface_nodes);