2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/ip/ip.h>
17 #include <vnet/dpo/lookup_dpo.h>
18 #include <vnet/dpo/load_balance.h>
19 #include <vnet/mpls/mpls.h>
20 #include <vnet/fib/fib_table.h>
21 #include <vnet/fib/ip4_fib.h>
22 #include <vnet/fib/ip6_fib.h>
23 #include <vnet/fib/mpls_fib.h>
/* Printable names for the lookup-input enumeration (indexed by lookup_input_t). */
25 static const char *const lookup_input_names[] = LOOKUP_INPUTS;
28 * @brief Enumeration of the lookup subtypes
30 typedef enum lookup_sub_type_t_
34 LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE,
36 #define LOOKUP_SUB_TYPE_NUM (LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE+1)
/* NOTE(review): LOOKUP_SUB_TYPE_IP4_SRC is not among the enumerators visible
 * here; elsewhere in this file the first sub-type is spelled
 * LOOKUP_SUB_TYPE_SRC (see lookup_dpo_add_or_lock_i / module_init).
 * Confirm the intended start-of-range enumerator before using this macro. */
38 #define FOR_EACH_LOOKUP_SUB_TYPE(_st) \
39 for (_st = LOOKUP_SUB_TYPE_IP4_SRC; _st < LOOKUP_SUB_TYPE_NUM; _st++)
42 * @brief pool of all lookup DPOs
44 lookup_dpo_t *lookup_dpo_pool;
47 * @brief An array of registered DPO type values for the sub-types
49 static dpo_type_t lookup_dpo_sub_types[LOOKUP_SUB_TYPE_NUM];
/* Allocate a new lookup DPO from the pool, cache-line aligned.
 * NOTE(review): the tail of this function (initialisation/return of the
 * allocated element) is elided from this view. */
52 lookup_dpo_alloc (void)
56 pool_get_aligned(lookup_dpo_pool, lkd, CLIB_CACHE_LINE_BYTES);
/* Map a lookup DPO object back to its index in lookup_dpo_pool
 * (plain pointer arithmetic against the pool base). */
62 lookup_dpo_get_index (lookup_dpo_t *lkd)
64 return (lkd - lookup_dpo_pool);
/* Internal worker: allocate and populate a lookup DPO, then bind it into
 * the caller's dpo_id via dpo_set() with the DPO sub-type that matches the
 * requested lookup input (src vs dst) and table selection (from-config vs
 * from-input-interface).
 * NOTE(review): some parameters (proto, input, dpo) and the switch
 * scaffolding are elided from this view. */
68 lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
71 lookup_table_t table_config,
77 lkd = lookup_dpo_alloc();
78 lkd->lkd_fib_index = fib_index;
79 lkd->lkd_proto = proto;
80 lkd->lkd_input = input;
81 lkd->lkd_table = table_config;
84 * use the input type to select the lookup sub-type
90 case LOOKUP_INPUT_SRC_ADDR:
91 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC];
93 case LOOKUP_INPUT_DST_ADDR:
/* For a dst-address lookup the sub-type further depends on where the
 * FIB table comes from (nested switch on table_config). */
96 case LOOKUP_TABLE_FROM_INPUT_INTERFACE:
97 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE];
99 case LOOKUP_TABLE_FROM_CONFIG:
100 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST];
111 dpo_set(dpo, type, proto, lookup_dpo_get_index(lkd));
/* Public entry: create a lookup DPO for an already-known FIB index.
 * When the table is selected by configuration, take a lock on the FIB so it
 * cannot be deleted while this DPO references it (released in
 * lookup_dpo_unlock). */
116 lookup_dpo_add_or_lock_w_fib_index (fib_node_index_t fib_index,
118 lookup_input_t input,
119 lookup_table_t table_config,
122 if (LOOKUP_TABLE_FROM_CONFIG == table_config)
124 fib_table_lock(fib_index, dpo_proto_to_fib(proto));
126 lookup_dpo_add_or_lock_i(fib_index, proto, input, table_config, dpo);
/* Public entry: create a lookup DPO from a user-visible table-id.
 * For config-selected tables the FIB is found-or-created and locked, which
 * yields a valid fib_index; the ASSERT guards the from-config path. */
130 lookup_dpo_add_or_lock_w_table_id (u32 table_id,
132 lookup_input_t input,
133 lookup_table_t table_config,
136 fib_node_index_t fib_index = FIB_NODE_INDEX_INVALID;
138 if (LOOKUP_TABLE_FROM_CONFIG == table_config)
141 fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
145 ASSERT(FIB_NODE_INDEX_INVALID != fib_index);
146 lookup_dpo_add_or_lock_i(fib_index, proto, input, table_config, dpo);
/* vlib format function for a lookup DPO: prints the lookup input kind and
 * either "interface's <proto> table" (table chosen per-packet from the RX
 * interface) or the named configured FIB table. */
150 format_lookup_dpo (u8 *s, va_list *args)
152 index_t index = va_arg (*args, index_t);
155 lkd = lookup_dpo_get(index);
157 if (LOOKUP_TABLE_FROM_INPUT_INTERFACE == lkd->lkd_table)
159 s = format(s, "%s lookup in interface's %U table",
160 lookup_input_names[lkd->lkd_input],
161 format_dpo_proto, lkd->lkd_proto);
165 s = format(s, "%s lookup in %U",
166 lookup_input_names[lkd->lkd_input],
167 format_fib_table_name, lkd->lkd_fib_index,
168 dpo_proto_to_fib(lkd->lkd_proto));
/* DPO vft lock callback: take a reference on the lookup DPO.
 * NOTE(review): the lkd_locks increment is elided from this view. */
174 lookup_dpo_lock (dpo_id_t *dpo)
178 lkd = lookup_dpo_get(dpo->dpoi_index);
/* DPO vft unlock callback: drop a reference; on the last reference release
 * the FIB lock taken at creation (config-selected tables only) and return
 * the object to the pool.
 * NOTE(review): the lkd_locks decrement is elided from this view. */
184 lookup_dpo_unlock (dpo_id_t *dpo)
188 lkd = lookup_dpo_get(dpo->dpoi_index);
192 if (0 == lkd->lkd_locks)
194 if (LOOKUP_TABLE_FROM_CONFIG == lkd->lkd_table)
196 fib_table_unlock(lkd->lkd_fib_index,
197 dpo_proto_to_fib(lkd->lkd_proto));
199 pool_put(lookup_dpo_pool, lkd);
/* Single-packet IPv4 mtrie lookup of addr0 in src_fib_index0's FIB; writes
 * the resulting load-balance index through src_adj_index0.
 * The four lookup_step calls walk one mtrie ply per address byte. */
204 ip4_src_fib_lookup_one (u32 src_fib_index0,
205 const ip4_address_t * addr0,
206 u32 * src_adj_index0)
/* NOTE(review): leaf1 is declared and initialised but never used in this
 * single-packet variant — harmless leftover from the _two version. */
208 ip4_fib_mtrie_leaf_t leaf0, leaf1;
209 ip4_fib_mtrie_t * mtrie0;
211 mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
213 leaf0 = leaf1 = IP4_FIB_MTRIE_LEAF_ROOT;
214 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 0);
215 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 1);
216 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
217 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
219 /* Handle default route. */
220 leaf0 = (leaf0 == IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie0->default_leaf : leaf0);
221 src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
/* Dual-packet IPv4 mtrie lookup: resolves addr0/addr1 in their respective
 * FIBs with interleaved lookup steps so the two walks pipeline.
 * NOTE(review): the src_fib_index1 parameter line is elided from this view
 * but is clearly referenced below. */
225 ip4_src_fib_lookup_two (u32 src_fib_index0,
227 const ip4_address_t * addr0,
228 const ip4_address_t * addr1,
229 u32 * src_adj_index0,
230 u32 * src_adj_index1)
232 ip4_fib_mtrie_leaf_t leaf0, leaf1;
233 ip4_fib_mtrie_t * mtrie0, * mtrie1;
235 mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
236 mtrie1 = &ip4_fib_get (src_fib_index1)->mtrie;
238 leaf0 = leaf1 = IP4_FIB_MTRIE_LEAF_ROOT;
240 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 0);
241 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 0);
243 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 1);
244 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 1);
246 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
247 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 2);
249 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
250 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 3);
252 /* Handle default route. */
253 leaf0 = (leaf0 == IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie0->default_leaf : leaf0);
254 leaf1 = (leaf1 == IP4_FIB_MTRIE_LEAF_EMPTY ? mtrie1->default_leaf : leaf1);
255 src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
256 src_adj_index1[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
260 * @brief Lookup trace data
262 typedef struct lookup_trace_t_
/* MPLS header as seen at trace time (used by the MPLS trace formatter). */
266 mpls_unicast_header_t hdr;
/* FIB in which the lookup was performed. */
268 fib_node_index_t fib_index;
/* IPv4 lookup-DPO data-plane worker, specialised at compile time by its two
 * int flags: lookup on src vs dst address, and FIB table from the DPO vs
 * from the packet's RX interface. Standard VPP node shape: a dual-packet
 * loop followed by a single-packet cleanup loop. For each packet it
 * re-resolves the FIB entry, applies the load-balance flow hash, rewrites
 * adj_index[VLIB_TX] with the chosen bucket's DPO and enqueues to that
 * DPO's next node.
 * NOTE(review): the 'int input_src_addr' parameter line (between from_frame
 * and table_from_interface) is elided from this view; callers pass five
 * arguments. */
274 lookup_dpo_ip4_inline (vlib_main_t * vm,
275 vlib_node_runtime_t * node,
276 vlib_frame_t * from_frame,
278 int table_from_interface)
280 u32 n_left_from, next_index, * from, * to_next;
281 u32 cpu_index = os_get_cpu_number();
282 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
284 from = vlib_frame_vector_args (from_frame);
285 n_left_from = from_frame->n_vectors;
287 next_index = node->cached_next_index;
289 while (n_left_from > 0)
293 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
295 while (n_left_from >= 4 && n_left_to_next > 2)
297 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
298 flow_hash_config_t flow_hash_config0;
299 const ip4_address_t *input_addr0;
300 const load_balance_t *lb0;
301 const lookup_dpo_t * lkd0;
302 const ip4_header_t * ip0;
303 const dpo_id_t *dpo0;
305 u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
306 flow_hash_config_t flow_hash_config1;
307 const ip4_address_t *input_addr1;
308 const load_balance_t *lb1;
309 const lookup_dpo_t * lkd1;
310 const ip4_header_t * ip1;
311 const dpo_id_t *dpo1;
314 /* Prefetch next iteration. */
316 vlib_buffer_t * p2, * p3;
318 p2 = vlib_get_buffer (vm, from[2]);
319 p3 = vlib_get_buffer (vm, from[3]);
321 vlib_prefetch_buffer_header (p2, LOAD);
322 vlib_prefetch_buffer_header (p3, LOAD);
324 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
325 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
337 b0 = vlib_get_buffer (vm, bi0);
338 ip0 = vlib_buffer_get_current (b0);
339 b1 = vlib_get_buffer (vm, bi1);
340 ip1 = vlib_buffer_get_current (b1);
342 /* dst lookup was done by ip4 lookup */
343 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
344 lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
345 lkd0 = lookup_dpo_get(lkdi0);
346 lkd1 = lookup_dpo_get(lkdi1);
349 * choose between a lookup using the fib index in the DPO
350 * or getting the FIB index from the interface.
352 if (table_from_interface)
355 ip4_fib_table_get_index_for_sw_if_index(
356 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
358 ip4_fib_table_get_index_for_sw_if_index(
359 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
363 fib_index0 = lkd0->lkd_fib_index;
364 fib_index1 = lkd1->lkd_fib_index;
368 * choose between a source or destination address lookup in the table
372 input_addr0 = &ip0->src_address;
373 input_addr1 = &ip1->src_address;
377 input_addr0 = &ip0->dst_address;
378 input_addr1 = &ip1->dst_address;
382 ip4_src_fib_lookup_two (fib_index0, fib_index1,
383 input_addr0, input_addr1,
385 lb0 = load_balance_get(lbi0);
386 lb1 = load_balance_get(lbi1);
388 /* Use flow hash to compute multipath adjacency. */
389 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
390 hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
392 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
394 flow_hash_config0 = lb0->lb_hash_config;
395 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
396 ip4_compute_flow_hash (ip0, flow_hash_config0);
399 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
401 flow_hash_config1 = lb1->lb_hash_config;
402 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
403 ip4_compute_flow_hash (ip1, flow_hash_config1);
/* Select the load-balance bucket with the flow hash (masked by
 * n_buckets-1, i.e. bucket counts are powers of two). */
406 dpo0 = load_balance_get_bucket_i(lb0,
408 (lb0->lb_n_buckets_minus_1)));
409 dpo1 = load_balance_get_bucket_i(lb1,
411 (lb1->lb_n_buckets_minus_1)));
413 next0 = dpo0->dpoi_next_node;
414 next1 = dpo1->dpoi_next_node;
415 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
416 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
418 vlib_increment_combined_counter
419 (cm, cpu_index, lbi0, 1,
420 vlib_buffer_length_in_chain (vm, b0));
421 vlib_increment_combined_counter
422 (cm, cpu_index, lbi1, 1,
423 vlib_buffer_length_in_chain (vm, b1));
425 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
427 lookup_trace_t *tr = vlib_add_trace (vm, node,
429 tr->fib_index = fib_index0;
431 tr->addr.ip4 = *input_addr0;
433 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
435 lookup_trace_t *tr = vlib_add_trace (vm, node,
437 tr->fib_index = fib_index1;
439 tr->addr.ip4 = *input_addr1;
442 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
443 to_next, n_left_to_next,
444 bi0, bi1, next0, next1);
/* Single-packet cleanup loop: same per-packet logic as above. */
447 while (n_left_from > 0 && n_left_to_next > 0)
449 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
450 flow_hash_config_t flow_hash_config0;
451 const ip4_address_t *input_addr;
452 const load_balance_t *lb0;
453 const lookup_dpo_t * lkd0;
454 const ip4_header_t * ip0;
455 const dpo_id_t *dpo0;
465 b0 = vlib_get_buffer (vm, bi0);
466 ip0 = vlib_buffer_get_current (b0);
468 /* dst lookup was done by ip4 lookup */
469 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
470 lkd0 = lookup_dpo_get(lkdi0);
473 * choose between a lookup using the fib index in the DPO
474 * or getting the FIB index from the interface.
476 if (table_from_interface)
479 ip4_fib_table_get_index_for_sw_if_index(
480 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
484 fib_index0 = lkd0->lkd_fib_index;
488 * choose between a source or destination address lookup in the table
492 input_addr = &ip0->src_address;
496 input_addr = &ip0->dst_address;
500 ip4_src_fib_lookup_one (fib_index0, input_addr, &lbi0);
501 lb0 = load_balance_get(lbi0);
503 /* Use flow hash to compute multipath adjacency. */
504 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
506 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
508 flow_hash_config0 = lb0->lb_hash_config;
509 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
510 ip4_compute_flow_hash (ip0, flow_hash_config0);
513 dpo0 = load_balance_get_bucket_i(lb0,
515 (lb0->lb_n_buckets_minus_1)));
517 next0 = dpo0->dpoi_next_node;
518 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
520 vlib_increment_combined_counter
521 (cm, cpu_index, lbi0, 1,
522 vlib_buffer_length_in_chain (vm, b0));
524 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
526 lookup_trace_t *tr = vlib_add_trace (vm, node,
528 tr->fib_index = fib_index0;
530 tr->addr.ip4 = *input_addr;
533 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
534 n_left_to_next, bi0, next0);
536 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
538 return from_frame->n_vectors;
/* Trace formatter shared by the IPv4/IPv6 lookup nodes: prints FIB index,
 * the looked-up address and the resulting load-balance index. */
542 format_lookup_trace (u8 * s, va_list * args)
544 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
545 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
546 lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
547 uword indent = format_get_indent (s);
548 s = format (s, "%U fib-index:%d addr:%U load-balance:%d",
549 format_white_space, indent,
551 format_ip46_address, &t->addr, IP46_TYPE_ANY,
/* Node function: IPv4 dst-address lookup in the DPO's configured table
 * (input_src_addr=0, table_from_interface=0). */
557 lookup_ip4_dst (vlib_main_t * vm,
558 vlib_node_runtime_t * node,
559 vlib_frame_t * from_frame)
561 return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 0));
/* Sibling of ip4-lookup so it shares that node's next-node arcs. */
564 VLIB_REGISTER_NODE (lookup_ip4_dst_node) = {
565 .function = lookup_ip4_dst,
566 .name = "lookup-ip4-dst",
567 .vector_size = sizeof (u32),
568 .sibling_of = "ip4-lookup",
569 .format_trace = format_lookup_trace,
571 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_node, lookup_ip4_dst)
/* Node function: IPv4 dst-address lookup in the RX interface's table
 * (input_src_addr=0, table_from_interface=1). */
574 lookup_ip4_dst_itf (vlib_main_t * vm,
575 vlib_node_runtime_t * node,
576 vlib_frame_t * from_frame)
578 return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 1));
581 VLIB_REGISTER_NODE (lookup_ip4_dst_itf_node) = {
582 .function = lookup_ip4_dst_itf,
583 .name = "lookup-ip4-dst-itf",
584 .vector_size = sizeof (u32),
585 .sibling_of = "ip4-lookup",
586 .format_trace = format_lookup_trace,
588 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_itf_node, lookup_ip4_dst_itf)
/* Node function: IPv4 src-address lookup in the DPO's configured table
 * (input_src_addr=1, table_from_interface=0). */
591 lookup_ip4_src (vlib_main_t * vm,
592 vlib_node_runtime_t * node,
593 vlib_frame_t * from_frame)
595 return (lookup_dpo_ip4_inline(vm, node, from_frame, 1, 0));
598 VLIB_REGISTER_NODE (lookup_ip4_src_node) = {
599 .function = lookup_ip4_src,
600 .name = "lookup-ip4-src",
601 .vector_size = sizeof (u32),
602 .format_trace = format_lookup_trace,
603 .sibling_of = "ip4-lookup",
605 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_src_node, lookup_ip4_src)
/* IPv6 counterpart of lookup_dpo_ip4_inline: same dual/single loop shape,
 * but the FIB lookup goes through ip6_fib_table_fwding_lookup instead of
 * the ip4 mtrie, and the flow hash uses ip6_compute_flow_hash.
 * NOTE(review): the 'int input_src_addr' parameter line is elided from this
 * view; callers pass five arguments. */
608 lookup_dpo_ip6_inline (vlib_main_t * vm,
609 vlib_node_runtime_t * node,
610 vlib_frame_t * from_frame,
612 int table_from_interface)
614 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
615 u32 n_left_from, next_index, * from, * to_next;
616 u32 cpu_index = os_get_cpu_number();
618 from = vlib_frame_vector_args (from_frame);
619 n_left_from = from_frame->n_vectors;
621 next_index = node->cached_next_index;
623 while (n_left_from > 0)
627 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
629 while (n_left_from >= 4 && n_left_to_next > 2)
631 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
632 flow_hash_config_t flow_hash_config0;
633 const ip6_address_t *input_addr0;
634 const load_balance_t *lb0;
635 const lookup_dpo_t * lkd0;
636 const ip6_header_t * ip0;
637 const dpo_id_t *dpo0;
639 u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
640 flow_hash_config_t flow_hash_config1;
641 const ip6_address_t *input_addr1;
642 const load_balance_t *lb1;
643 const lookup_dpo_t * lkd1;
644 const ip6_header_t * ip1;
645 const dpo_id_t *dpo1;
648 /* Prefetch next iteration. */
650 vlib_buffer_t * p2, * p3;
652 p2 = vlib_get_buffer (vm, from[2]);
653 p3 = vlib_get_buffer (vm, from[3]);
655 vlib_prefetch_buffer_header (p2, LOAD);
656 vlib_prefetch_buffer_header (p3, LOAD);
658 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
659 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
671 b0 = vlib_get_buffer (vm, bi0);
672 ip0 = vlib_buffer_get_current (b0);
673 b1 = vlib_get_buffer (vm, bi1);
674 ip1 = vlib_buffer_get_current (b1);
676 /* dst lookup was done by ip6 lookup */
677 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
678 lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
679 lkd0 = lookup_dpo_get(lkdi0);
680 lkd1 = lookup_dpo_get(lkdi1);
683 * choose between a lookup using the fib index in the DPO
684 * or getting the FIB index from the interface.
686 if (table_from_interface)
689 ip6_fib_table_get_index_for_sw_if_index(
690 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
692 ip6_fib_table_get_index_for_sw_if_index(
693 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
697 fib_index0 = lkd0->lkd_fib_index;
698 fib_index1 = lkd1->lkd_fib_index;
702 * choose between a source or destination address lookup in the table
706 input_addr0 = &ip0->src_address;
707 input_addr1 = &ip1->src_address;
711 input_addr0 = &ip0->dst_address;
712 input_addr1 = &ip1->dst_address;
716 lbi0 = ip6_fib_table_fwding_lookup(&ip6_main,
719 lbi1 = ip6_fib_table_fwding_lookup(&ip6_main,
722 lb0 = load_balance_get(lbi0);
723 lb1 = load_balance_get(lbi1);
725 /* Use flow hash to compute multipath adjacency. */
726 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
727 hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
729 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
731 flow_hash_config0 = lb0->lb_hash_config;
732 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
733 ip6_compute_flow_hash (ip0, flow_hash_config0);
736 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
738 flow_hash_config1 = lb1->lb_hash_config;
739 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
740 ip6_compute_flow_hash (ip1, flow_hash_config1);
/* Select the load-balance bucket with the flow hash (masked by
 * n_buckets-1). */
743 dpo0 = load_balance_get_bucket_i(lb0,
745 (lb0->lb_n_buckets_minus_1)));
746 dpo1 = load_balance_get_bucket_i(lb1,
748 (lb1->lb_n_buckets_minus_1)));
750 next0 = dpo0->dpoi_next_node;
751 next1 = dpo1->dpoi_next_node;
752 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
753 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
755 vlib_increment_combined_counter
756 (cm, cpu_index, lbi0, 1,
757 vlib_buffer_length_in_chain (vm, b0));
758 vlib_increment_combined_counter
759 (cm, cpu_index, lbi1, 1,
760 vlib_buffer_length_in_chain (vm, b1));
762 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
764 lookup_trace_t *tr = vlib_add_trace (vm, node,
766 tr->fib_index = fib_index0;
768 tr->addr.ip6 = *input_addr0;
770 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
772 lookup_trace_t *tr = vlib_add_trace (vm, node,
774 tr->fib_index = fib_index1;
776 tr->addr.ip6 = *input_addr1;
778 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
779 n_left_to_next, bi0, bi1,
/* Single-packet cleanup loop: same per-packet logic as above. */
782 while (n_left_from > 0 && n_left_to_next > 0)
784 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
785 flow_hash_config_t flow_hash_config0;
786 const ip6_address_t *input_addr0;
787 const load_balance_t *lb0;
788 const lookup_dpo_t * lkd0;
789 const ip6_header_t * ip0;
790 const dpo_id_t *dpo0;
800 b0 = vlib_get_buffer (vm, bi0);
801 ip0 = vlib_buffer_get_current (b0);
803 /* dst lookup was done by ip6 lookup */
804 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
805 lkd0 = lookup_dpo_get(lkdi0);
808 * choose between a lookup using the fib index in the DPO
809 * or getting the FIB index from the interface.
811 if (table_from_interface)
814 ip6_fib_table_get_index_for_sw_if_index(
815 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
819 fib_index0 = lkd0->lkd_fib_index;
823 * choose between a source or destination address lookup in the table
827 input_addr0 = &ip0->src_address;
831 input_addr0 = &ip0->dst_address;
835 lbi0 = ip6_fib_table_fwding_lookup(&ip6_main,
838 lb0 = load_balance_get(lbi0);
840 /* Use flow hash to compute multipath adjacency. */
841 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
843 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
845 flow_hash_config0 = lb0->lb_hash_config;
846 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
847 ip6_compute_flow_hash (ip0, flow_hash_config0);
850 dpo0 = load_balance_get_bucket_i(lb0,
852 (lb0->lb_n_buckets_minus_1)));
854 next0 = dpo0->dpoi_next_node;
855 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
857 vlib_increment_combined_counter
858 (cm, cpu_index, lbi0, 1,
859 vlib_buffer_length_in_chain (vm, b0));
861 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
863 lookup_trace_t *tr = vlib_add_trace (vm, node,
865 tr->fib_index = fib_index0;
867 tr->addr.ip6 = *input_addr0;
869 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
870 n_left_to_next, bi0, next0);
872 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
874 return from_frame->n_vectors;
/* Node function: IPv6 dst-address lookup in the DPO's configured table.
 * The 4th argument is input_src_addr: 0 selects the DESTINATION address
 * (the original "/*use src*/" annotation was misleading — compare
 * lookup_ip6_src, which passes 1). */
878 lookup_ip6_dst (vlib_main_t * vm,
879 vlib_node_runtime_t * node,
880 vlib_frame_t * from_frame)
882 return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* use dst address */, 0));
885 VLIB_REGISTER_NODE (lookup_ip6_dst_node) = {
886 .function = lookup_ip6_dst,
887 .name = "lookup-ip6-dst",
888 .vector_size = sizeof (u32),
889 .format_trace = format_lookup_trace,
890 .sibling_of = "ip6-lookup",
892 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_node, lookup_ip6_dst)
/* Node function: IPv6 dst-address lookup in the RX interface's table.
 * 4th argument input_src_addr=0 selects the DESTINATION address (the
 * original "/*use src*/" annotation was misleading); 5th argument
 * table_from_interface=1. */
895 lookup_ip6_dst_itf (vlib_main_t * vm,
896 vlib_node_runtime_t * node,
897 vlib_frame_t * from_frame)
899 return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* use dst address */, 1));
902 VLIB_REGISTER_NODE (lookup_ip6_dst_itf_node) = {
903 .function = lookup_ip6_dst_itf,
904 .name = "lookup-ip6-dst-itf",
905 .vector_size = sizeof (u32),
906 .format_trace = format_lookup_trace,
907 .sibling_of = "ip6-lookup",
909 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_itf_node, lookup_ip6_dst_itf)
/* Node function: IPv6 src-address lookup in the DPO's configured table
 * (input_src_addr=1, table_from_interface=0). */
912 lookup_ip6_src (vlib_main_t * vm,
913 vlib_node_runtime_t * node,
914 vlib_frame_t * from_frame)
916 return (lookup_dpo_ip6_inline(vm, node, from_frame, 1, 0));
919 VLIB_REGISTER_NODE (lookup_ip6_src_node) = {
920 .function = lookup_ip6_src,
921 .name = "lookup-ip6-src",
922 .vector_size = sizeof (u32),
923 .format_trace = format_lookup_trace,
924 .sibling_of = "ip6-lookup",
926 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_src_node, lookup_ip6_src)
/* MPLS lookup-DPO data-plane worker. Only a single-packet loop exists (the
 * dual-packet loop is commented out below). Re-resolves the label against
 * the selected MPLS FIB and forwards via bucket 0 of the load-balance — no
 * flow hashing here, unlike the IPv4/IPv6 variants. */
929 lookup_dpo_mpls_inline (vlib_main_t * vm,
930 vlib_node_runtime_t * node,
931 vlib_frame_t * from_frame,
932 int table_from_interface)
934 u32 n_left_from, next_index, * from, * to_next;
935 u32 cpu_index = os_get_cpu_number();
936 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
938 from = vlib_frame_vector_args (from_frame);
939 n_left_from = from_frame->n_vectors;
941 next_index = node->cached_next_index;
943 while (n_left_from > 0)
947 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
949 /* while (n_left_from >= 4 && n_left_to_next >= 2) */
952 while (n_left_from > 0 && n_left_to_next > 0)
954 u32 bi0, lkdi0, lbi0, fib_index0, next0;
955 const mpls_unicast_header_t * hdr0;
956 const load_balance_t *lb0;
957 const lookup_dpo_t * lkd0;
958 const dpo_id_t *dpo0;
968 b0 = vlib_get_buffer (vm, bi0);
969 hdr0 = vlib_buffer_get_current (b0);
971 /* dst lookup was done by mpls lookup */
972 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
973 lkd0 = lookup_dpo_get(lkdi0);
976 * choose between a lookup using the fib index in the DPO
977 * or getting the FIB index from the interface.
979 if (table_from_interface)
982 mpls_fib_table_get_index_for_sw_if_index(
983 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
987 fib_index0 = lkd0->lkd_fib_index;
/* Label lookup; always uses bucket 0 of the resulting load-balance. */
991 lbi0 = mpls_fib_table_forwarding_lookup (fib_index0, hdr0);
992 lb0 = load_balance_get(lbi0);
993 dpo0 = load_balance_get_bucket_i(lb0, 0);
995 next0 = dpo0->dpoi_next_node;
996 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
998 vlib_increment_combined_counter
999 (cm, cpu_index, lbi0, 1,
1000 vlib_buffer_length_in_chain (vm, b0));
1002 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1004 lookup_trace_t *tr = vlib_add_trace (vm, node,
1006 tr->fib_index = fib_index0;
1011 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
1012 n_left_to_next, bi0, next0);
1014 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1016 return from_frame->n_vectors;
/* Trace formatter for the MPLS lookup nodes: converts the captured label
 * header to host byte order before printing it with the FIB index and
 * load-balance result. */
1020 format_lookup_mpls_trace (u8 * s, va_list * args)
1022 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1023 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1024 lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
1025 uword indent = format_get_indent (s);
1026 mpls_unicast_header_t hdr;
1028 hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);
1030 s = format (s, "%U fib-index:%d hdr:%U load-balance:%d",
1031 format_white_space, indent,
1033 format_mpls_header, hdr,
/* Node function: MPLS label lookup in the DPO's configured table
 * (table_from_interface=0). */
1039 lookup_mpls_dst (vlib_main_t * vm,
1040 vlib_node_runtime_t * node,
1041 vlib_frame_t * from_frame)
1043 return (lookup_dpo_mpls_inline(vm, node, from_frame, 0));
1046 VLIB_REGISTER_NODE (lookup_mpls_dst_node) = {
1047 .function = lookup_mpls_dst,
1048 .name = "lookup-mpls-dst",
1049 .vector_size = sizeof (u32),
1050 .sibling_of = "mpls-lookup",
1051 .format_trace = format_lookup_mpls_trace,
1054 VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_node, lookup_mpls_dst)
/* Node function: MPLS label lookup in the RX interface's table
 * (table_from_interface=1). */
1057 lookup_mpls_dst_itf (vlib_main_t * vm,
1058 vlib_node_runtime_t * node,
1059 vlib_frame_t * from_frame)
1061 return (lookup_dpo_mpls_inline(vm, node, from_frame, 1));
1064 VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
1065 .function = lookup_mpls_dst_itf,
1066 .name = "lookup-mpls-dst-itf",
1067 .vector_size = sizeof (u32),
1068 .sibling_of = "mpls-lookup",
1069 .format_trace = format_lookup_mpls_trace,
1072 VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_itf_node, lookup_mpls_dst_itf)
/* dv_mem_show callback: report pool occupancy/size for "show fib memory". */
1075 lookup_dpo_mem_show (void)
1077 fib_show_memory_usage("Lookup",
1078 pool_elts(lookup_dpo_pool),
1079 pool_len(lookup_dpo_pool),
1080 sizeof(lookup_dpo_t));
/* DPO virtual function table for the lookup sub-types. Only the base
 * DPO_LOOKUP registration (lkd_vft_w_mem_show below) reports memory usage,
 * so the pool is not counted multiple times. */
1083 const static dpo_vft_t lkd_vft = {
1084 .dv_lock = lookup_dpo_lock,
1085 .dv_unlock = lookup_dpo_unlock,
1086 .dv_format = format_lookup_dpo,
1088 const static dpo_vft_t lkd_vft_w_mem_show = {
1089 .dv_lock = lookup_dpo_lock,
1090 .dv_unlock = lookup_dpo_unlock,
1091 .dv_format = format_lookup_dpo,
1092 .dv_mem_show = lookup_dpo_mem_show,
/* Per-protocol graph-node name lists handed to dpo_register_new_type();
 * they tell VLIB which node a parent DPO of each proto should link to. */
1095 const static char* const lookup_src_ip4_nodes[] =
1100 const static char* const lookup_src_ip6_nodes[] =
/* No MPLS src-lookup node exists, hence the NULL entry. */
1105 const static char* const * const lookup_src_nodes[DPO_PROTO_NUM] =
1107 [DPO_PROTO_IP4] = lookup_src_ip4_nodes,
1108 [DPO_PROTO_IP6] = lookup_src_ip6_nodes,
1109 [DPO_PROTO_MPLS] = NULL,
1112 const static char* const lookup_dst_ip4_nodes[] =
1117 const static char* const lookup_dst_ip6_nodes[] =
1122 const static char* const lookup_dst_mpls_nodes[] =
1127 const static char* const * const lookup_dst_nodes[DPO_PROTO_NUM] =
1129 [DPO_PROTO_IP4] = lookup_dst_ip4_nodes,
1130 [DPO_PROTO_IP6] = lookup_dst_ip6_nodes,
1131 [DPO_PROTO_MPLS] = lookup_dst_mpls_nodes,
1134 const static char* const lookup_dst_from_interface_ip4_nodes[] =
1136 "lookup-ip4-dst-itf",
1139 const static char* const lookup_dst_from_interface_ip6_nodes[] =
1141 "lookup-ip6-dst-itf",
1144 const static char* const lookup_dst_from_interface_mpls_nodes[] =
1146 "lookup-mpls-dst-itf",
1149 const static char* const * const lookup_dst_from_interface_nodes[DPO_PROTO_NUM] =
1151 [DPO_PROTO_IP4] = lookup_dst_from_interface_ip4_nodes,
1152 [DPO_PROTO_IP6] = lookup_dst_from_interface_ip6_nodes,
1153 [DPO_PROTO_MPLS] = lookup_dst_from_interface_mpls_nodes,
/* Module init: register the base DPO_LOOKUP type (with mem-show) and one
 * dynamically-allocated DPO sub-type per lookup flavour, so each flavour
 * gets its own VLIB graph linkage. */
1158 lookup_dpo_module_init (void)
1160 dpo_register(DPO_LOOKUP, &lkd_vft_w_mem_show, NULL);
1163 * There are various sorts of lookup; src or dst addr v4 /v6 etc.
1164 * there isn't an object type for each (there is only the lookup_dpo_t),
1165 * but, for performance reasons, there is a data plane function, and hence
1166 * VLIB node for each. VLIB graph node construction is based on DPO types
1167 * so we create sub-types.
1169 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC] =
1170 dpo_register_new_type(&lkd_vft, lookup_src_nodes);
1171 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST] =
1172 dpo_register_new_type(&lkd_vft, lookup_dst_nodes);
1173 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE] =
1174 dpo_register_new_type(&lkd_vft, lookup_dst_from_interface_nodes);