2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/ip/ip.h>
17 #include <vnet/dpo/lookup_dpo.h>
18 #include <vnet/dpo/load_balance_map.h>
19 #include <vnet/mpls/mpls_lookup.h>
20 #include <vnet/fib/fib_table.h>
21 #include <vnet/fib/ip4_fib.h>
22 #include <vnet/fib/ip6_fib.h>
23 #include <vnet/fib/mpls_fib.h>
24 #include <vnet/mfib/mfib_table.h>
25 #include <vnet/mfib/ip4_mfib.h>
26 #include <vnet/mfib/ip6_mfib.h>
/* Per-enum human-readable names, used by the format functions below. */
28 static const char *const lookup_input_names[] = LOOKUP_INPUTS;
29 static const char *const lookup_cast_names[] = LOOKUP_CASTS;
/* NOTE(review): the comment delimiters around the text below appear to have
 * been lost in extraction; it documents MAX_LUKPS_PER_PACKET.  "LUKPS" also
 * looks like a typo for "LOOKUPS", but renaming would touch every use site,
 * so it is only flagged here. */
32 * If a packet encounters a lookup DPO more than this many times
33 * then we assume there is a loop in the forward graph and drop the packet
35 #define MAX_LUKPS_PER_PACKET 4
38 * @brief Enumeration of the lookup subtypes
40 typedef enum lookup_sub_type_t_
44 LOOKUP_SUB_TYPE_DST_MCAST,
45 LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE,
/* Number of sub-types: last enumerator + 1. */
47 #define LOOKUP_SUB_TYPE_NUM (LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE+1)
49 #define FOR_EACH_LOOKUP_SUB_TYPE(_st) \
50 for (_st = LOOKUP_SUB_TYPE_IP4_SRC; _st < LOOKUP_SUB_TYPE_NUM; _st++)
53 * @brief pool of all lookup DPOs
55 lookup_dpo_t *lookup_dpo_pool;
58 * @brief An array of registered DPO type values for the sub-types
60 static dpo_type_t lookup_dpo_sub_types[LOOKUP_SUB_TYPE_NUM];
/*
 * Allocate a lookup DPO from the pool, cache-line aligned.
 * (Return type and some locals are elided in this view.)
 */
63 lookup_dpo_alloc (void)
67 pool_get_aligned(lookup_dpo_pool, lkd, CLIB_CACHE_LINE_BYTES);
/* Map a lookup DPO object back to its index in the pool. */
73 lookup_dpo_get_index (lookup_dpo_t *lkd)
75 return (lkd - lookup_dpo_pool);
/*
 * Internal add-or-lock: allocate and fill a lookup DPO, then use the
 * (input, table_config, cast) triple to select the registered DPO
 * sub-type before binding it into *dpo.  Callers have already taken any
 * FIB/mFIB table lock that is required.
 */
79 lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
83 lookup_table_t table_config,
89 lkd = lookup_dpo_alloc();
90 lkd->lkd_fib_index = fib_index;
91 lkd->lkd_proto = proto;
92 lkd->lkd_input = input;
93 lkd->lkd_table = table_config;
97 * use the input type to select the lookup sub-type
103 case LOOKUP_INPUT_SRC_ADDR:
104 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC];
106 case LOOKUP_INPUT_DST_ADDR:
107 switch (table_config)
109 case LOOKUP_TABLE_FROM_INPUT_INTERFACE:
110 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE];
112 case LOOKUP_TABLE_FROM_CONFIG:
113 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST];
/* A multicast destination lookup overrides the sub-type chosen above. */
116 if (LOOKUP_MULTICAST == cast)
118 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST];
128 dpo_set(dpo, type, proto, lookup_dpo_get_index(lkd));
/*
 * Add-or-lock using an existing FIB index: take a reference on the
 * (m)FIB table when the table comes from configuration, then build the
 * DPO.  Unicast lookups lock the FIB, multicast lookups the mFIB.
 */
133 lookup_dpo_add_or_lock_w_fib_index (fib_node_index_t fib_index,
136 lookup_input_t input,
137 lookup_table_t table_config,
140 if (LOOKUP_TABLE_FROM_CONFIG == table_config)
142 if (LOOKUP_UNICAST == cast)
144 fib_table_lock(fib_index,
145 dpo_proto_to_fib(proto),
150 mfib_table_lock(fib_index,
151 dpo_proto_to_fib(proto),
155 lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
/*
 * Add-or-lock using a table-id: find-or-create the (m)FIB table (which
 * also takes a lock) and build the DPO against the resulting index.
 */
159 lookup_dpo_add_or_lock_w_table_id (u32 table_id,
162 lookup_input_t input,
163 lookup_table_t table_config,
166 fib_node_index_t fib_index = FIB_NODE_INDEX_INVALID;
168 if (LOOKUP_TABLE_FROM_CONFIG == table_config)
170 if (LOOKUP_UNICAST == cast)
173 fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
180 mfib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
/* NOTE(review): if table_config is FROM_INPUT_INTERFACE, fib_index stays
 * INVALID and this assert fires -- presumably such callers must use the
 * _w_fib_index variant; confirm against callers. */
186 ASSERT(FIB_NODE_INDEX_INVALID != fib_index);
187 lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
/*
 * format function for a lookup DPO: prints the input type, the cast and
 * the table the lookup targets (interface-derived, FIB or mFIB).
 */
191 format_lookup_dpo (u8 *s, va_list *args)
193 index_t index = va_arg (*args, index_t);
196 lkd = lookup_dpo_get(index);
198 if (LOOKUP_TABLE_FROM_INPUT_INTERFACE == lkd->lkd_table)
200 s = format(s, "%s,%s lookup in interface's %U table",
201 lookup_input_names[lkd->lkd_input],
202 lookup_cast_names[lkd->lkd_cast],
203 format_dpo_proto, lkd->lkd_proto);
207 if (LOOKUP_UNICAST == lkd->lkd_cast)
209 s = format(s, "%s,%s lookup in %U",
210 lookup_input_names[lkd->lkd_input],
211 lookup_cast_names[lkd->lkd_cast],
212 format_fib_table_name, lkd->lkd_fib_index,
213 dpo_proto_to_fib(lkd->lkd_proto));
217 s = format(s, "%s,%s lookup in %U",
218 lookup_input_names[lkd->lkd_input],
219 lookup_cast_names[lkd->lkd_cast],
220 format_mfib_table_name, lkd->lkd_fib_index,
221 dpo_proto_to_fib(lkd->lkd_proto));
/* DPO vft: take a reference on the lookup DPO. */
228 lookup_dpo_lock (dpo_id_t *dpo)
232 lkd = lookup_dpo_get(dpo->dpoi_index);
/*
 * DPO vft: release a reference; on the last unlock also release the
 * (m)FIB table lock taken at construction time and return the DPO to
 * the pool.
 */
238 lookup_dpo_unlock (dpo_id_t *dpo)
242 lkd = lookup_dpo_get(dpo->dpoi_index);
246 if (0 == lkd->lkd_locks)
248 if (LOOKUP_TABLE_FROM_CONFIG == lkd->lkd_table)
250 if (LOOKUP_UNICAST == lkd->lkd_cast)
252 fib_table_unlock(lkd->lkd_fib_index,
253 dpo_proto_to_fib(lkd->lkd_proto),
258 mfib_table_unlock(lkd->lkd_fib_index,
259 dpo_proto_to_fib(lkd->lkd_proto),
263 pool_put(lookup_dpo_pool, lkd);
/*
 * Single-buffer mtrie walk: look up addr0 in the given ip4 FIB and
 * return the resulting load-balance index via src_adj_index0.
 * step_one covers the first two address bytes; steps 2 and 3 consume
 * the remaining bytes.
 */
268 ip4_src_fib_lookup_one (u32 src_fib_index0,
269 const ip4_address_t * addr0,
270 u32 * src_adj_index0)
272 ip4_fib_mtrie_leaf_t leaf0;
273 ip4_fib_mtrie_t * mtrie0;
275 mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
277 leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0);
278 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
279 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
281 src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
/*
 * Dual-buffer variant of the above: two independent mtrie walks with
 * the steps interleaved to hide memory latency.
 */
285 ip4_src_fib_lookup_two (u32 src_fib_index0,
287 const ip4_address_t * addr0,
288 const ip4_address_t * addr1,
289 u32 * src_adj_index0,
290 u32 * src_adj_index1)
292 ip4_fib_mtrie_leaf_t leaf0, leaf1;
293 ip4_fib_mtrie_t * mtrie0, * mtrie1;
295 mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
296 mtrie1 = &ip4_fib_get (src_fib_index1)->mtrie;
298 leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0);
299 leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, addr1);
301 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
302 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 2);
304 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
305 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 3);
307 src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
308 src_adj_index1[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
312 * @brief Lookup trace data
/* Shared by the ip4/ip6/mpls/mcast lookup nodes; the hdr member is only
 * meaningful for the MPLS nodes, the address member for the IP nodes. */
314 typedef struct lookup_trace_t_
318 mpls_unicast_header_t hdr;
320 fib_node_index_t fib_index;
/*
 * ip4 lookup-DPO node function.  For each packet: recover the lookup DPO
 * written into adj_index[VLIB_TX] by the previous ip4-lookup, choose the
 * FIB (from the DPO or from the RX interface), look up either the source
 * or destination address in that FIB's mtrie, pick a load-balance bucket
 * by flow hash, and enqueue to the chosen dpo's next node.  A per-buffer
 * loop counter drops packets that traverse too many lookup DPOs.
 */
326 lookup_dpo_ip4_inline (vlib_main_t * vm,
327 vlib_node_runtime_t * node,
328 vlib_frame_t * from_frame,
330 int table_from_interface)
332 u32 n_left_from, next_index, * from, * to_next;
333 u32 thread_index = vlib_get_thread_index();
334 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
336 from = vlib_frame_vector_args (from_frame);
337 n_left_from = from_frame->n_vectors;
339 next_index = node->cached_next_index;
341 while (n_left_from > 0)
345 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
/* Dual-buffer loop. */
347 while (n_left_from >= 4 && n_left_to_next > 2)
349 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
350 flow_hash_config_t flow_hash_config0;
351 const ip4_address_t *input_addr0;
352 const load_balance_t *lb0;
353 const lookup_dpo_t * lkd0;
354 const ip4_header_t * ip0;
355 const dpo_id_t *dpo0;
357 u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
358 flow_hash_config_t flow_hash_config1;
359 const ip4_address_t *input_addr1;
360 const load_balance_t *lb1;
361 const lookup_dpo_t * lkd1;
362 const ip4_header_t * ip1;
363 const dpo_id_t *dpo1;
366 /* Prefetch next iteration. */
368 vlib_buffer_t * p2, * p3;
370 p2 = vlib_get_buffer (vm, from[2]);
371 p3 = vlib_get_buffer (vm, from[3]);
373 vlib_prefetch_buffer_header (p2, LOAD);
374 vlib_prefetch_buffer_header (p3, LOAD);
376 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
377 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
389 b0 = vlib_get_buffer (vm, bi0);
390 ip0 = vlib_buffer_get_current (b0);
391 b1 = vlib_get_buffer (vm, bi1);
392 ip1 = vlib_buffer_get_current (b1);
394 /* dst lookup was done by ip4 lookup */
395 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
396 lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
397 lkd0 = lookup_dpo_get(lkdi0);
398 lkd1 = lookup_dpo_get(lkdi1);
401 * choose between a lookup using the fib index in the DPO
402 * or getting the FIB index from the interface.
404 if (table_from_interface)
407 ip4_fib_table_get_index_for_sw_if_index(
408 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
410 ip4_fib_table_get_index_for_sw_if_index(
411 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
415 fib_index0 = lkd0->lkd_fib_index;
416 fib_index1 = lkd1->lkd_fib_index;
420 * choose between a source or destination address lookup in the table
424 input_addr0 = &ip0->src_address;
425 input_addr1 = &ip1->src_address;
429 input_addr0 = &ip0->dst_address;
430 input_addr1 = &ip1->dst_address;
434 ip4_src_fib_lookup_two (fib_index0, fib_index1,
435 input_addr0, input_addr1,
437 lb0 = load_balance_get(lbi0);
438 lb1 = load_balance_get(lbi1);
/* Record the FIB the lookup was made in for downstream nodes. */
440 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
441 vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
443 /* Use flow hash to compute multipath adjacency. */
444 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
445 hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
447 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
449 flow_hash_config0 = lb0->lb_hash_config;
450 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
451 ip4_compute_flow_hash (ip0, flow_hash_config0);
454 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
456 flow_hash_config1 = lb1->lb_hash_config;
457 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
458 ip4_compute_flow_hash (ip1, flow_hash_config1);
461 dpo0 = load_balance_get_bucket_i(lb0,
463 (lb0->lb_n_buckets_minus_1)));
464 dpo1 = load_balance_get_bucket_i(lb1,
466 (lb1->lb_n_buckets_minus_1)));
468 next0 = dpo0->dpoi_next_node;
469 next1 = dpo1->dpoi_next_node;
470 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
471 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
473 vlib_increment_combined_counter
474 (cm, thread_index, lbi0, 1,
475 vlib_buffer_length_in_chain (vm, b0));
476 vlib_increment_combined_counter
477 (cm, thread_index, lbi1, 1,
478 vlib_buffer_length_in_chain (vm, b1));
/* Lazily initialise the per-buffer lookup loop counter, then enforce
 * the forwarding-loop limit (checked after next0/next1 are set so the
 * drop decision cannot be overwritten). */
480 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
481 vnet_buffer2(b0)->loop_counter = 0;
482 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
484 if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
485 vnet_buffer2(b1)->loop_counter = 0;
486 b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
489 vnet_buffer2(b0)->loop_counter++;
490 vnet_buffer2(b1)->loop_counter++;
492 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
493 next0 = IP_LOOKUP_NEXT_DROP;
494 if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
495 next1 = IP_LOOKUP_NEXT_DROP;
497 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
499 lookup_trace_t *tr = vlib_add_trace (vm, node,
501 tr->fib_index = fib_index0;
503 tr->addr.ip4 = *input_addr0;
505 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
507 lookup_trace_t *tr = vlib_add_trace (vm, node,
509 tr->fib_index = fib_index1;
511 tr->addr.ip4 = *input_addr1;
514 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
515 to_next, n_left_to_next,
516 bi0, bi1, next0, next1);
/* Single-buffer cleanup loop: same steps as above, one packet at a time. */
519 while (n_left_from > 0 && n_left_to_next > 0)
521 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
522 flow_hash_config_t flow_hash_config0;
523 const ip4_address_t *input_addr;
524 const load_balance_t *lb0;
525 const lookup_dpo_t * lkd0;
526 const ip4_header_t * ip0;
527 const dpo_id_t *dpo0;
537 b0 = vlib_get_buffer (vm, bi0);
538 ip0 = vlib_buffer_get_current (b0);
540 /* dst lookup was done by ip4 lookup */
541 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
542 lkd0 = lookup_dpo_get(lkdi0);
545 * choose between a lookup using the fib index in the DPO
546 * or getting the FIB index from the interface.
548 if (table_from_interface)
551 ip4_fib_table_get_index_for_sw_if_index(
552 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
556 fib_index0 = lkd0->lkd_fib_index;
560 * choose between a source or destination address lookup in the table
564 input_addr = &ip0->src_address;
568 input_addr = &ip0->dst_address;
572 ip4_src_fib_lookup_one (fib_index0, input_addr, &lbi0);
573 lb0 = load_balance_get(lbi0);
575 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
577 /* Use flow hash to compute multipath adjacency. */
578 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
580 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
582 flow_hash_config0 = lb0->lb_hash_config;
583 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
584 ip4_compute_flow_hash (ip0, flow_hash_config0);
587 dpo0 = load_balance_get_bucket_i(lb0,
589 (lb0->lb_n_buckets_minus_1)));
591 next0 = dpo0->dpoi_next_node;
592 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
594 vlib_increment_combined_counter
595 (cm, thread_index, lbi0, 1,
596 vlib_buffer_length_in_chain (vm, b0));
598 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
599 vnet_buffer2(b0)->loop_counter = 0;
600 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
603 vnet_buffer2(b0)->loop_counter++;
605 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
606 next0 = IP_LOOKUP_NEXT_DROP;
608 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
610 lookup_trace_t *tr = vlib_add_trace (vm, node,
612 tr->fib_index = fib_index0;
614 tr->addr.ip4 = *input_addr;
617 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
618 n_left_to_next, bi0, next0);
620 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
622 return from_frame->n_vectors;
/*
 * Trace formatter shared by the IP lookup-DPO nodes: prints the FIB
 * index, the looked-up address and the resulting load-balance index.
 */
626 format_lookup_trace (u8 * s, va_list * args)
628 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
629 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
630 lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
631 u32 indent = format_get_indent (s);
632 s = format (s, "%U fib-index:%d addr:%U load-balance:%d",
633 format_white_space, indent,
635 format_ip46_address, &t->addr, IP46_TYPE_ANY,
/* Node fn: ip4 lookup on the dst address, FIB taken from the DPO. */
641 lookup_ip4_dst (vlib_main_t * vm,
642 vlib_node_runtime_t * node,
643 vlib_frame_t * from_frame)
645 return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 0));
648 VLIB_REGISTER_NODE (lookup_ip4_dst_node) = {
649 .function = lookup_ip4_dst,
650 .name = "lookup-ip4-dst",
651 .vector_size = sizeof (u32),
652 .sibling_of = "ip4-lookup",
653 .format_trace = format_lookup_trace,
655 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_node, lookup_ip4_dst)
/* Node fn: ip4 lookup on the dst address, FIB taken from the RX interface. */
658 lookup_ip4_dst_itf (vlib_main_t * vm,
659 vlib_node_runtime_t * node,
660 vlib_frame_t * from_frame)
662 return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 1));
665 VLIB_REGISTER_NODE (lookup_ip4_dst_itf_node) = {
666 .function = lookup_ip4_dst_itf,
667 .name = "lookup-ip4-dst-itf",
668 .vector_size = sizeof (u32),
669 .sibling_of = "ip4-lookup",
670 .format_trace = format_lookup_trace,
672 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_itf_node, lookup_ip4_dst_itf)
/* Node fn: ip4 lookup on the src address, FIB taken from the DPO. */
675 lookup_ip4_src (vlib_main_t * vm,
676 vlib_node_runtime_t * node,
677 vlib_frame_t * from_frame)
679 return (lookup_dpo_ip4_inline(vm, node, from_frame, 1, 0));
682 VLIB_REGISTER_NODE (lookup_ip4_src_node) = {
683 .function = lookup_ip4_src,
684 .name = "lookup-ip4-src",
685 .vector_size = sizeof (u32),
686 .format_trace = format_lookup_trace,
687 .sibling_of = "ip4-lookup",
689 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_src_node, lookup_ip4_src)
/*
 * ip6 lookup-DPO node function: mirror of lookup_dpo_ip4_inline but the
 * address lookup uses ip6_fib_table_fwding_lookup().  For each packet:
 * recover the lookup DPO from adj_index[VLIB_TX], choose the FIB (DPO or
 * RX interface), look up src or dst address, pick a load-balance bucket
 * by flow hash and enqueue.  A per-buffer loop counter drops packets
 * that traverse too many lookup DPOs.
 *
 * FIX(review): in the dual-buffer loop the loop-counter check previously
 * ran BEFORE next0/next1 were assigned from the chosen dpo, so the
 * IP_LOOKUP_NEXT_DROP decision was always overwritten and the loop
 * protection never fired.  The loop-counter section is now placed after
 * the next0/next1 assignment, matching the ip4 dual loop and the
 * single-buffer loops.
 */
692 lookup_dpo_ip6_inline (vlib_main_t * vm,
693 vlib_node_runtime_t * node,
694 vlib_frame_t * from_frame,
696 int table_from_interface)
698 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
699 u32 n_left_from, next_index, * from, * to_next;
700 u32 thread_index = vlib_get_thread_index();
702 from = vlib_frame_vector_args (from_frame);
703 n_left_from = from_frame->n_vectors;
705 next_index = node->cached_next_index;
707 while (n_left_from > 0)
711 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
713 while (n_left_from >= 4 && n_left_to_next > 2)
715 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
716 flow_hash_config_t flow_hash_config0;
717 const ip6_address_t *input_addr0;
718 const load_balance_t *lb0;
719 const lookup_dpo_t * lkd0;
720 const ip6_header_t * ip0;
721 const dpo_id_t *dpo0;
723 u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
724 flow_hash_config_t flow_hash_config1;
725 const ip6_address_t *input_addr1;
726 const load_balance_t *lb1;
727 const lookup_dpo_t * lkd1;
728 const ip6_header_t * ip1;
729 const dpo_id_t *dpo1;
732 /* Prefetch next iteration. */
734 vlib_buffer_t * p2, * p3;
736 p2 = vlib_get_buffer (vm, from[2]);
737 p3 = vlib_get_buffer (vm, from[3]);
739 vlib_prefetch_buffer_header (p2, LOAD);
740 vlib_prefetch_buffer_header (p3, LOAD);
742 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
743 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
755 b0 = vlib_get_buffer (vm, bi0);
756 ip0 = vlib_buffer_get_current (b0);
757 b1 = vlib_get_buffer (vm, bi1);
758 ip1 = vlib_buffer_get_current (b1);
760 /* dst lookup was done by ip6 lookup */
761 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
762 lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
763 lkd0 = lookup_dpo_get(lkdi0);
764 lkd1 = lookup_dpo_get(lkdi1);
767 * choose between a lookup using the fib index in the DPO
768 * or getting the FIB index from the interface.
770 if (table_from_interface)
773 ip6_fib_table_get_index_for_sw_if_index(
774 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
776 ip6_fib_table_get_index_for_sw_if_index(
777 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
781 fib_index0 = lkd0->lkd_fib_index;
782 fib_index1 = lkd1->lkd_fib_index;
786 * choose between a source or destination address lookup in the table
790 input_addr0 = &ip0->src_address;
791 input_addr1 = &ip1->src_address;
795 input_addr0 = &ip0->dst_address;
796 input_addr1 = &ip1->dst_address;
800 lbi0 = ip6_fib_table_fwding_lookup(&ip6_main,
803 lbi1 = ip6_fib_table_fwding_lookup(&ip6_main,
806 lb0 = load_balance_get(lbi0);
807 lb1 = load_balance_get(lbi1);
/* Record the FIB the lookup was made in for downstream nodes. */
809 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
810 vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
812 /* Use flow hash to compute multipath adjacency. */
813 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
814 hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
833 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
835 flow_hash_config0 = lb0->lb_hash_config;
836 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
837 ip6_compute_flow_hash (ip0, flow_hash_config0);
840 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
842 flow_hash_config1 = lb1->lb_hash_config;
843 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
844 ip6_compute_flow_hash (ip1, flow_hash_config1);
847 dpo0 = load_balance_get_bucket_i(lb0,
849 (lb0->lb_n_buckets_minus_1)));
850 dpo1 = load_balance_get_bucket_i(lb1,
852 (lb1->lb_n_buckets_minus_1)));
854 next0 = dpo0->dpoi_next_node;
855 next1 = dpo1->dpoi_next_node;
856 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
857 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
859 vlib_increment_combined_counter
860 (cm, thread_index, lbi0, 1,
861 vlib_buffer_length_in_chain (vm, b0));
862 vlib_increment_combined_counter
863 (cm, thread_index, lbi1, 1,
864 vlib_buffer_length_in_chain (vm, b1));
/* Loop-counter section: moved here from before the flow-hash block so
 * the drop override happens AFTER next0/next1 are assigned above. */
816 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
817 vnet_buffer2(b0)->loop_counter = 0;
818 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
820 if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
821 vnet_buffer2(b1)->loop_counter = 0;
822 b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
825 vnet_buffer2(b0)->loop_counter++;
826 vnet_buffer2(b1)->loop_counter++;
828 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
829 next0 = IP_LOOKUP_NEXT_DROP;
830 if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
831 next1 = IP_LOOKUP_NEXT_DROP;
866 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
868 lookup_trace_t *tr = vlib_add_trace (vm, node,
870 tr->fib_index = fib_index0;
872 tr->addr.ip6 = *input_addr0;
874 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
876 lookup_trace_t *tr = vlib_add_trace (vm, node,
878 tr->fib_index = fib_index1;
880 tr->addr.ip6 = *input_addr1;
882 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
883 n_left_to_next, bi0, bi1,
/* Single-buffer cleanup loop: same steps, one packet at a time; the
 * loop-counter check here was already after the next0 assignment. */
886 while (n_left_from > 0 && n_left_to_next > 0)
888 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
889 flow_hash_config_t flow_hash_config0;
890 const ip6_address_t *input_addr0;
891 const load_balance_t *lb0;
892 const lookup_dpo_t * lkd0;
893 const ip6_header_t * ip0;
894 const dpo_id_t *dpo0;
904 b0 = vlib_get_buffer (vm, bi0);
905 ip0 = vlib_buffer_get_current (b0);
907 /* dst lookup was done by ip6 lookup */
908 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
909 lkd0 = lookup_dpo_get(lkdi0);
912 * choose between a lookup using the fib index in the DPO
913 * or getting the FIB index from the interface.
915 if (table_from_interface)
918 ip6_fib_table_get_index_for_sw_if_index(
919 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
923 fib_index0 = lkd0->lkd_fib_index;
927 * choose between a source or destination address lookup in the table
931 input_addr0 = &ip0->src_address;
935 input_addr0 = &ip0->dst_address;
939 lbi0 = ip6_fib_table_fwding_lookup(&ip6_main,
942 lb0 = load_balance_get(lbi0);
944 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
946 /* Use flow hash to compute multipath adjacency. */
947 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
949 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
951 flow_hash_config0 = lb0->lb_hash_config;
952 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
953 ip6_compute_flow_hash (ip0, flow_hash_config0);
956 dpo0 = load_balance_get_bucket_i(lb0,
958 (lb0->lb_n_buckets_minus_1)));
960 next0 = dpo0->dpoi_next_node;
961 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
963 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
964 vnet_buffer2(b0)->loop_counter = 0;
965 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
968 vnet_buffer2(b0)->loop_counter++;
970 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
971 next0 = IP_LOOKUP_NEXT_DROP;
973 vlib_increment_combined_counter
974 (cm, thread_index, lbi0, 1,
975 vlib_buffer_length_in_chain (vm, b0));
977 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
979 lookup_trace_t *tr = vlib_add_trace (vm, node,
981 tr->fib_index = fib_index0;
983 tr->addr.ip6 = *input_addr0;
985 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
986 n_left_to_next, bi0, next0);
988 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
990 return from_frame->n_vectors;
/* Node fn: ip6 lookup on the dst address, FIB taken from the DPO. */
994 lookup_ip6_dst (vlib_main_t * vm,
995 vlib_node_runtime_t * node,
996 vlib_frame_t * from_frame)
998 return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* use dst address */, 0));
1001 VLIB_REGISTER_NODE (lookup_ip6_dst_node) = {
1002 .function = lookup_ip6_dst,
1003 .name = "lookup-ip6-dst",
1004 .vector_size = sizeof (u32),
1005 .format_trace = format_lookup_trace,
1006 .sibling_of = "ip6-lookup",
1008 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_node, lookup_ip6_dst)
/* Node fn: ip6 lookup on the dst address, FIB taken from the RX interface. */
1011 lookup_ip6_dst_itf (vlib_main_t * vm,
1012 vlib_node_runtime_t * node,
1013 vlib_frame_t * from_frame)
1015 return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* use dst address */, 1));
1018 VLIB_REGISTER_NODE (lookup_ip6_dst_itf_node) = {
1019 .function = lookup_ip6_dst_itf,
1020 .name = "lookup-ip6-dst-itf",
1021 .vector_size = sizeof (u32),
1022 .format_trace = format_lookup_trace,
1023 .sibling_of = "ip6-lookup",
1025 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_itf_node, lookup_ip6_dst_itf)
/* Node fn: ip6 lookup on the src address, FIB taken from the DPO. */
1028 lookup_ip6_src (vlib_main_t * vm,
1029 vlib_node_runtime_t * node,
1030 vlib_frame_t * from_frame)
1032 return (lookup_dpo_ip6_inline(vm, node, from_frame, 1, 0));
1035 VLIB_REGISTER_NODE (lookup_ip6_src_node) = {
1036 .function = lookup_ip6_src,
1037 .name = "lookup-ip6-src",
1038 .vector_size = sizeof (u32),
1039 .format_trace = format_lookup_trace,
1040 .sibling_of = "ip6-lookup",
1042 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_src_node, lookup_ip6_src)
/*
 * MPLS lookup-DPO node function: recover the lookup DPO from
 * adj_index[VLIB_TX], choose the MPLS FIB (DPO or RX interface), look up
 * the label and dispatch either to the replicate edge (multicast) or to
 * a flow-hash-selected load-balance bucket (unicast).
 *
 * FIX(review): the result of mpls_fib_table_forwarding_lookup() was
 * previously fed straight into load_balance_get()/_get_bucket_i() before
 * the MPLS_IS_REPLICATE check; with the replicate bit set that indexes
 * the load-balance pool out of range, and without it the same work was
 * redone in the else branch anyway.  The premature load is removed; all
 * dpo/next selection now happens inside the replicate/unicast branches.
 */
1045 lookup_dpo_mpls_inline (vlib_main_t * vm,
1046 vlib_node_runtime_t * node,
1047 vlib_frame_t * from_frame,
1048 int table_from_interface)
1050 u32 n_left_from, next_index, * from, * to_next;
1051 u32 thread_index = vlib_get_thread_index();
1052 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
1054 from = vlib_frame_vector_args (from_frame);
1055 n_left_from = from_frame->n_vectors;
1057 next_index = node->cached_next_index;
1059 while (n_left_from > 0)
1063 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
1065 /* while (n_left_from >= 4 && n_left_to_next >= 2) */
1068 while (n_left_from > 0 && n_left_to_next > 0)
1070 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash0;
1071 const mpls_unicast_header_t * hdr0;
1072 const load_balance_t *lb0;
1073 const lookup_dpo_t * lkd0;
1074 const dpo_id_t *dpo0;
1082 n_left_to_next -= 1;
1084 b0 = vlib_get_buffer (vm, bi0);
1085 hdr0 = vlib_buffer_get_current (b0);
1087 /* dst lookup was done by mpls lookup */
1088 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
1089 lkd0 = lookup_dpo_get(lkdi0);
1092 * choose between a lookup using the fib index in the DPO
1093 * or getting the FIB index from the interface.
1095 if (table_from_interface)
1098 mpls_fib_table_get_index_for_sw_if_index(
1099 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
1103 fib_index0 = lkd0->lkd_fib_index;
1107 lbi0 = mpls_fib_table_forwarding_lookup (fib_index0, hdr0);
/* Multicast result: the replicate bit is set; strip it and hand the
 * packet to the replicate edge. */
1115 if (MPLS_IS_REPLICATE & lbi0)
1117 next0 = mpls_lookup_to_replicate_edge;
1118 vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
1119 (lbi0 & ~MPLS_IS_REPLICATE);
/* Unicast result: lbi0 is a plain load-balance index. */
1123 lb0 = load_balance_get(lbi0);
1124 ASSERT (lb0->lb_n_buckets > 0);
1125 ASSERT (is_pow2 (lb0->lb_n_buckets));
1127 if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
1129 hash0 = vnet_buffer (b0)->ip.flow_hash =
1130 mpls_compute_flow_hash(hdr0, lb0->lb_hash_config);
1131 dpo0 = load_balance_get_fwd_bucket
1133 (hash0 & (lb0->lb_n_buckets_minus_1)));
1137 dpo0 = load_balance_get_bucket_i (lb0, 0);
1139 next0 = dpo0->dpoi_next_node;
1141 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
1143 vlib_increment_combined_counter
1144 (cm, thread_index, lbi0, 1,
1145 vlib_buffer_length_in_chain (vm, b0));
/* Stash TTL/EXP from the popped label (RFC 3032 byte layout) and
 * advance past the label before handing the packet on. */
1148 vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
1149 vnet_buffer (b0)->mpls.exp = (((char*)hdr0)[2] & 0xe) >> 1;
1150 vnet_buffer (b0)->mpls.first = 1;
1151 vlib_buffer_advance(b0, sizeof(*hdr0));
1153 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
1154 vnet_buffer2(b0)->loop_counter = 0;
1155 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
1158 vnet_buffer2(b0)->loop_counter++;
1160 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
1161 next0 = MPLS_LOOKUP_NEXT_DROP;
1163 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1165 lookup_trace_t *tr = vlib_add_trace (vm, node,
1167 tr->fib_index = fib_index0;
1172 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
1173 n_left_to_next, bi0, next0);
1175 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1177 return from_frame->n_vectors;
/*
 * Trace formatter for the MPLS lookup-DPO nodes: byte-swaps the captured
 * label header for display and prints FIB index and load-balance result.
 */
1181 format_lookup_mpls_trace (u8 * s, va_list * args)
1183 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1184 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1185 lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
1186 u32 indent = format_get_indent (s);
1187 mpls_unicast_header_t hdr;
1189 hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);
1191 s = format (s, "%U fib-index:%d hdr:%U load-balance:%d",
1192 format_white_space, indent,
1194 format_mpls_header, hdr,
/* Node fn: MPLS label lookup, FIB taken from the DPO. */
1200 lookup_mpls_dst (vlib_main_t * vm,
1201 vlib_node_runtime_t * node,
1202 vlib_frame_t * from_frame)
1204 return (lookup_dpo_mpls_inline(vm, node, from_frame, 0));
1207 VLIB_REGISTER_NODE (lookup_mpls_dst_node) = {
1208 .function = lookup_mpls_dst,
1209 .name = "lookup-mpls-dst",
1210 .vector_size = sizeof (u32),
1211 .sibling_of = "mpls-lookup",
1212 .format_trace = format_lookup_mpls_trace,
1215 VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_node, lookup_mpls_dst)
/* Node fn: MPLS label lookup, FIB taken from the RX interface. */
1218 lookup_mpls_dst_itf (vlib_main_t * vm,
1219 vlib_node_runtime_t * node,
1220 vlib_frame_t * from_frame)
1222 return (lookup_dpo_mpls_inline(vm, node, from_frame, 1));
1225 VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
1226 .function = lookup_mpls_dst_itf,
1227 .name = "lookup-mpls-dst-itf",
1228 .vector_size = sizeof (u32),
1229 .sibling_of = "mpls-lookup",
1230 .format_trace = format_lookup_mpls_trace,
1233 VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_itf_node, lookup_mpls_dst_itf)
/* Next-node indices for the dst-mcast lookup nodes.
 * NOTE(review): the typedef name (mfib_forward_lookup_next_t) does not
 * match the enum tag (lookup_ip_dst_mcast_next_t_) -- presumably a
 * copy/paste leftover; harmless but worth aligning. */
1235 typedef enum lookup_ip_dst_mcast_next_t_ {
1236 LOOKUP_IP_DST_MCAST_NEXT_DROP,
1237 LOOKUP_IP_DST_MCAST_NEXT_RPF,
1238 LOOKUP_IP_DST_MCAST_N_NEXT,
1239 } mfib_forward_lookup_next_t;
/*
 * Multicast dst lookup node function, shared by ip4 and ip6 (selected by
 * the elided is_v4 parameter).  Looks up the destination group in the
 * mFIB taken from the lookup DPO, stores the mfib entry index in
 * adj_index[VLIB_TX] and forwards to the RPF check node; packets that
 * traverse too many lookup DPOs are dropped.
 */
1242 lookup_dpo_ip_dst_mcast_inline (vlib_main_t * vm,
1243 vlib_node_runtime_t * node,
1244 vlib_frame_t * from_frame,
1247 u32 n_left_from, next_index, * from, * to_next;
1249 from = vlib_frame_vector_args (from_frame);
1250 n_left_from = from_frame->n_vectors;
1252 next_index = LOOKUP_IP_DST_MCAST_NEXT_RPF;
1254 while (n_left_from > 0)
1258 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
1260 /* while (n_left_from >= 4 && n_left_to_next >= 2) */
1263 while (n_left_from > 0 && n_left_to_next > 0)
1265 u32 bi0, lkdi0, fib_index0, next0;
1266 const lookup_dpo_t * lkd0;
1267 fib_node_index_t mfei0;
1275 n_left_to_next -= 1;
1277 b0 = vlib_get_buffer (vm, bi0);
1279 /* dst lookup was done by the ip4/ip6 lookup that hit this DPO */
1280 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
1281 lkd0 = lookup_dpo_get(lkdi0);
1282 fib_index0 = lkd0->lkd_fib_index;
1283 next0 = LOOKUP_IP_DST_MCAST_NEXT_RPF;
/* ip4 branch: exact (src,grp) mfib lookup. */
1289 ip0 = vlib_buffer_get_current (b0);
1290 mfei0 = ip4_mfib_table_lookup(ip4_mfib_get(fib_index0),
1294 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1296 lookup_trace_t *tr = vlib_add_trace (vm, node,
1298 tr->fib_index = fib_index0;
1300 tr->addr.ip4 = ip0->dst_address;
/* ip6 branch. */
1307 ip0 = vlib_buffer_get_current (b0);
1308 mfei0 = ip6_mfib_table_lookup2(ip6_mfib_get(fib_index0),
1311 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1313 lookup_trace_t *tr = vlib_add_trace (vm, node,
1315 tr->fib_index = fib_index0;
1317 tr->addr.ip6 = ip0->dst_address;
/* Downstream RPF node reads the mfib entry index from here. */
1321 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = mfei0;
1323 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
1324 vnet_buffer2(b0)->loop_counter = 0;
1325 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
1328 vnet_buffer2(b0)->loop_counter++;
1330 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
1331 next0 = LOOKUP_IP_DST_MCAST_NEXT_DROP;
1333 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
1334 n_left_to_next, bi0, next0);
1336 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1338 return from_frame->n_vectors;
/* Node fn: ip4 multicast dst lookup (is_v4 = 1). */
1342 lookup_ip4_dst_mcast (vlib_main_t * vm,
1343 vlib_node_runtime_t * node,
1344 vlib_frame_t * from_frame)
1346 return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 1));
1349 VLIB_REGISTER_NODE (lookup_ip4_dst_mcast_node) = {
1350 .function = lookup_ip4_dst_mcast,
1351 .name = "lookup-ip4-dst-mcast",
1352 .vector_size = sizeof (u32),
1354 .format_trace = format_lookup_trace,
1355 .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
1357 [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip4-drop",
1358 [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip4-mfib-forward-rpf",
1361 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_mcast_node,
1362 lookup_ip4_dst_mcast)
/* Node fn: ip6 multicast dst lookup (is_v4 = 0). */
1365 lookup_ip6_dst_mcast (vlib_main_t * vm,
1366 vlib_node_runtime_t * node,
1367 vlib_frame_t * from_frame)
1369 return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 0));
1372 VLIB_REGISTER_NODE (lookup_ip6_dst_mcast_node) = {
1373 .function = lookup_ip6_dst_mcast,
1374 .name = "lookup-ip6-dst-mcast",
1375 .vector_size = sizeof (u32),
1377 .format_trace = format_lookup_trace,
1378 .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
1380 [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip6-drop",
1381 [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip6-mfib-forward-rpf",
1384 VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_mcast_node,
1385 lookup_ip6_dst_mcast)
/**
 * @brief Report the lookup-DPO pool's memory usage (in-use element count,
 * allocated pool length and per-element size) under the "Lookup" heading
 * of the FIB memory display.
 */
1388 lookup_dpo_mem_show (void)
1390 fib_show_memory_usage("Lookup",
1391 pool_elts(lookup_dpo_pool),
1392 pool_len(lookup_dpo_pool),
1393 sizeof(lookup_dpo_t));
/**
 * @brief DPO virtual function table used when registering the lookup
 * sub-types. NOTE(review): presumably omits dv_mem_show so the single
 * shared lookup_dpo_pool is not reported once per sub-type — confirm.
 */
1396 const static dpo_vft_t lkd_vft = {
1397 .dv_lock = lookup_dpo_lock,
1398 .dv_unlock = lookup_dpo_unlock,
1399 .dv_format = format_lookup_dpo,
/**
 * @brief DPO virtual function table for the base DPO_LOOKUP type; same
 * callbacks as lkd_vft plus the memory-usage reporter, so pool memory
 * is accounted exactly once.
 */
1401 const static dpo_vft_t lkd_vft_w_mem_show = {
1402 .dv_lock = lookup_dpo_lock,
1403 .dv_unlock = lookup_dpo_unlock,
1404 .dv_format = format_lookup_dpo,
1405 .dv_mem_show = lookup_dpo_mem_show,
/* Per-protocol graph-node name lists for the source-address lookup
 * sub-type, indexed by dpo_proto_t when handed to dpo_register_new_type.
 * MPLS has no source-lookup variant, hence the NULL entry. */
1408 const static char* const lookup_src_ip4_nodes[] =
1413 const static char* const lookup_src_ip6_nodes[] =
1418 const static char* const * const lookup_src_nodes[DPO_PROTO_NUM] =
1420 [DPO_PROTO_IP4] = lookup_src_ip4_nodes,
1421 [DPO_PROTO_IP6] = lookup_src_ip6_nodes,
1422 [DPO_PROTO_MPLS] = NULL,
/* Per-protocol graph-node name lists for the destination-address lookup
 * sub-type; unlike the src case, all three protocols (v4, v6, MPLS)
 * have a dst-lookup node. */
1425 const static char* const lookup_dst_ip4_nodes[] =
1430 const static char* const lookup_dst_ip6_nodes[] =
1435 const static char* const lookup_dst_mpls_nodes[] =
1440 const static char* const * const lookup_dst_nodes[DPO_PROTO_NUM] =
1442 [DPO_PROTO_IP4] = lookup_dst_ip4_nodes,
1443 [DPO_PROTO_IP6] = lookup_dst_ip6_nodes,
1444 [DPO_PROTO_MPLS] = lookup_dst_mpls_nodes,
/* Per-protocol graph-node name lists for the multicast destination
 * lookup sub-type (the lookup-ip[46]-dst-mcast nodes registered above);
 * IP only — no MPLS entry. */
1447 const static char* const lookup_dst_mcast_ip4_nodes[] =
1449 "lookup-ip4-dst-mcast",
1452 const static char* const lookup_dst_mcast_ip6_nodes[] =
1454 "lookup-ip6-dst-mcast",
1457 const static char* const * const lookup_dst_mcast_nodes[DPO_PROTO_NUM] =
1459 [DPO_PROTO_IP4] = lookup_dst_mcast_ip4_nodes,
1460 [DPO_PROTO_IP6] = lookup_dst_mcast_ip6_nodes,
/* Per-protocol graph-node name lists for the
 * dst-lookup-in-table-from-interface sub-type; all three protocols are
 * supported via the *-dst-itf nodes. */
1463 const static char* const lookup_dst_from_interface_ip4_nodes[] =
1465 "lookup-ip4-dst-itf",
1468 const static char* const lookup_dst_from_interface_ip6_nodes[] =
1470 "lookup-ip6-dst-itf",
1473 const static char* const lookup_dst_from_interface_mpls_nodes[] =
1475 "lookup-mpls-dst-itf",
1478 const static char* const * const lookup_dst_from_interface_nodes[DPO_PROTO_NUM] =
1480 [DPO_PROTO_IP4] = lookup_dst_from_interface_ip4_nodes,
1481 [DPO_PROTO_IP6] = lookup_dst_from_interface_ip6_nodes,
1482 [DPO_PROTO_MPLS] = lookup_dst_from_interface_mpls_nodes,
/**
 * @brief CLI handler for "show lookup-dpo [<index>]".
 *
 * With a numeric argument, formats the single lookup DPO at that pool
 * index; with no argument, walks the whole lookup_dpo_pool and prints
 * each element prefixed by its "[@index]".
 *
 * @return NULL on success (no CLI error is ever raised here).
 */
1485 static clib_error_t *
1486 lookup_dpo_show (vlib_main_t * vm,
1487 unformat_input_t * input,
1488 vlib_cli_command_t * cmd)
/* INDEX_INVALID sentinel distinguishes "no index given" from index 0 */
1490 index_t lkdi = INDEX_INVALID;
1492 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1494 if (unformat (input, "%d", &lkdi))
1500 if (INDEX_INVALID != lkdi)
1502 vlib_cli_output (vm, "%U", format_lookup_dpo, lkdi);
/* no index supplied: dump every element in the pool */
1508 pool_foreach(lkd, lookup_dpo_pool,
1510 vlib_cli_output (vm, "[@%d] %U",
1511 lookup_dpo_get_index(lkd),
1513 lookup_dpo_get_index(lkd));
1520 VLIB_CLI_COMMAND (replicate_show_command, static) = {
1521 .path = "show lookup-dpo",
1522 .short_help = "show lookup-dpo [<index>]",
1523 .function = lookup_dpo_show,
/**
 * @brief One-time module initialisation for the lookup DPO type.
 *
 * Registers the base DPO_LOOKUP type (with the mem-show-capable VFT),
 * then registers one new DPO sub-type per lookup flavour — src, dst,
 * dst-mcast and dst-table-from-interface — recording the allocated
 * dpo_type_t values in lookup_dpo_sub_types[].
 */
1527 lookup_dpo_module_init (void)
1529 dpo_register(DPO_LOOKUP, &lkd_vft_w_mem_show, NULL);
1532 * There are various sorts of lookup; src or dst addr v4 /v6 etc.
1533 * there isn't an object type for each (there is only the lookup_dpo_t),
1534 * but, for performance reasons, there is a data plane function, and hence
1535 * VLIB node for each. VLIB graph node construction is based on DPO types
1536 * so we create sub-types.
1538 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC] =
1539 dpo_register_new_type(&lkd_vft, lookup_src_nodes);
1540 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST] =
1541 dpo_register_new_type(&lkd_vft, lookup_dst_nodes);
1542 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST] =
1543 dpo_register_new_type(&lkd_vft, lookup_dst_mcast_nodes);
1544 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE] =
1545 dpo_register_new_type(&lkd_vft, lookup_dst_from_interface_nodes);