2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/ip/ip.h>
17 #include <vnet/dpo/lookup_dpo.h>
18 #include <vnet/dpo/load_balance_map.h>
19 #include <vnet/mpls/mpls_lookup.h>
20 #include <vnet/fib/fib_table.h>
21 #include <vnet/fib/ip4_fib.h>
22 #include <vnet/fib/ip6_fib.h>
23 #include <vnet/fib/mpls_fib.h>
24 #include <vnet/mfib/mfib_table.h>
25 #include <vnet/mfib/ip4_mfib.h>
26 #include <vnet/mfib/ip6_mfib.h>
/* Human-readable names for the lookup input and cast enums; used by the
 * format_* functions below when rendering a lookup DPO for CLI/trace output. */
28 static const char *const lookup_input_names[] = LOOKUP_INPUTS;
29 static const char *const lookup_cast_names[] = LOOKUP_CASTS;
32 * If a packet encounters a lookup DPO more than this many times
33 * then we assume there is a loop in the forward graph and drop the packet
/* NOTE(review): "LUKPS" (sic) is a long-standing typo for "LOOKUPS"; renaming
 * would touch every use site, so it is kept as-is here. */
35 #define MAX_LUKPS_PER_PACKET 4
38 * @brief Enumeration of the lookup subtypes
/* Each sub-type gets its own registered DPO type (and hence its own VLIB
 * node) purely for data-plane performance; see lookup_dpo_module_init(). */
40 typedef enum lookup_sub_type_t_
44 LOOKUP_SUB_TYPE_DST_MCAST,
45 LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE,
/* Number of sub-types; derived from the last enumerator above. */
47 #define LOOKUP_SUB_TYPE_NUM (LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE+1)
/* NOTE(review): the loop below starts from LOOKUP_SUB_TYPE_IP4_SRC, which is
 * not among the enumerators visible above (they are LOOKUP_SUB_TYPE_*). Verify
 * this enumerator exists, otherwise the macro cannot compile when used. */
49 #define FOR_EACH_LOOKUP_SUB_TYPE(_st) \
50 for (_st = LOOKUP_SUB_TYPE_IP4_SRC; _st < LOOKUP_SUB_TYPE_NUM; _st++)
53 * @brief Pool of all lookup DPOs (comment previously read "MPLS Label DPOs" —
 *        a copy/paste from the MPLS label DPO module; this pool holds lookup_dpo_t)
55 lookup_dpo_t *lookup_dpo_pool;
58 * @brief An array of registered DPO type values for the sub-types
60 static dpo_type_t lookup_dpo_sub_types[LOOKUP_SUB_TYPE_NUM];
/*
 * Allocate a new lookup DPO from the global pool, cache-line aligned to avoid
 * false sharing on the data-plane hot path.
 */
63 lookup_dpo_alloc (void)
67 pool_get_aligned(lookup_dpo_pool, lkd, CLIB_CACHE_LINE_BYTES);
/*
 * Return the pool index of a lookup DPO: the element's offset from the
 * pool base (standard VPP pool-index idiom).
 */
73 lookup_dpo_get_index (lookup_dpo_t *lkd)
75 return (lkd - lookup_dpo_pool);
/*
 * Internal worker: allocate a lookup DPO, stamp it with the supplied
 * fib-index/proto/input/table configuration, select the registered DPO
 * sub-type that matches the (input, table_config, cast) combination, and
 * bind the result into the caller's dpo_id.  Callers are expected to have
 * taken any required (m)fib table lock already.
 */
79 lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
83 lookup_table_t table_config,
89 lkd = lookup_dpo_alloc();
90 lkd->lkd_fib_index = fib_index;
91 lkd->lkd_proto = proto;
92 lkd->lkd_input = input;
93 lkd->lkd_table = table_config;
97 * use the input type to select the lookup sub-type
103 case LOOKUP_INPUT_SRC_ADDR:
104 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC];
106 case LOOKUP_INPUT_DST_ADDR:
/* dst-address lookups further split on where the FIB index comes from */
107 switch (table_config)
109 case LOOKUP_TABLE_FROM_INPUT_INTERFACE:
110 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE];
112 case LOOKUP_TABLE_FROM_CONFIG:
113 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST];
/* multicast dst lookups override the unicast dst sub-type */
116 if (LOOKUP_MULTICAST == cast)
118 type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST];
128 dpo_set(dpo, type, proto, lookup_dpo_get_index(lkd));
/*
 * Create a lookup DPO from an already-resolved FIB index.  When the table
 * comes from configuration (rather than the input interface) a lock is taken
 * on the unicast FIB or the multicast FIB, depending on the cast, so the
 * table cannot be deleted while this DPO references it (released in
 * lookup_dpo_unlock()).
 */
133 lookup_dpo_add_or_lock_w_fib_index (fib_node_index_t fib_index,
136 lookup_input_t input,
137 lookup_table_t table_config,
140 if (LOOKUP_TABLE_FROM_CONFIG == table_config)
142 if (LOOKUP_UNICAST == cast)
144 fib_table_lock(fib_index,
145 dpo_proto_to_fib(proto),
150 mfib_table_lock(fib_index,
151 dpo_proto_to_fib(proto),
155 lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
159 lookup_dpo_add_or_lock_w_table_id (u32 table_id,
162 lookup_input_t input,
163 lookup_table_t table_config,
166 fib_node_index_t fib_index = FIB_NODE_INDEX_INVALID;
168 if (LOOKUP_TABLE_FROM_CONFIG == table_config)
170 if (LOOKUP_UNICAST == cast)
173 fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
180 mfib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
186 ASSERT(FIB_NODE_INDEX_INVALID != fib_index);
187 lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
/*
 * format() callback for a lookup DPO (va_args: pool index).  Renders the
 * input/cast names plus either "interface's table" (table-from-interface)
 * or the named unicast FIB / multicast MFIB table (table-from-config).
 */
191 format_lookup_dpo (u8 *s, va_list *args)
193 index_t index = va_arg (*args, index_t);
196 lkd = lookup_dpo_get(index);
198 if (LOOKUP_TABLE_FROM_INPUT_INTERFACE == lkd->lkd_table)
200 s = format(s, "%s,%s lookup in interface's %U table",
201 lookup_input_names[lkd->lkd_input],
202 lookup_cast_names[lkd->lkd_cast],
203 format_dpo_proto, lkd->lkd_proto);
/* table-from-config: print the configured table's name */
207 if (LOOKUP_UNICAST == lkd->lkd_cast)
209 s = format(s, "%s,%s lookup in %U",
210 lookup_input_names[lkd->lkd_input],
211 lookup_cast_names[lkd->lkd_cast],
212 format_fib_table_name, lkd->lkd_fib_index,
213 dpo_proto_to_fib(lkd->lkd_proto))
;
217 s = format(s, "%s,%s lookup in %U",
218 lookup_input_names[lkd->lkd_input],
219 lookup_cast_names[lkd->lkd_cast],
220 format_mfib_table_name, lkd->lkd_fib_index,
221 dpo_proto_to_fib(lkd->lkd_proto));
/*
 * DPO vft lock callback: bump the reference count on the lookup DPO
 * addressed by the dpo_id's index.
 */
228 lookup_dpo_lock (dpo_id_t *dpo)
232 lkd = lookup_dpo_get(dpo->dpoi_index);
/*
 * DPO vft unlock callback: drop a reference; on the last reference release
 * the (m)fib table lock taken at creation (config-table case only) and
 * return the object to the pool.
 */
238 lookup_dpo_unlock (dpo_id_t *dpo)
242 lkd = lookup_dpo_get(dpo->dpoi_index);
246 if (0 == lkd->lkd_locks)
248 if (LOOKUP_TABLE_FROM_CONFIG == lkd->lkd_table)
250 if (LOOKUP_UNICAST == lkd->lkd_cast)
252 fib_table_unlock(lkd->lkd_fib_index,
253 dpo_proto_to_fib(lkd->lkd_proto),
258 mfib_table_unlock(lkd->lkd_fib_index,
259 dpo_proto_to_fib(lkd->lkd_proto),
263 pool_put(lookup_dpo_pool, lkd);
/*
 * Single-buffer IPv4 mtrie lookup: walk the 16-8-8 mtrie for addr0 in the
 * given FIB and write the resulting load-balance index via src_adj_index0.
 * (Despite the "adj" name, the leaf yields a load-balance object index.)
 */
268 ip4_src_fib_lookup_one (u32 src_fib_index0,
269 const ip4_address_t * addr0,
270 u32 * src_adj_index0)
272 ip4_fib_mtrie_leaf_t leaf0;
273 ip4_fib_mtrie_t * mtrie0;
275 mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
277 leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0);
278 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
279 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
281 src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
/*
 * Dual-buffer variant of ip4_src_fib_lookup_one: the two mtrie walks are
 * interleaved step-by-step so the memory accesses of the two lookups can
 * overlap (standard VPP dual-packet pipelining).
 */
285 ip4_src_fib_lookup_two (u32 src_fib_index0,
287 const ip4_address_t * addr0,
288 const ip4_address_t * addr1,
289 u32 * src_adj_index0,
290 u32 * src_adj_index1)
292 ip4_fib_mtrie_leaf_t leaf0, leaf1;
293 ip4_fib_mtrie_t * mtrie0, * mtrie1;
295 mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
296 mtrie1 = &ip4_fib_get (src_fib_index1)->mtrie;
298 leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0);
299 leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, addr1);
301 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
302 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 2);
304 leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
305 leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 3);
307 src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
308 src_adj_index1[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
312 * @brief Lookup trace data
/* One record per traced packet: the MPLS header (MPLS paths), the FIB index
 * the lookup was made in, and (fields elsewhere) the looked-up address. */
314 typedef struct lookup_trace_t_
318 mpls_unicast_header_t hdr;
320 fib_node_index_t fib_index;
/*
 * IPv4 lookup-DPO data-plane function (shared by the dst, dst-itf and src
 * nodes via the two compile-time flags).
 *
 * Per packet: recover the lookup DPO from the buffer's TX adjacency index,
 * pick the FIB (from the DPO's config, or from the RX interface when
 * table_from_interface is set), do an mtrie lookup on the src or dst
 * address, choose a load-balance bucket via the flow hash, and enqueue to
 * that bucket's next node.  A per-buffer loop counter drops packets that
 * traverse more than MAX_LUKPS_PER_PACKET lookup DPOs (forwarding loop).
 */
326 lookup_dpo_ip4_inline (vlib_main_t * vm,
327 vlib_node_runtime_t * node,
328 vlib_frame_t * from_frame,
330 int table_from_interface)
332 u32 n_left_from, next_index, * from, * to_next;
333 u32 thread_index = vlib_get_thread_index();
334 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
336 from = vlib_frame_vector_args (from_frame);
337 n_left_from = from_frame->n_vectors;
339 next_index = node->cached_next_index;
341 while (n_left_from > 0)
345 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
/* dual-buffer loop: process b0/b1 in lock-step */
347 while (n_left_from >= 4 && n_left_to_next > 2)
349 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
350 flow_hash_config_t flow_hash_config0;
351 const ip4_address_t *input_addr0;
352 const load_balance_t *lb0;
353 const lookup_dpo_t * lkd0;
354 const ip4_header_t * ip0;
355 const dpo_id_t *dpo0;
357 u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
358 flow_hash_config_t flow_hash_config1;
359 const ip4_address_t *input_addr1;
360 const load_balance_t *lb1;
361 const lookup_dpo_t * lkd1;
362 const ip4_header_t * ip1;
363 const dpo_id_t *dpo1;
366 /* Prefetch next iteration. */
368 vlib_buffer_t * p2, * p3;
370 p2 = vlib_get_buffer (vm, from[2]);
371 p3 = vlib_get_buffer (vm, from[3]);
373 vlib_prefetch_buffer_header (p2, LOAD);
374 vlib_prefetch_buffer_header (p3, LOAD);
376 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
377 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
389 b0 = vlib_get_buffer (vm, bi0);
390 ip0 = vlib_buffer_get_current (b0);
391 b1 = vlib_get_buffer (vm, bi1);
392 ip1 = vlib_buffer_get_current (b1);
394 /* dst lookup was done by ip4 lookup */
395 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
396 lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
397 lkd0 = lookup_dpo_get(lkdi0);
398 lkd1 = lookup_dpo_get(lkdi1);
401 * choose between a lookup using the fib index in the DPO
402 * or getting the FIB index from the interface.
404 if (table_from_interface)
407 ip4_fib_table_get_index_for_sw_if_index(
408 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
410 ip4_fib_table_get_index_for_sw_if_index(
411 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
415 fib_index0 = lkd0->lkd_fib_index;
416 fib_index1 = lkd1->lkd_fib_index;
420 * choose between a source or destination address lookup in the table
424 input_addr0 = &ip0->src_address;
425 input_addr1 = &ip1->src_address;
429 input_addr0 = &ip0->dst_address;
430 input_addr1 = &ip1->dst_address;
434 ip4_src_fib_lookup_two (fib_index0, fib_index1,
435 input_addr0, input_addr1,
437 lb0 = load_balance_get(lbi0);
438 lb1 = load_balance_get(lbi1);
/* record the FIB used, for downstream nodes */
440 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
441 vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
443 /* Use flow hash to compute multipath adjacency. */
444 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
445 hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
447 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
449 flow_hash_config0 = lb0->lb_hash_config;
450 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
451 ip4_compute_flow_hash (ip0, flow_hash_config0);
454 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
456 flow_hash_config1 = lb1->lb_hash_config;
457 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
458 ip4_compute_flow_hash (ip1, flow_hash_config1);
461 dpo0 = load_balance_get_bucket_i(lb0,
463 (lb0->lb_n_buckets_minus_1)));
464 dpo1 = load_balance_get_bucket_i(lb1,
466 (lb1->lb_n_buckets_minus_1)));
468 next0 = dpo0->dpoi_next_node;
469 next1 = dpo1->dpoi_next_node;
470 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
471 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
473 vlib_increment_combined_counter
474 (cm, thread_index, lbi0, 1,
475 vlib_buffer_length_in_chain (vm, b0));
476 vlib_increment_combined_counter
477 (cm, thread_index, lbi1, 1,
478 vlib_buffer_length_in_chain (vm, b1));
/* lazily initialise the per-buffer loop counter on first lookup */
480 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
481 vnet_buffer2(b0)->loop_counter = 0;
482 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
484 if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
485 vnet_buffer2(b1)->loop_counter = 0;
486 b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
489 vnet_buffer2(b0)->loop_counter++;
490 vnet_buffer2(b1)->loop_counter++;
/* loop protection: after next0/next1 are set, so the drop sticks */
492 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
493 next0 = IP_LOOKUP_NEXT_DROP;
494 if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
495 next1 = IP_LOOKUP_NEXT_DROP;
497 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
499 lookup_trace_t *tr = vlib_add_trace (vm, node,
501 tr->fib_index = fib_index0;
503 tr->addr.ip4 = *input_addr0;
505 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
507 lookup_trace_t *tr = vlib_add_trace (vm, node,
509 tr->fib_index = fib_index1;
511 tr->addr.ip4 = *input_addr1;
514 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
515 to_next, n_left_to_next,
516 bi0, bi1, next0, next1);
/* single-buffer tail loop: same steps as above for one packet at a time */
519 while (n_left_from > 0 && n_left_to_next > 0)
521 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
522 flow_hash_config_t flow_hash_config0;
523 const ip4_address_t *input_addr;
524 const load_balance_t *lb0;
525 const lookup_dpo_t * lkd0;
526 const ip4_header_t * ip0;
527 const dpo_id_t *dpo0;
537 b0 = vlib_get_buffer (vm, bi0);
538 ip0 = vlib_buffer_get_current (b0);
540 /* dst lookup was done by ip4 lookup */
541 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
542 lkd0 = lookup_dpo_get(lkdi0);
545 * choose between a lookup using the fib index in the DPO
546 * or getting the FIB index from the interface.
548 if (table_from_interface)
551 ip4_fib_table_get_index_for_sw_if_index(
552 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
556 fib_index0 = lkd0->lkd_fib_index;
560 * choose between a source or destination address lookup in the table
564 input_addr = &ip0->src_address;
568 input_addr = &ip0->dst_address;
572 ip4_src_fib_lookup_one (fib_index0, input_addr, &lbi0);
573 lb0 = load_balance_get(lbi0);
575 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
577 /* Use flow hash to compute multipath adjacency. */
578 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
580 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
582 flow_hash_config0 = lb0->lb_hash_config;
583 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
584 ip4_compute_flow_hash (ip0, flow_hash_config0);
587 dpo0 = load_balance_get_bucket_i(lb0,
589 (lb0->lb_n_buckets_minus_1)));
591 next0 = dpo0->dpoi_next_node;
592 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
594 vlib_increment_combined_counter
595 (cm, thread_index, lbi0, 1,
596 vlib_buffer_length_in_chain (vm, b0));
598 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
599 vnet_buffer2(b0)->loop_counter = 0;
600 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
603 vnet_buffer2(b0)->loop_counter++;
605 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
606 next0 = IP_LOOKUP_NEXT_DROP;
608 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
610 lookup_trace_t *tr = vlib_add_trace (vm, node,
612 tr->fib_index = fib_index0;
614 tr->addr.ip4 = *input_addr;
617 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
618 n_left_to_next, bi0, next0);
620 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
622 return from_frame->n_vectors;
/*
 * format() callback for IPv4/IPv6 lookup trace records: prints FIB index,
 * the address the lookup was made on, and the resulting load-balance index.
 */
626 format_lookup_trace (u8 * s, va_list * args)
628 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
629 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
630 lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
631 u32 indent = format_get_indent (s);
632 s = format (s, "%U fib-index:%d addr:%U load-balance:%d",
633 format_white_space, indent,
635 format_ip46_address, &t->addr, IP46_TYPE_ANY,
/* IPv4 lookup on the dst address, FIB from the DPO config (flags: src=0, itf=0). */
640 VLIB_NODE_FN (lookup_ip4_dst_node) (vlib_main_t * vm,
641 vlib_node_runtime_t * node,
642 vlib_frame_t * from_frame)
644 return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 0));
647 VLIB_REGISTER_NODE (lookup_ip4_dst_node) = {
648 .name = "lookup-ip4-dst",
649 .vector_size = sizeof (u32),
650 .sibling_of = "ip4-lookup",
651 .format_trace = format_lookup_trace,
/* IPv4 lookup on the dst address, FIB taken from the RX interface (src=0, itf=1). */
654 VLIB_NODE_FN (lookup_ip4_dst_itf_node) (vlib_main_t * vm,
655 vlib_node_runtime_t * node,
656 vlib_frame_t * from_frame)
658 return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 1));
661 VLIB_REGISTER_NODE (lookup_ip4_dst_itf_node) = {
662 .name = "lookup-ip4-dst-itf",
663 .vector_size = sizeof (u32),
664 .sibling_of = "ip4-lookup",
665 .format_trace = format_lookup_trace,
/* IPv4 lookup on the src address, FIB from the DPO config (src=1, itf=0). */
668 VLIB_NODE_FN (lookup_ip4_src_node) (vlib_main_t * vm,
669 vlib_node_runtime_t * node,
670 vlib_frame_t * from_frame)
672 return (lookup_dpo_ip4_inline(vm, node, from_frame, 1, 0));
675 VLIB_REGISTER_NODE (lookup_ip4_src_node) = {
676 .name = "lookup-ip4-src",
677 .vector_size = sizeof (u32),
678 .format_trace = format_lookup_trace,
679 .sibling_of = "ip4-lookup",
/*
 * IPv6 lookup-DPO data-plane function; same structure as the IPv4 variant
 * but using the ip6 FIB forwarding lookup instead of the ip4 mtrie.
 */
683 lookup_dpo_ip6_inline (vlib_main_t * vm,
684 vlib_node_runtime_t * node,
685 vlib_frame_t * from_frame,
687 int table_from_interface)
689 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
690 u32 n_left_from, next_index, * from, * to_next;
691 u32 thread_index = vlib_get_thread_index();
693 from = vlib_frame_vector_args (from_frame);
694 n_left_from = from_frame->n_vectors;
696 next_index = node->cached_next_index;
698 while (n_left_from > 0)
702 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
/* dual-buffer loop */
704 while (n_left_from >= 4 && n_left_to_next > 2)
706 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
707 flow_hash_config_t flow_hash_config0;
708 const ip6_address_t *input_addr0;
709 const load_balance_t *lb0;
710 const lookup_dpo_t * lkd0;
711 const ip6_header_t * ip0;
712 const dpo_id_t *dpo0;
714 u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
715 flow_hash_config_t flow_hash_config1;
716 const ip6_address_t *input_addr1;
717 const load_balance_t *lb1;
718 const lookup_dpo_t * lkd1;
719 const ip6_header_t * ip1;
720 const dpo_id_t *dpo1;
723 /* Prefetch next iteration. */
725 vlib_buffer_t * p2, * p3;
727 p2 = vlib_get_buffer (vm, from[2]);
728 p3 = vlib_get_buffer (vm, from[3]);
730 vlib_prefetch_buffer_header (p2, LOAD);
731 vlib_prefetch_buffer_header (p3, LOAD);
733 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
734 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
746 b0 = vlib_get_buffer (vm, bi0);
747 ip0 = vlib_buffer_get_current (b0);
748 b1 = vlib_get_buffer (vm, bi1);
749 ip1 = vlib_buffer_get_current (b1);
751 /* dst lookup was done by ip6 lookup */
752 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
753 lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
754 lkd0 = lookup_dpo_get(lkdi0);
755 lkd1 = lookup_dpo_get(lkdi1);
758 * choose between a lookup using the fib index in the DPO
759 * or getting the FIB index from the interface.
761 if (table_from_interface)
764 ip6_fib_table_get_index_for_sw_if_index(
765 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
767 ip6_fib_table_get_index_for_sw_if_index(
768 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
772 fib_index0 = lkd0->lkd_fib_index;
773 fib_index1 = lkd1->lkd_fib_index;
777 * choose between a source or destination address lookup in the table
781 input_addr0 = &ip0->src_address;
782 input_addr1 = &ip1->src_address;
786 input_addr0 = &ip0->dst_address;
787 input_addr1 = &ip1->dst_address;
791 lbi0 = ip6_fib_table_fwding_lookup(
794 lbi1 = ip6_fib_table_fwding_lookup(
797 lb0 = load_balance_get(lbi0);
798 lb1 = load_balance_get(lbi1);
800 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
801 vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
803 /* Use flow hash to compute multipath adjacency. */
804 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
805 hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
807 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
808 vnet_buffer2(b0)->loop_counter = 0;
809 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
811 if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
812 vnet_buffer2(b1)->loop_counter = 0;
813 b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
816 vnet_buffer2(b0)->loop_counter++;
817 vnet_buffer2(b1)->loop_counter++;
/* NOTE(review): BUG suspected — next0/next1 set to DROP here are
 * unconditionally overwritten from dpo0/dpo1 a few lines below, so the
 * loop-protection drop never takes effect in this dual-buffer path.
 * Compare the single-buffer loop (and the ip4 variant) where the check
 * follows the next0 assignment. */
819 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
820 next0 = IP_LOOKUP_NEXT_DROP;
821 if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
822 next1 = IP_LOOKUP_NEXT_DROP;
824 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
826 flow_hash_config0 = lb0->lb_hash_config;
827 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
828 ip6_compute_flow_hash (ip0, flow_hash_config0);
831 if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
833 flow_hash_config1 = lb1->lb_hash_config;
834 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
835 ip6_compute_flow_hash (ip1, flow_hash_config1);
838 dpo0 = load_balance_get_bucket_i(lb0,
840 (lb0->lb_n_buckets_minus_1)));
841 dpo1 = load_balance_get_bucket_i(lb1,
843 (lb1->lb_n_buckets_minus_1)));
/* these assignments clobber any DROP decision made above */
845 next0 = dpo0->dpoi_next_node;
846 next1 = dpo1->dpoi_next_node;
847 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
848 vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
850 vlib_increment_combined_counter
851 (cm, thread_index, lbi0, 1,
852 vlib_buffer_length_in_chain (vm, b0));
853 vlib_increment_combined_counter
854 (cm, thread_index, lbi1, 1,
855 vlib_buffer_length_in_chain (vm, b1));
857 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
859 lookup_trace_t *tr = vlib_add_trace (vm, node,
861 tr->fib_index = fib_index0;
863 tr->addr.ip6 = *input_addr0;
865 if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
867 lookup_trace_t *tr = vlib_add_trace (vm, node,
869 tr->fib_index = fib_index1;
871 tr->addr.ip6 = *input_addr1;
873 vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
874 n_left_to_next, bi0, bi1,
/* single-buffer tail loop; here the loop check correctly follows next0 */
877 while (n_left_from > 0 && n_left_to_next > 0)
879 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
880 flow_hash_config_t flow_hash_config0;
881 const ip6_address_t *input_addr0;
882 const load_balance_t *lb0;
883 const lookup_dpo_t * lkd0;
884 const ip6_header_t * ip0;
885 const dpo_id_t *dpo0;
895 b0 = vlib_get_buffer (vm, bi0);
896 ip0 = vlib_buffer_get_current (b0);
898 /* dst lookup was done by ip6 lookup */
899 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
900 lkd0 = lookup_dpo_get(lkdi0);
903 * choose between a lookup using the fib index in the DPO
904 * or getting the FIB index from the interface.
906 if (table_from_interface)
909 ip6_fib_table_get_index_for_sw_if_index(
910 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
914 fib_index0 = lkd0->lkd_fib_index;
918 * choose between a source or destination address lookup in the table
922 input_addr0 = &ip0->src_address;
926 input_addr0 = &ip0->dst_address;
930 lbi0 = ip6_fib_table_fwding_lookup(
933 lb0 = load_balance_get(lbi0);
935 vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
937 /* Use flow hash to compute multipath adjacency. */
938 hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
940 if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
942 flow_hash_config0 = lb0->lb_hash_config;
943 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
944 ip6_compute_flow_hash (ip0, flow_hash_config0);
947 dpo0 = load_balance_get_bucket_i(lb0,
949 (lb0->lb_n_buckets_minus_1)));
951 next0 = dpo0->dpoi_next_node;
952 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
954 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
955 vnet_buffer2(b0)->loop_counter = 0;
956 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
959 vnet_buffer2(b0)->loop_counter++;
961 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
962 next0 = IP_LOOKUP_NEXT_DROP;
964 vlib_increment_combined_counter
965 (cm, thread_index, lbi0, 1,
966 vlib_buffer_length_in_chain (vm, b0));
968 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
970 lookup_trace_t *tr = vlib_add_trace (vm, node,
972 tr->fib_index = fib_index0;
974 tr->addr.ip6 = *input_addr0;
976 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
977 n_left_to_next, bi0, next0);
979 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
981 return from_frame->n_vectors;
/* IPv6 lookup on the dst address, FIB from the DPO config. */
984 VLIB_NODE_FN (lookup_ip6_dst_node) (vlib_main_t * vm,
985 vlib_node_runtime_t * node,
986 vlib_frame_t * from_frame)
988 return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* src=0: lookup on dst address */, 0));
991 VLIB_REGISTER_NODE (lookup_ip6_dst_node) = {
992 .name = "lookup-ip6-dst",
993 .vector_size = sizeof (u32),
994 .format_trace = format_lookup_trace,
995 .sibling_of = "ip6-lookup",
/* IPv6 lookup on the dst address, FIB taken from the RX interface. */
998 VLIB_NODE_FN (lookup_ip6_dst_itf_node) (vlib_main_t * vm,
999 vlib_node_runtime_t * node,
1000 vlib_frame_t * from_frame)
1002 return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* src=0: lookup on dst address */, 1));
1005 VLIB_REGISTER_NODE (lookup_ip6_dst_itf_node) = {
1006 .name = "lookup-ip6-dst-itf",
1007 .vector_size = sizeof (u32),
1008 .format_trace = format_lookup_trace,
1009 .sibling_of = "ip6-lookup",
/* IPv6 lookup on the src address, FIB from the DPO config. */
1012 VLIB_NODE_FN (lookup_ip6_src_node) (vlib_main_t * vm,
1013 vlib_node_runtime_t * node,
1014 vlib_frame_t * from_frame)
1016 return (lookup_dpo_ip6_inline(vm, node, from_frame, 1, 0));
1019 VLIB_REGISTER_NODE (lookup_ip6_src_node) = {
1020 .name = "lookup-ip6-src",
1021 .vector_size = sizeof (u32),
1022 .format_trace = format_lookup_trace,
1023 .sibling_of = "ip6-lookup",
/*
 * MPLS lookup-DPO data-plane function (single-buffer loop only; the
 * dual-buffer loop is commented out below).  Looks the label header up in
 * the selected MPLS FIB, handles replicate results, picks a load-balance
 * bucket by flow hash, saves label TTL/EXP into buffer metadata and pops
 * the label before enqueueing.
 */
1027 lookup_dpo_mpls_inline (vlib_main_t * vm,
1028 vlib_node_runtime_t * node,
1029 vlib_frame_t * from_frame,
1030 int table_from_interface)
1032 u32 n_left_from, next_index, * from, * to_next;
1033 u32 thread_index = vlib_get_thread_index();
1034 vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
1036 from = vlib_frame_vector_args (from_frame);
1037 n_left_from = from_frame->n_vectors;
1039 next_index = node->cached_next_index;
1041 while (n_left_from > 0)
1045 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
1047 /* while (n_left_from >= 4 && n_left_to_next >= 2) */
1050 while (n_left_from > 0 && n_left_to_next > 0)
1052 u32 bi0, lkdi0, lbi0, fib_index0, next0, hash0;
1053 const mpls_unicast_header_t * hdr0;
1054 const load_balance_t *lb0;
1055 const lookup_dpo_t * lkd0;
1056 const dpo_id_t *dpo0;
1064 n_left_to_next -= 1;
1066 b0 = vlib_get_buffer (vm, bi0);
1067 hdr0 = vlib_buffer_get_current (b0);
1069 /* dst lookup was done by mpls lookup */
1070 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
1071 lkd0 = lookup_dpo_get(lkdi0);
1074 * choose between a lookup using the fib index in the DPO
1075 * or getting the FIB index from the interface.
1077 if (table_from_interface)
1080 mpls_fib_table_get_index_for_sw_if_index(
1081 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
1085 fib_index0 = lkd0->lkd_fib_index;
1089 lbi0 = mpls_fib_table_forwarding_lookup (fib_index0, hdr0);
/* NOTE(review): lbi0 is dereferenced as a load-balance index here,
 * BEFORE the MPLS_IS_REPLICATE check below — if lbi0 can carry the
 * replicate flag this get/bucket access is on a bogus index; verify
 * against the upstream ordering where the replicate test comes first. */
1090 lb0 = load_balance_get(lbi0);
1091 dpo0 = load_balance_get_bucket_i(lb0, 0);
1093 next0 = dpo0->dpoi_next_node;
1094 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
/* replicate result: hand off to the mpls replicate edge */
1097 if (MPLS_IS_REPLICATE & lbi0)
1099 next0 = mpls_lookup_to_replicate_edge;
1100 vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
1101 (lbi0 & ~MPLS_IS_REPLICATE);
/* unicast result: select a bucket by MPLS flow hash */
1105 lb0 = load_balance_get(lbi0);
1106 ASSERT (lb0->lb_n_buckets > 0);
1107 ASSERT (is_pow2 (lb0->lb_n_buckets));
1109 if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
1111 hash0 = vnet_buffer (b0)->ip.flow_hash =
1112 mpls_compute_flow_hash(hdr0, lb0->lb_hash_config);
1113 dpo0 = load_balance_get_fwd_bucket
1115 (hash0 & (lb0->lb_n_buckets_minus_1)));
1119 dpo0 = load_balance_get_bucket_i (lb0, 0);
1121 next0 = dpo0->dpoi_next_node;
1123 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
1125 vlib_increment_combined_counter
1126 (cm, thread_index, lbi0, 1,
1127 vlib_buffer_length_in_chain (vm, b0));
/* save label TTL (byte 3) and EXP bits from the header, then pop it */
1130 vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
1131 vnet_buffer (b0)->mpls.exp = (((char*)hdr0)[2] & 0xe) >> 1;
1132 vnet_buffer (b0)->mpls.first = 1;
1133 vlib_buffer_advance(b0, sizeof(*hdr0));
1135 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
1136 vnet_buffer2(b0)->loop_counter = 0;
1137 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
1140 vnet_buffer2(b0)->loop_counter++;
1142 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
1143 next0 = MPLS_LOOKUP_NEXT_DROP;
1145 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1147 lookup_trace_t *tr = vlib_add_trace (vm, node,
1149 tr->fib_index = fib_index0;
1154 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
1155 n_left_to_next, bi0, next0);
1157 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1159 return from_frame->n_vectors;
/*
 * format() callback for MPLS lookup trace records: byte-swaps the captured
 * label header back to host order and prints it with FIB and LB indices.
 */
1163 format_lookup_mpls_trace (u8 * s, va_list * args)
1165 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1166 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1167 lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
1168 u32 indent = format_get_indent (s);
1169 mpls_unicast_header_t hdr;
1171 hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);
1173 s = format (s, "%U fib-index:%d hdr:%U load-balance:%d",
1174 format_white_space, indent,
1176 format_mpls_header, hdr,
/* MPLS lookup, FIB from the DPO config (table_from_interface=0). */
1181 VLIB_NODE_FN (lookup_mpls_dst_node) (vlib_main_t * vm,
1182 vlib_node_runtime_t * node,
1183 vlib_frame_t * from_frame)
1185 return (lookup_dpo_mpls_inline(vm, node, from_frame, 0));
1188 VLIB_REGISTER_NODE (lookup_mpls_dst_node) = {
1189 .name = "lookup-mpls-dst",
1190 .vector_size = sizeof (u32),
1191 .sibling_of = "mpls-lookup",
1192 .format_trace = format_lookup_mpls_trace,
/* MPLS lookup, FIB taken from the RX interface (table_from_interface=1). */
1196 VLIB_NODE_FN (lookup_mpls_dst_itf_node) (vlib_main_t * vm,
1197 vlib_node_runtime_t * node,
1198 vlib_frame_t * from_frame)
1200 return (lookup_dpo_mpls_inline(vm, node, from_frame, 1));
1203 VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
1204 .name = "lookup-mpls-dst-itf",
1205 .vector_size = sizeof (u32),
1206 .sibling_of = "mpls-lookup",
1207 .format_trace = format_lookup_mpls_trace,
/* Next-node indices for the ip[46]-dst-mcast lookup nodes.
 * NOTE(review): the typedef name (mfib_forward_lookup_next_t) does not match
 * the enum tag (lookup_ip_dst_mcast_next_t_) — looks like a copy/paste from
 * the mfib forward node; harmless but worth tidying when convenient. */
1211 typedef enum lookup_ip_dst_mcast_next_t_ {
1212 LOOKUP_IP_DST_MCAST_NEXT_DROP,
1213 LOOKUP_IP_DST_MCAST_NEXT_RPF,
1214 LOOKUP_IP_DST_MCAST_N_NEXT,
1215 } mfib_forward_lookup_next_t;
/*
 * IP multicast dst-lookup data-plane function, shared by the ip4 and ip6
 * dst-mcast nodes.  Looks the dst address up in the configured MFIB, stores
 * the mfib entry index in the TX adjacency, and forwards to the RPF node
 * (or drop on a forwarding loop).
 */
1218 lookup_dpo_ip_dst_mcast_inline (vlib_main_t * vm,
1219 vlib_node_runtime_t * node,
1220 vlib_frame_t * from_frame,
1223 u32 n_left_from, next_index, * from, * to_next;
1225 from = vlib_frame_vector_args (from_frame);
1226 n_left_from = from_frame->n_vectors;
1228 next_index = LOOKUP_IP_DST_MCAST_NEXT_RPF;
1230 while (n_left_from > 0)
1234 vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
1236 /* while (n_left_from >= 4 && n_left_to_next >= 2) */
1239 while (n_left_from > 0 && n_left_to_next > 0)
1241 u32 bi0, lkdi0, fib_index0, next0;
1242 const lookup_dpo_t * lkd0;
1243 fib_node_index_t mfei0;
1251 n_left_to_next -= 1;
1253 b0 = vlib_get_buffer (vm, bi0);
1255 /* dst lookup was done by the previous ip lookup
 * (comment previously said "mpls lookup" — copy/paste) */
1256 lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
1257 lkd0 = lookup_dpo_get(lkdi0);
1258 fib_index0 = lkd0->lkd_fib_index;
1259 next0 = LOOKUP_IP_DST_MCAST_NEXT_RPF;
/* IPv4 branch: mfib lookup on the v4 dst address */
1265 ip0 = vlib_buffer_get_current (b0);
1266 mfei0 = ip4_mfib_table_lookup(ip4_mfib_get(fib_index0),
1270 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1272 lookup_trace_t *tr = vlib_add_trace (vm, node,
1274 tr->fib_index = fib_index0;
1276 tr->addr.ip4 = ip0->dst_address;
/* IPv6 branch: mfib forwarding lookup on the v6 dst address */
1283 ip0 = vlib_buffer_get_current (b0);
1284 mfei0 = ip6_mfib_table_fwd_lookup(ip6_mfib_get(fib_index0),
1287 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1289 lookup_trace_t *tr = vlib_add_trace (vm, node,
1291 tr->fib_index = fib_index0;
1293 tr->addr.ip6 = ip0->dst_address;
1297 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = mfei0;
1299 if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
1300 vnet_buffer2(b0)->loop_counter = 0;
1301 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
1304 vnet_buffer2(b0)->loop_counter++;
1306 if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
1307 next0 = LOOKUP_IP_DST_MCAST_NEXT_DROP;
1309 vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
1310 n_left_to_next, bi0, next0);
1312 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1314 return from_frame->n_vectors;
/* IPv4 multicast dst lookup node (is_v4=1). */
1317 VLIB_NODE_FN (lookup_ip4_dst_mcast_node) (vlib_main_t * vm,
1318 vlib_node_runtime_t * node,
1319 vlib_frame_t * from_frame)
1321 return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 1));
1324 VLIB_REGISTER_NODE (lookup_ip4_dst_mcast_node) = {
1325 .name = "lookup-ip4-dst-mcast",
1326 .vector_size = sizeof (u32),
1328 .format_trace = format_lookup_trace,
1329 .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
1331 [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip4-drop",
1332 [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip4-mfib-forward-rpf",
/* IPv6 multicast dst lookup node (is_v4=0). */
1336 VLIB_NODE_FN (lookup_ip6_dst_mcast_node) (vlib_main_t * vm,
1337 vlib_node_runtime_t * node,
1338 vlib_frame_t * from_frame)
1340 return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 0));
1343 VLIB_REGISTER_NODE (lookup_ip6_dst_mcast_node) = {
1344 .name = "lookup-ip6-dst-mcast",
1345 .vector_size = sizeof (u32),
1347 .format_trace = format_lookup_trace,
1348 .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
1350 [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip6-drop",
1351 [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip6-mfib-forward-rpf",
/*
 * dv_mem_show callback: report lookup-DPO pool usage to "show fib memory".
 */
1356 lookup_dpo_mem_show (void)
1358 fib_show_memory_usage("Lookup",
1359 pool_elts(lookup_dpo_pool),
1360 pool_len(lookup_dpo_pool),
1361 sizeof(lookup_dpo_t));
/* Virtual function tables for the lookup DPO type.  The sub-types share
 * lkd_vft; the base DPO_LOOKUP type additionally exposes mem_show so the
 * pool is reported only once. */
1364 const static dpo_vft_t lkd_vft = {
1365 .dv_lock = lookup_dpo_lock,
1366 .dv_unlock = lookup_dpo_unlock,
1367 .dv_format = format_lookup_dpo,
1369 const static dpo_vft_t lkd_vft_w_mem_show = {
1370 .dv_lock = lookup_dpo_lock,
1371 .dv_unlock = lookup_dpo_unlock,
1372 .dv_format = format_lookup_dpo,
1373 .dv_mem_show = lookup_dpo_mem_show,
/* Per-protocol node-name tables handed to dpo_register_new_type() for each
 * lookup sub-type: they tell the DPO framework which VLIB node implements
 * the sub-type for each DPO protocol (MPLS has no src-lookup node). */
1376 const static char* const lookup_src_ip4_nodes[] =
1381 const static char* const lookup_src_ip6_nodes[] =
1386 const static char* const * const lookup_src_nodes[DPO_PROTO_NUM] =
1388 [DPO_PROTO_IP4] = lookup_src_ip4_nodes,
1389 [DPO_PROTO_IP6] = lookup_src_ip6_nodes,
1390 [DPO_PROTO_MPLS] = NULL,
1393 const static char* const lookup_dst_ip4_nodes[] =
1398 const static char* const lookup_dst_ip6_nodes[] =
1403 const static char* const lookup_dst_mpls_nodes[] =
1408 const static char* const * const lookup_dst_nodes[DPO_PROTO_NUM] =
1410 [DPO_PROTO_IP4] = lookup_dst_ip4_nodes,
1411 [DPO_PROTO_IP6] = lookup_dst_ip6_nodes,
1412 [DPO_PROTO_MPLS] = lookup_dst_mpls_nodes,
1415 const static char* const lookup_dst_mcast_ip4_nodes[] =
1417 "lookup-ip4-dst-mcast",
1420 const static char* const lookup_dst_mcast_ip6_nodes[] =
1422 "lookup-ip6-dst-mcast",
1425 const static char* const * const lookup_dst_mcast_nodes[DPO_PROTO_NUM] =
1427 [DPO_PROTO_IP4] = lookup_dst_mcast_ip4_nodes,
1428 [DPO_PROTO_IP6] = lookup_dst_mcast_ip6_nodes,
1431 const static char* const lookup_dst_from_interface_ip4_nodes[] =
1433 "lookup-ip4-dst-itf",
1436 const static char* const lookup_dst_from_interface_ip6_nodes[] =
1438 "lookup-ip6-dst-itf",
1441 const static char* const lookup_dst_from_interface_mpls_nodes[] =
1443 "lookup-mpls-dst-itf",
1446 const static char* const * const lookup_dst_from_interface_nodes[DPO_PROTO_NUM] =
1448 [DPO_PROTO_IP4] = lookup_dst_from_interface_ip4_nodes,
1449 [DPO_PROTO_IP6] = lookup_dst_from_interface_ip6_nodes,
1450 [DPO_PROTO_MPLS] = lookup_dst_from_interface_mpls_nodes,
/*
 * CLI handler for "show lookup-dpo [<index>]": with an index, show that one
 * DPO; otherwise walk the pool and show them all.
 */
1453 static clib_error_t *
1454 lookup_dpo_show (vlib_main_t * vm,
1455 unformat_input_t * input,
1456 vlib_cli_command_t * cmd)
1458 index_t lkdi = INDEX_INVALID;
1460 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1462 if (unformat (input, "%d", &lkdi))
1468 if (INDEX_INVALID != lkdi)
1470 vlib_cli_output (vm, "%U", format_lookup_dpo, lkdi);
/* no index given: dump every lookup DPO in the pool */
1476 pool_foreach(lkd, lookup_dpo_pool,
1478 vlib_cli_output (vm, "[@%d] %U",
1479 lookup_dpo_get_index(lkd),
1481 lookup_dpo_get_index(lkd));
/* NOTE(review): the command symbol is named "replicate_show_command" but
 * implements "show lookup-dpo" — looks copy-pasted from replicate_dpo.c.
 * The symbol is static, so renaming it to lookup_show_command would be a
 * safe cleanup. */
1488 VLIB_CLI_COMMAND (replicate_show_command, static) = {
1489 .path = "show lookup-dpo",
1490 .short_help = "show lookup-dpo [<index>]",
1491 .function = lookup_dpo_show,
/*
 * Module initialisation: register the base DPO_LOOKUP type (with mem-show)
 * and one dynamically-allocated DPO type per lookup sub-type, binding each
 * to its per-protocol VLIB node table.
 */
1495 lookup_dpo_module_init (void)
1497 dpo_register(DPO_LOOKUP, &lkd_vft_w_mem_show, NULL);
1500 * There are various sorts of lookup; src or dst addr v4 /v6 etc.
1501 * there isn't an object type for each (there is only the lookup_dpo_t),
1502 * but, for performance reasons, there is a data plane function, and hence
1503 * VLIB node for each. VLIB graph node construction is based on DPO types
1504 * so we create sub-types.
1506 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC] =
1507 dpo_register_new_type(&lkd_vft, lookup_src_nodes);
1508 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST] =
1509 dpo_register_new_type(&lkd_vft, lookup_dst_nodes);
1510 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST] =
1511 dpo_register_new_type(&lkd_vft, lookup_dst_mcast_nodes);
1512 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE] =
1513 dpo_register_new_type(&lkd_vft, lookup_dst_from_interface_nodes);