2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/ip/ip.h>
17 #include <vnet/dpo/lookup_dpo.h>
18 #include <vnet/dpo/load_balance_map.h>
19 #include <vnet/mpls/mpls_lookup.h>
20 #include <vnet/fib/fib_table.h>
21 #include <vnet/fib/ip4_fib.h>
22 #include <vnet/fib/ip6_fib.h>
23 #include <vnet/fib/mpls_fib.h>
24 #include <vnet/mfib/mfib_table.h>
25 #include <vnet/mfib/ip4_mfib.h>
26 #include <vnet/mfib/ip6_mfib.h>
/* Human-readable names for the lookup-input (src vs dst address) and
 * lookup-cast (unicast vs multicast) enums; the initializer lists come
 * from the LOOKUP_INPUTS / LOOKUP_CASTS x-macros in the header. */
static const char *const lookup_input_names[] = LOOKUP_INPUTS;
static const char *const lookup_cast_names[] = LOOKUP_CASTS;

/*
 * If a packet encounters a lookup DPO more than this many times
 * then we assume there is a loop in the forwarding graph and drop
 * the packet.
 * NOTE(review): "LUKPS" is a typo for "LOOKUPS"; left as-is since the
 * macro is referenced throughout this file.
 */
#define MAX_LUKPS_PER_PACKET 4
/**
 * @brief Enumeration of the lookup sub-types.
 * One sub-type exists per data-plane variant (src/dst lookup, table
 * from config vs from interface, multicast) so each can have its own
 * VLIB node (see lookup_dpo_module_init).
 * NOTE(review): the leading enumerators (presumably
 * LOOKUP_SUB_TYPE_SRC and LOOKUP_SUB_TYPE_DST) and the typedef's
 * closing line are not visible in this extract — confirm against the
 * full file.
 */
typedef enum lookup_sub_type_t_
    LOOKUP_SUB_TYPE_DST_MCAST,
    LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE,
/* Number of sub-types = last enumerator + 1. */
#define LOOKUP_SUB_TYPE_NUM (LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE+1)

/* Iterate _st over all sub-types.
 * NOTE(review): the start value LOOKUP_SUB_TYPE_IP4_SRC is not among
 * the enumerators visible here — verify the identifier in the full
 * enum definition. */
#define FOR_EACH_LOOKUP_SUB_TYPE(_st) \
    for (_st = LOOKUP_SUB_TYPE_IP4_SRC; _st < LOOKUP_SUB_TYPE_NUM; _st++)
/**
 * @brief Pool of all lookup DPOs.
 * (NOTE(review): the original comment said "MPLS Label DPOs" — a
 * copy/paste from another DPO file; this pool holds lookup_dpo_t.)
 */
lookup_dpo_t *lookup_dpo_pool;

/**
 * @brief An array of registered DPO type values for the sub-types,
 * filled in by lookup_dpo_module_init via dpo_register_new_type.
 */
static dpo_type_t lookup_dpo_sub_types[LOOKUP_SUB_TYPE_NUM];
/* Allocate a new lookup DPO from the pool under the worker-thread
 * barrier (pool_get may move the pool's memory, so workers must be
 * synced first).
 * NOTE(review): the return-type line, braces, and the declarations of
 * vm/lkd/did_barrier_sync are missing from this extract. */
lookup_dpo_alloc (void)
    dpo_pool_barrier_sync (vm, lookup_dpo_pool, did_barrier_sync);
    pool_get_aligned(lookup_dpo_pool, lkd, CLIB_CACHE_LINE_BYTES);
    dpo_pool_barrier_release (vm, did_barrier_sync);
/* Map a lookup DPO object back to its pool index (pointer
 * arithmetic against the pool base). */
lookup_dpo_get_index (lookup_dpo_t *lkd)
    return (lkd - lookup_dpo_pool);
/* Internal helper: allocate and populate a lookup DPO, pick the DPO
 * sub-type (and hence the VLIB node) from the input/table/cast
 * configuration, then stack it into *dpo via dpo_set.
 * NOTE(review): several parameter lines (proto, cast, input, dpo),
 * braces, break statements and the switch(input) header are missing
 * from this extract. */
lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
                          lookup_table_t table_config,
    lkd = lookup_dpo_alloc();
    lkd->lkd_fib_index = fib_index;
    lkd->lkd_proto = proto;
    lkd->lkd_input = input;
    lkd->lkd_table = table_config;

    /*
     * use the input type to select the lookup sub-type
     */
    case LOOKUP_INPUT_SRC_ADDR:
        type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC];
    case LOOKUP_INPUT_DST_ADDR:
        switch (table_config)
        case LOOKUP_TABLE_FROM_INPUT_INTERFACE:
            type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE];
        case LOOKUP_TABLE_FROM_CONFIG:
            type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST];
    /* multicast dst lookups override the sub-type chosen above */
    if (LOOKUP_MULTICAST == cast)
        type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST];
    dpo_set(dpo, type, proto, lookup_dpo_get_index(lkd));
/* Public entry: create a lookup DPO against an already-known FIB
 * index. When the table comes from config we take a lock on the
 * (m)fib table so it cannot be deleted while the DPO references it;
 * the matching unlock is in lookup_dpo_unlock.
 * NOTE(review): parameter lines for proto/cast/dpo, braces, and the
 * lock-source argument (likely FIB_SOURCE_RR) are missing from this
 * extract. */
lookup_dpo_add_or_lock_w_fib_index (fib_node_index_t fib_index,
                                    lookup_input_t input,
                                    lookup_table_t table_config,
    if (LOOKUP_TABLE_FROM_CONFIG == table_config)
        if (LOOKUP_UNICAST == cast)
            fib_table_lock(fib_index,
                           dpo_proto_to_fib(proto),
            /* multicast: lock the mfib table instead */
            mfib_table_lock(fib_index,
                            dpo_proto_to_fib(proto),
    lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
/* Public entry: create a lookup DPO against a user-visible table-id.
 * The table is found-or-created (and locked) in the appropriate
 * (m)fib, then delegated to lookup_dpo_add_or_lock_i.
 * NOTE(review): parameter lines for proto/cast/dpo, braces, and the
 * remaining arguments of the find_or_create calls (table_id, source)
 * are missing from this extract. */
lookup_dpo_add_or_lock_w_table_id (u32 table_id,
                                   lookup_input_t input,
                                   lookup_table_t table_config,
    fib_node_index_t fib_index = FIB_NODE_INDEX_INVALID;

    if (LOOKUP_TABLE_FROM_CONFIG == table_config)
        if (LOOKUP_UNICAST == cast)
            fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
            /* multicast: resolve via the mfib instead */
            mfib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
    /* by here a valid table must have been found or created */
    ASSERT(FIB_NODE_INDEX_INVALID != fib_index);
    lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
/* format() callback for a lookup DPO: renders input type, cast and
 * the table looked up in. Three shapes: table-from-interface (no
 * fixed table), unicast table, multicast table.
 * NOTE(review): return type, braces, the lkd declaration and the
 * else keywords between the branches are missing from this extract. */
format_lookup_dpo (u8 *s, va_list *args)
    index_t index = va_arg (*args, index_t);
    lkd = lookup_dpo_get(index);

    if (LOOKUP_TABLE_FROM_INPUT_INTERFACE == lkd->lkd_table)
        /* table is chosen per-packet from the RX interface */
        s = format(s, "%s,%s lookup in interface's %U table",
                   lookup_input_names[lkd->lkd_input],
                   lookup_cast_names[lkd->lkd_cast],
                   format_dpo_proto, lkd->lkd_proto);
    if (LOOKUP_UNICAST == lkd->lkd_cast)
        /* fixed unicast FIB table */
        s = format(s, "%s,%s lookup in %U",
                   lookup_input_names[lkd->lkd_input],
                   lookup_cast_names[lkd->lkd_cast],
                   format_fib_table_name, lkd->lkd_fib_index,
                   dpo_proto_to_fib(lkd->lkd_proto));
    /* fixed multicast FIB table */
    s = format(s, "%s,%s lookup in %U",
               lookup_input_names[lkd->lkd_input],
               lookup_cast_names[lkd->lkd_cast],
               format_mfib_table_name, lkd->lkd_fib_index,
               dpo_proto_to_fib(lkd->lkd_proto));
/* dv_lock callback: bump the reference count on the lookup DPO.
 * NOTE(review): the lock-increment statement itself is missing from
 * this extract; only the object fetch is visible. */
lookup_dpo_lock (dpo_id_t *dpo)
    lkd = lookup_dpo_get(dpo->dpoi_index);

/* dv_unlock callback: drop a reference; on the last unlock release
 * the (m)fib table lock taken at creation (config-table case only)
 * and return the object to the pool.
 * NOTE(review): decrement statement, braces and the unlock-source
 * arguments are missing from this extract. */
lookup_dpo_unlock (dpo_id_t *dpo)
    lkd = lookup_dpo_get(dpo->dpoi_index);
    if (0 == lkd->lkd_locks)
        if (LOOKUP_TABLE_FROM_CONFIG == lkd->lkd_table)
            if (LOOKUP_UNICAST == lkd->lkd_cast)
                fib_table_unlock(lkd->lkd_fib_index,
                                 dpo_proto_to_fib(lkd->lkd_proto),
                /* multicast table was mfib-locked at creation */
                mfib_table_unlock(lkd->lkd_fib_index,
                                  dpo_proto_to_fib(lkd->lkd_proto),
        pool_put(lookup_dpo_pool, lkd);
/* One-packet mtrie walk: look up addr0 in the given ip4 FIB and
 * return the resulting load-balance index via src_adj_index0.
 * Despite the "src" in the name, the address passed in may be either
 * src or dst — the caller chooses (see lookup_dpo_ip4_inline).
 * NOTE(review): return-type line and braces are missing from this
 * extract. */
ip4_src_fib_lookup_one (u32 src_fib_index0,
                        const ip4_address_t * addr0,
                        u32 * src_adj_index0)
    ip4_fib_mtrie_leaf_t leaf0;
    ip4_fib_mtrie_t * mtrie0;

    mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;

    /* 16-8-8 mtrie walk: step one covers the first two octets */
    leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0);
    leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
    leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);

    src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);

/* Two-packet variant of the above, with the two walks interleaved so
 * the memory accesses of the independent lookups can overlap. */
ip4_src_fib_lookup_two (u32 src_fib_index0,
                        const ip4_address_t * addr0,
                        const ip4_address_t * addr1,
                        u32 * src_adj_index0,
                        u32 * src_adj_index1)
    ip4_fib_mtrie_leaf_t leaf0, leaf1;
    ip4_fib_mtrie_t * mtrie0, * mtrie1;

    mtrie0 = &ip4_fib_get (src_fib_index0)->mtrie;
    mtrie1 = &ip4_fib_get (src_fib_index1)->mtrie;

    leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, addr0);
    leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, addr1);

    leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 2);
    leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 2);

    leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, addr0, 3);
    leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, addr1, 3);

    src_adj_index0[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
    src_adj_index1[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
/**
 * @brief Lookup trace data — captured per-packet when tracing is on,
 * rendered by format_lookup_trace / format_lookup_mpls_trace.
 * NOTE(review): the union 'addr' member (ip46_address_t, referenced
 * as tr->addr by the trace code below) and the lbi member are missing
 * from this extract — confirm against the full file.
 */
typedef struct lookup_trace_t_
    /* MPLS header as seen on the wire (MPLS path only) */
    mpls_unicast_header_t hdr;
    /* the FIB/table the second lookup was performed in */
    fib_node_index_t fib_index;
/**
 * @brief IP4 lookup-DPO data-plane template, specialized by the
 * lookup-ip4-{dst,dst-itf,src} nodes below.
 *
 * For each packet that hit a lookup DPO: pick the FIB table (from the
 * DPO, or from the RX interface when table_from_interface), pick the
 * lookup key (src or dst address — the selecting parameter's
 * declaration line is not visible in this extract; presumably
 * "int input_src_addr", TODO confirm), run a second ip4 lookup, then
 * load-balance over the result and enqueue to the chosen next node.
 * A per-buffer loop counter bounds repeated lookups to
 * MAX_LUKPS_PER_PACKET to break forwarding-graph loops.
 *
 * NOTE(review): braces, several assignments (bi0/bi1 from 'from',
 * to_next bookkeeping, fib_index assignment targets) and blank lines
 * were dropped by the extraction; code lines below are verbatim.
 */
lookup_dpo_ip4_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * from_frame,
                       int table_from_interface)
    u32 n_left_from, next_index, * from, * to_next;
    u32 thread_index = vlib_get_thread_index();
    vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        /* dual-buffer loop */
        while (n_left_from >= 4 && n_left_to_next > 2)
            u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
            flow_hash_config_t flow_hash_config0;
            const ip4_address_t *input_addr0;
            const load_balance_t *lb0;
            const lookup_dpo_t * lkd0;
            const ip4_header_t * ip0;
            const dpo_id_t *dpo0;

            u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
            flow_hash_config_t flow_hash_config1;
            const ip4_address_t *input_addr1;
            const load_balance_t *lb1;
            const lookup_dpo_t * lkd1;
            const ip4_header_t * ip1;
            const dpo_id_t *dpo1;

            /* Prefetch next iteration. */
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
            CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);

            b0 = vlib_get_buffer (vm, bi0);
            ip0 = vlib_buffer_get_current (b0);
            b1 = vlib_get_buffer (vm, bi1);
            ip1 = vlib_buffer_get_current (b1);

            /* dst lookup was done by ip4 lookup */
            lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
            lkd0 = lookup_dpo_get(lkdi0);
            lkd1 = lookup_dpo_get(lkdi1);

            /*
             * choose between a lookup using the fib index in the DPO
             * or getting the FIB index from the interface.
             */
            if (table_from_interface)
                ip4_fib_table_get_index_for_sw_if_index(
                    vnet_buffer(b0)->sw_if_index[VLIB_RX]);
                ip4_fib_table_get_index_for_sw_if_index(
                    vnet_buffer(b1)->sw_if_index[VLIB_RX]);
                fib_index0 = lkd0->lkd_fib_index;
                fib_index1 = lkd1->lkd_fib_index;

            /*
             * choose between a source or destination address lookup in the table
             */
                input_addr0 = &ip0->src_address;
                input_addr1 = &ip1->src_address;
                input_addr0 = &ip0->dst_address;
                input_addr1 = &ip1->dst_address;

            /* second lookup: both packets through the mtrie pair */
            ip4_src_fib_lookup_two (fib_index0, fib_index1,
                                    input_addr0, input_addr1,
            lb0 = load_balance_get(lbi0);
            lb1 = load_balance_get(lbi1);

            /* record the table used, for downstream consumers */
            vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
            vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;

            /* Use flow hash to compute multipath adjacency. */
            hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
            hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;

            if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
                flow_hash_config0 = lb0->lb_hash_config;
                hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                    ip4_compute_flow_hash (ip0, flow_hash_config0);

            if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
                flow_hash_config1 = lb1->lb_hash_config;
                hash_c1 = vnet_buffer (b1)->ip.flow_hash =
                    ip4_compute_flow_hash (ip1, flow_hash_config1);

            /* select the LB bucket by (hash & n_buckets-1) */
            dpo0 = load_balance_get_bucket_i(lb0,
                                             (lb0->lb_n_buckets_minus_1)));
            dpo1 = load_balance_get_bucket_i(lb1,
                                             (lb1->lb_n_buckets_minus_1)));

            next0 = dpo0->dpoi_next_node;
            next1 = dpo1->dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
            vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

            vlib_increment_combined_counter
                (cm, thread_index, lbi0, 1,
                 vlib_buffer_length_in_chain (vm, b0));
            vlib_increment_combined_counter
                (cm, thread_index, lbi1, 1,
                 vlib_buffer_length_in_chain (vm, b1));

            /* initialize the per-buffer loop counter on first lookup */
            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
                vnet_buffer2(b0)->loop_counter = 0;
                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
            if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
                vnet_buffer2(b1)->loop_counter = 0;
                b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;

            vnet_buffer2(b0)->loop_counter++;
            vnet_buffer2(b1)->loop_counter++;

            /* break forwarding loops: drop after too many lookups */
            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
                next0 = IP_LOOKUP_NEXT_DROP;
            if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
                next1 = IP_LOOKUP_NEXT_DROP;

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                lookup_trace_t *tr = vlib_add_trace (vm, node,
                tr->fib_index = fib_index0;
                tr->addr.ip4 = *input_addr0;
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
                lookup_trace_t *tr = vlib_add_trace (vm, node,
                tr->fib_index = fib_index1;
                tr->addr.ip4 = *input_addr1;

            vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                             to_next, n_left_to_next,
                                             bi0, bi1, next0, next1);

        /* single-buffer tail loop — same logic as above for one pkt */
        while (n_left_from > 0 && n_left_to_next > 0)
            u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
            flow_hash_config_t flow_hash_config0;
            const ip4_address_t *input_addr;
            const load_balance_t *lb0;
            const lookup_dpo_t * lkd0;
            const ip4_header_t * ip0;
            const dpo_id_t *dpo0;

            b0 = vlib_get_buffer (vm, bi0);
            ip0 = vlib_buffer_get_current (b0);

            /* dst lookup was done by ip4 lookup */
            lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            lkd0 = lookup_dpo_get(lkdi0);

            /*
             * choose between a lookup using the fib index in the DPO
             * or getting the FIB index from the interface.
             */
            if (table_from_interface)
                ip4_fib_table_get_index_for_sw_if_index(
                    vnet_buffer(b0)->sw_if_index[VLIB_RX]);
                fib_index0 = lkd0->lkd_fib_index;

            /*
             * choose between a source or destination address lookup in the table
             */
                input_addr = &ip0->src_address;
                input_addr = &ip0->dst_address;

            ip4_src_fib_lookup_one (fib_index0, input_addr, &lbi0);
            lb0 = load_balance_get(lbi0);

            vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;

            /* Use flow hash to compute multipath adjacency. */
            hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;

            if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
                flow_hash_config0 = lb0->lb_hash_config;
                hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                    ip4_compute_flow_hash (ip0, flow_hash_config0);

            dpo0 = load_balance_get_bucket_i(lb0,
                                             (lb0->lb_n_buckets_minus_1)));

            next0 = dpo0->dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

            vlib_increment_combined_counter
                (cm, thread_index, lbi0, 1,
                 vlib_buffer_length_in_chain (vm, b0));

            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
                vnet_buffer2(b0)->loop_counter = 0;
                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;

            vnet_buffer2(b0)->loop_counter++;

            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
                next0 = IP_LOOKUP_NEXT_DROP;

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                lookup_trace_t *tr = vlib_add_trace (vm, node,
                tr->fib_index = fib_index0;
                tr->addr.ip4 = *input_addr;

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    return from_frame->n_vectors;
/* Trace formatter shared by all IP lookup-DPO nodes: prints the FIB
 * index, the looked-up address and the resulting load-balance index.
 * NOTE(review): return type and the lbi argument line are missing
 * from this extract. */
format_lookup_trace (u8 * s, va_list * args)
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
    u32 indent = format_get_indent (s);
    s = format (s, "%U fib-index:%d addr:%U load-balance:%d",
                format_white_space, indent,
                format_ip46_address, &t->addr, IP46_TYPE_ANY,
/* lookup-ip4-dst: dst-address lookup in the DPO's configured table.
 * Args to the inline: (input_src_addr=0, table_from_interface=0) —
 * the first flag's name is inferred; the parameter line was dropped
 * from this extract. */
VLIB_NODE_FN (lookup_ip4_dst_node) (vlib_main_t * vm,
                                    vlib_node_runtime_t * node,
                                    vlib_frame_t * from_frame)
    return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 0));

VLIB_REGISTER_NODE (lookup_ip4_dst_node) = {
    .name = "lookup-ip4-dst",
    .vector_size = sizeof (u32),
    .sibling_of = "ip4-lookup",
    .format_trace = format_lookup_trace,

/* lookup-ip4-dst-itf: dst-address lookup, table from RX interface. */
VLIB_NODE_FN (lookup_ip4_dst_itf_node) (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        vlib_frame_t * from_frame)
    return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 1));

VLIB_REGISTER_NODE (lookup_ip4_dst_itf_node) = {
    .name = "lookup-ip4-dst-itf",
    .vector_size = sizeof (u32),
    .sibling_of = "ip4-lookup",
    .format_trace = format_lookup_trace,

/* lookup-ip4-src: src-address lookup in the DPO's configured table. */
VLIB_NODE_FN (lookup_ip4_src_node) (vlib_main_t * vm,
                                    vlib_node_runtime_t * node,
                                    vlib_frame_t * from_frame)
    return (lookup_dpo_ip4_inline(vm, node, from_frame, 1, 0));

VLIB_REGISTER_NODE (lookup_ip4_src_node) = {
    .name = "lookup-ip4-src",
    .vector_size = sizeof (u32),
    .format_trace = format_lookup_trace,
    .sibling_of = "ip4-lookup",
/**
 * @brief IP6 lookup-DPO data-plane template, specialized by the
 * lookup-ip6-{dst,dst-itf,src} nodes below. Mirrors the ip4 variant
 * but uses ip6_fib_table_fwding_lookup instead of the ip4 mtrie.
 *
 * NOTE(review): braces, bi/to_next bookkeeping and several assignment
 * targets were dropped by the extraction; code lines are verbatim.
 *
 * NOTE(review, possible bug): in the dual-buffer loop below, the
 * loop-counter DROP decision (next0/next1 = IP_LOOKUP_NEXT_DROP) is
 * made BEFORE next0/next1 are assigned from dpo0/dpo1->dpoi_next_node
 * further down — so the drop is overwritten and the loop-limit check
 * is ineffective in the x2 path. The ip4 dual loop and the ip6
 * single-buffer loop do the check after the next-node assignment.
 * Verify against the full file and upstream history.
 */
lookup_dpo_ip6_inline (vlib_main_t * vm,
                       vlib_node_runtime_t * node,
                       vlib_frame_t * from_frame,
                       int table_from_interface)
    vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
    u32 n_left_from, next_index, * from, * to_next;
    u32 thread_index = vlib_get_thread_index();

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        /* dual-buffer loop */
        while (n_left_from >= 4 && n_left_to_next > 2)
            u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
            flow_hash_config_t flow_hash_config0;
            const ip6_address_t *input_addr0;
            const load_balance_t *lb0;
            const lookup_dpo_t * lkd0;
            const ip6_header_t * ip0;
            const dpo_id_t *dpo0;

            u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
            flow_hash_config_t flow_hash_config1;
            const ip6_address_t *input_addr1;
            const load_balance_t *lb1;
            const lookup_dpo_t * lkd1;
            const ip6_header_t * ip1;
            const dpo_id_t *dpo1;

            /* Prefetch next iteration. */
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
            CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);

            b0 = vlib_get_buffer (vm, bi0);
            ip0 = vlib_buffer_get_current (b0);
            b1 = vlib_get_buffer (vm, bi1);
            ip1 = vlib_buffer_get_current (b1);

            /* dst lookup was done by ip6 lookup */
            lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
            lkd0 = lookup_dpo_get(lkdi0);
            lkd1 = lookup_dpo_get(lkdi1);

            /*
             * choose between a lookup using the fib index in the DPO
             * or getting the FIB index from the interface.
             */
            if (table_from_interface)
                ip6_fib_table_get_index_for_sw_if_index(
                    vnet_buffer(b0)->sw_if_index[VLIB_RX]);
                ip6_fib_table_get_index_for_sw_if_index(
                    vnet_buffer(b1)->sw_if_index[VLIB_RX]);
                fib_index0 = lkd0->lkd_fib_index;
                fib_index1 = lkd1->lkd_fib_index;

            /*
             * choose between a source or destination address lookup in the table
             */
                input_addr0 = &ip0->src_address;
                input_addr1 = &ip1->src_address;
                input_addr0 = &ip0->dst_address;
                input_addr1 = &ip1->dst_address;

            /* second lookup in the ip6 forwarding table */
            lbi0 = ip6_fib_table_fwding_lookup(
            lbi1 = ip6_fib_table_fwding_lookup(
            lb0 = load_balance_get(lbi0);
            lb1 = load_balance_get(lbi1);

            vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
            vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;

            /* Use flow hash to compute multipath adjacency. */
            hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
            hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;

            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
                vnet_buffer2(b0)->loop_counter = 0;
                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
            if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
                vnet_buffer2(b1)->loop_counter = 0;
                b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;

            vnet_buffer2(b0)->loop_counter++;
            vnet_buffer2(b1)->loop_counter++;

            /* see NOTE(review) above: these drops are overwritten by
             * the next0/next1 assignments below */
            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
                next0 = IP_LOOKUP_NEXT_DROP;
            if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
                next1 = IP_LOOKUP_NEXT_DROP;

            if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
                flow_hash_config0 = lb0->lb_hash_config;
                hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                    ip6_compute_flow_hash (ip0, flow_hash_config0);

            if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
                flow_hash_config1 = lb1->lb_hash_config;
                hash_c1 = vnet_buffer (b1)->ip.flow_hash =
                    ip6_compute_flow_hash (ip1, flow_hash_config1);

            dpo0 = load_balance_get_bucket_i(lb0,
                                             (lb0->lb_n_buckets_minus_1)));
            dpo1 = load_balance_get_bucket_i(lb1,
                                             (lb1->lb_n_buckets_minus_1)));

            next0 = dpo0->dpoi_next_node;
            next1 = dpo1->dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
            vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

            vlib_increment_combined_counter
                (cm, thread_index, lbi0, 1,
                 vlib_buffer_length_in_chain (vm, b0));
            vlib_increment_combined_counter
                (cm, thread_index, lbi1, 1,
                 vlib_buffer_length_in_chain (vm, b1));

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                lookup_trace_t *tr = vlib_add_trace (vm, node,
                tr->fib_index = fib_index0;
                tr->addr.ip6 = *input_addr0;
            if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
                lookup_trace_t *tr = vlib_add_trace (vm, node,
                tr->fib_index = fib_index1;
                tr->addr.ip6 = *input_addr1;

            vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, bi1,

        /* single-buffer tail loop */
        while (n_left_from > 0 && n_left_to_next > 0)
            u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
            flow_hash_config_t flow_hash_config0;
            const ip6_address_t *input_addr0;
            const load_balance_t *lb0;
            const lookup_dpo_t * lkd0;
            const ip6_header_t * ip0;
            const dpo_id_t *dpo0;

            b0 = vlib_get_buffer (vm, bi0);
            ip0 = vlib_buffer_get_current (b0);

            /* dst lookup was done by ip6 lookup */
            lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            lkd0 = lookup_dpo_get(lkdi0);

            /*
             * choose between a lookup using the fib index in the DPO
             * or getting the FIB index from the interface.
             */
            if (table_from_interface)
                ip6_fib_table_get_index_for_sw_if_index(
                    vnet_buffer(b0)->sw_if_index[VLIB_RX]);
                fib_index0 = lkd0->lkd_fib_index;

            /*
             * choose between a source or destination address lookup in the table
             */
                input_addr0 = &ip0->src_address;
                input_addr0 = &ip0->dst_address;

            lbi0 = ip6_fib_table_fwding_lookup(
            lb0 = load_balance_get(lbi0);

            vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;

            /* Use flow hash to compute multipath adjacency. */
            hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;

            if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
                flow_hash_config0 = lb0->lb_hash_config;
                hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                    ip6_compute_flow_hash (ip0, flow_hash_config0);

            dpo0 = load_balance_get_bucket_i(lb0,
                                             (lb0->lb_n_buckets_minus_1)));

            next0 = dpo0->dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

            /* here the drop check correctly follows the next0 assignment */
            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
                vnet_buffer2(b0)->loop_counter = 0;
                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;

            vnet_buffer2(b0)->loop_counter++;

            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
                next0 = IP_LOOKUP_NEXT_DROP;

            vlib_increment_combined_counter
                (cm, thread_index, lbi0, 1,
                 vlib_buffer_length_in_chain (vm, b0));

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                lookup_trace_t *tr = vlib_add_trace (vm, node,
                tr->fib_index = fib_index0;
                tr->addr.ip6 = *input_addr0;

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    return from_frame->n_vectors;
/* lookup-ip6-dst: dst-address lookup in the DPO's configured table.
 * (The original inline comment said "use src" next to the 0 argument,
 * which contradicted the value — 0 selects the DST address; fixed.) */
VLIB_NODE_FN (lookup_ip6_dst_node) (vlib_main_t * vm,
                                    vlib_node_runtime_t * node,
                                    vlib_frame_t * from_frame)
    return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* dst addr */, 0));

VLIB_REGISTER_NODE (lookup_ip6_dst_node) = {
    .name = "lookup-ip6-dst",
    .vector_size = sizeof (u32),
    .format_trace = format_lookup_trace,
    .sibling_of = "ip6-lookup",

/* lookup-ip6-dst-itf: dst-address lookup, table from RX interface. */
VLIB_NODE_FN (lookup_ip6_dst_itf_node) (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        vlib_frame_t * from_frame)
    return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* dst addr */, 1));

VLIB_REGISTER_NODE (lookup_ip6_dst_itf_node) = {
    .name = "lookup-ip6-dst-itf",
    .vector_size = sizeof (u32),
    .format_trace = format_lookup_trace,
    .sibling_of = "ip6-lookup",

/* lookup-ip6-src: src-address lookup in the DPO's configured table. */
VLIB_NODE_FN (lookup_ip6_src_node) (vlib_main_t * vm,
                                    vlib_node_runtime_t * node,
                                    vlib_frame_t * from_frame)
    return (lookup_dpo_ip6_inline(vm, node, from_frame, 1, 0));

VLIB_REGISTER_NODE (lookup_ip6_src_node) = {
    .name = "lookup-ip6-src",
    .vector_size = sizeof (u32),
    .format_trace = format_lookup_trace,
    .sibling_of = "ip6-lookup",
/**
 * @brief MPLS lookup-DPO data-plane template, specialized by the
 * lookup-mpls-dst{,-itf} nodes. Re-looks-up the label in the MPLS
 * FIB selected by the DPO or the RX interface, handles replicate
 * results, pops the label and enqueues.
 *
 * NOTE(review): this extract shows two lb/dpo resolution sequences
 * back-to-back (a simple bucket-0 fetch, then an if(MPLS_IS_REPLICATE)
 * / flow-hash path). The first appears to be superseded by the
 * second; intervening lines were likely dropped by the extraction —
 * confirm against the full file before reasoning about order.
 */
lookup_dpo_mpls_inline (vlib_main_t * vm,
                        vlib_node_runtime_t * node,
                        vlib_frame_t * from_frame,
                        int table_from_interface)
    u32 n_left_from, next_index, * from, * to_next;
    u32 thread_index = vlib_get_thread_index();
    vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = node->cached_next_index;

    while (n_left_from > 0)
        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        /* while (n_left_from >= 4 && n_left_to_next >= 2) */

        while (n_left_from > 0 && n_left_to_next > 0)
            u32 bi0, lkdi0, lbi0, fib_index0, next0, hash0;
            const mpls_unicast_header_t * hdr0;
            const load_balance_t *lb0;
            const lookup_dpo_t * lkd0;
            const dpo_id_t *dpo0;

            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);
            hdr0 = vlib_buffer_get_current (b0);

            /* dst lookup was done by mpls lookup */
            lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            lkd0 = lookup_dpo_get(lkdi0);

            /*
             * choose between a lookup using the fib index in the DPO
             * or getting the FIB index from the interface.
             */
            if (table_from_interface)
                mpls_fib_table_get_index_for_sw_if_index(
                    vnet_buffer(b0)->sw_if_index[VLIB_RX]);
                fib_index0 = lkd0->lkd_fib_index;

            /* second lookup on the label value */
            lbi0 = mpls_fib_table_forwarding_lookup (fib_index0, hdr0);
            lb0 = load_balance_get(lbi0);
            dpo0 = load_balance_get_bucket_i(lb0, 0);

            next0 = dpo0->dpoi_next_node;
            vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

            /* the lookup result may be a replicate, flagged in the
             * index itself */
            if (MPLS_IS_REPLICATE & lbi0)
                next0 = mpls_lookup_to_replicate_edge;
                vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                    (lbi0 & ~MPLS_IS_REPLICATE);

            lb0 = load_balance_get(lbi0);
            ASSERT (lb0->lb_n_buckets > 0);
            ASSERT (is_pow2 (lb0->lb_n_buckets));

            if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
                hash0 = vnet_buffer (b0)->ip.flow_hash =
                    mpls_compute_flow_hash(hdr0, lb0->lb_hash_config);
                dpo0 = load_balance_get_fwd_bucket
                    (hash0 & (lb0->lb_n_buckets_minus_1)));
                dpo0 = load_balance_get_bucket_i (lb0, 0);
            next0 = dpo0->dpoi_next_node;

            vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

            vlib_increment_combined_counter
                (cm, thread_index, lbi0, 1,
                 vlib_buffer_length_in_chain (vm, b0));

            /* stash MPLS header fields, then pop the label */
            vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
            vnet_buffer (b0)->mpls.exp = (((char*)hdr0)[2] & 0xe) >> 1;
            vnet_buffer (b0)->mpls.first = 1;
            vlib_buffer_advance(b0, sizeof(*hdr0));

            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
                vnet_buffer2(b0)->loop_counter = 0;
                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;

            vnet_buffer2(b0)->loop_counter++;

            /* break forwarding loops: drop after too many lookups */
            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
                next0 = MPLS_LOOKUP_NEXT_DROP;

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                lookup_trace_t *tr = vlib_add_trace (vm, node,
                tr->fib_index = fib_index0;

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    return from_frame->n_vectors;
/* Trace formatter for the MPLS lookup-DPO nodes: converts the stored
 * on-wire header to host order and prints fib-index, header and
 * load-balance.
 * NOTE(review): return type and the trailing lbi argument line are
 * missing from this extract. */
format_lookup_mpls_trace (u8 * s, va_list * args)
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
    u32 indent = format_get_indent (s);
    mpls_unicast_header_t hdr;

    /* header was captured in network byte order */
    hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);

    s = format (s, "%U fib-index:%d hdr:%U load-balance:%d",
                format_white_space, indent,
                format_mpls_header, hdr,
/* lookup-mpls-dst: label lookup in the DPO's configured MPLS table. */
VLIB_NODE_FN (lookup_mpls_dst_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * from_frame)
    return (lookup_dpo_mpls_inline(vm, node, from_frame, 0));

VLIB_REGISTER_NODE (lookup_mpls_dst_node) = {
    .name = "lookup-mpls-dst",
    .vector_size = sizeof (u32),
    .sibling_of = "mpls-lookup",
    .format_trace = format_lookup_mpls_trace,

/* lookup-mpls-dst-itf: label lookup, table from the RX interface. */
VLIB_NODE_FN (lookup_mpls_dst_itf_node) (vlib_main_t * vm,
                                         vlib_node_runtime_t * node,
                                         vlib_frame_t * from_frame)
    return (lookup_dpo_mpls_inline(vm, node, from_frame, 1));

VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
    .name = "lookup-mpls-dst-itf",
    .vector_size = sizeof (u32),
    .sibling_of = "mpls-lookup",
    .format_trace = format_lookup_mpls_trace,
/* Next nodes for the ip[46]-dst-mcast lookup nodes: either drop
 * (forwarding loop detected) or continue to the mfib RPF check. */
typedef enum lookup_ip_dst_mcast_next_t_ {
    LOOKUP_IP_DST_MCAST_NEXT_DROP,
    LOOKUP_IP_DST_MCAST_NEXT_RPF,
    LOOKUP_IP_DST_MCAST_N_NEXT,
} mfib_forward_lookup_next_t;
/**
 * @brief Multicast dst-address lookup template (ip4 when the trailing
 * template flag is 1, ip6 when 0 — the flag's declaration line is
 * missing from this extract; TODO confirm its name, likely is_v4).
 * Looks the group up in the DPO's mfib table, stores the mfib entry
 * index for the downstream RPF node, and applies the loop-counter
 * guard.
 * NOTE(review): braces, bi/to_next bookkeeping, the ip0 declarations
 * and the lookup argument lists are missing from this extract.
 */
lookup_dpo_ip_dst_mcast_inline (vlib_main_t * vm,
                                vlib_node_runtime_t * node,
                                vlib_frame_t * from_frame,
    u32 n_left_from, next_index, * from, * to_next;

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;

    next_index = LOOKUP_IP_DST_MCAST_NEXT_RPF;

    while (n_left_from > 0)
        vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

        /* while (n_left_from >= 4 && n_left_to_next >= 2) */

        while (n_left_from > 0 && n_left_to_next > 0)
            u32 bi0, lkdi0, fib_index0, next0;
            const lookup_dpo_t * lkd0;
            fib_node_index_t mfei0;

            n_left_to_next -= 1;

            b0 = vlib_get_buffer (vm, bi0);

            /* dst lookup was done by the previous lookup node
             * (NOTE(review): original comment said "mpls lookup" —
             * likely a copy/paste; this path handles IP multicast) */
            lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
            lkd0 = lookup_dpo_get(lkdi0);
            fib_index0 = lkd0->lkd_fib_index;
            next0 = LOOKUP_IP_DST_MCAST_NEXT_RPF;

            /* ip4 branch */
            ip0 = vlib_buffer_get_current (b0);
            mfei0 = ip4_mfib_table_lookup(ip4_mfib_get(fib_index0),

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                lookup_trace_t *tr = vlib_add_trace (vm, node,
                tr->fib_index = fib_index0;
                tr->addr.ip4 = ip0->dst_address;

            /* ip6 branch */
            ip0 = vlib_buffer_get_current (b0);
            mfei0 = ip6_mfib_table_fwd_lookup(ip6_mfib_get(fib_index0),

            if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                lookup_trace_t *tr = vlib_add_trace (vm, node,
                tr->fib_index = fib_index0;
                tr->addr.ip6 = ip0->dst_address;

            /* hand the mfib entry to the RPF node */
            vnet_buffer (b0)->ip.adj_index[VLIB_TX] = mfei0;

            if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
                vnet_buffer2(b0)->loop_counter = 0;
                b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;

            vnet_buffer2(b0)->loop_counter++;

            if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
                next0 = LOOKUP_IP_DST_MCAST_NEXT_DROP;

            vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    return from_frame->n_vectors;
/* lookup-ip4-dst-mcast: group lookup in the DPO's ip4 mfib. */
VLIB_NODE_FN (lookup_ip4_dst_mcast_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * from_frame)
    return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 1));

VLIB_REGISTER_NODE (lookup_ip4_dst_mcast_node) = {
    .name = "lookup-ip4-dst-mcast",
    .vector_size = sizeof (u32),
    .format_trace = format_lookup_trace,
    .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
        [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip4-drop",
        [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip4-mfib-forward-rpf",

/* lookup-ip6-dst-mcast: group lookup in the DPO's ip6 mfib. */
VLIB_NODE_FN (lookup_ip6_dst_mcast_node) (vlib_main_t * vm,
                                          vlib_node_runtime_t * node,
                                          vlib_frame_t * from_frame)
    return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 0));

VLIB_REGISTER_NODE (lookup_ip6_dst_mcast_node) = {
    .name = "lookup-ip6-dst-mcast",
    .vector_size = sizeof (u32),
    .format_trace = format_lookup_trace,
    .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
        [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip6-drop",
        [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip6-mfib-forward-rpf",
/* dv_mem_show callback: report pool occupancy/size for "show fib
 * memory"-style output. */
lookup_dpo_mem_show (void)
    fib_show_memory_usage("Lookup",
                          pool_elts(lookup_dpo_pool),
                          pool_len(lookup_dpo_pool),
                          sizeof(lookup_dpo_t));
/* DPO virtual function tables. Two copies exist so that the memory
 * usage is reported once (against DPO_LOOKUP) rather than once per
 * registered sub-type. */
const static dpo_vft_t lkd_vft = {
    .dv_lock = lookup_dpo_lock,
    .dv_unlock = lookup_dpo_unlock,
    .dv_format = format_lookup_dpo,
const static dpo_vft_t lkd_vft_w_mem_show = {
    .dv_lock = lookup_dpo_lock,
    .dv_unlock = lookup_dpo_unlock,
    .dv_format = format_lookup_dpo,
    .dv_mem_show = lookup_dpo_mem_show,
/* Per-protocol VLIB graph-node name tables handed to
 * dpo_register_new_type for each lookup sub-type.
 * NOTE(review): the string entries of the src/dst per-proto arrays
 * (e.g. "lookup-ip4-src") and the closing braces are missing from
 * this extract; the node-name strings visible below are kept
 * byte-for-byte. */
const static char* const lookup_src_ip4_nodes[] =
const static char* const lookup_src_ip6_nodes[] =
const static char* const * const lookup_src_nodes[DPO_PROTO_NUM] =
    [DPO_PROTO_IP4] = lookup_src_ip4_nodes,
    [DPO_PROTO_IP6] = lookup_src_ip6_nodes,
    /* no src-lookup node exists for MPLS */
    [DPO_PROTO_MPLS] = NULL,

const static char* const lookup_dst_ip4_nodes[] =
const static char* const lookup_dst_ip6_nodes[] =
const static char* const lookup_dst_mpls_nodes[] =
const static char* const * const lookup_dst_nodes[DPO_PROTO_NUM] =
    [DPO_PROTO_IP4] = lookup_dst_ip4_nodes,
    [DPO_PROTO_IP6] = lookup_dst_ip6_nodes,
    [DPO_PROTO_MPLS] = lookup_dst_mpls_nodes,

const static char* const lookup_dst_mcast_ip4_nodes[] =
    "lookup-ip4-dst-mcast",
const static char* const lookup_dst_mcast_ip6_nodes[] =
    "lookup-ip6-dst-mcast",
const static char* const * const lookup_dst_mcast_nodes[DPO_PROTO_NUM] =
    [DPO_PROTO_IP4] = lookup_dst_mcast_ip4_nodes,
    [DPO_PROTO_IP6] = lookup_dst_mcast_ip6_nodes,

const static char* const lookup_dst_from_interface_ip4_nodes[] =
    "lookup-ip4-dst-itf",
const static char* const lookup_dst_from_interface_ip6_nodes[] =
    "lookup-ip6-dst-itf",
const static char* const lookup_dst_from_interface_mpls_nodes[] =
    "lookup-mpls-dst-itf",
const static char* const * const lookup_dst_from_interface_nodes[DPO_PROTO_NUM] =
    [DPO_PROTO_IP4] = lookup_dst_from_interface_ip4_nodes,
    [DPO_PROTO_IP6] = lookup_dst_from_interface_ip6_nodes,
    [DPO_PROTO_MPLS] = lookup_dst_from_interface_mpls_nodes,
1457 static clib_error_t *
1458 lookup_dpo_show (vlib_main_t * vm,
1459 unformat_input_t * input,
1460 vlib_cli_command_t * cmd)
1462 index_t lkdi = INDEX_INVALID;
1464 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1466 if (unformat (input, "%d", &lkdi))
1472 if (INDEX_INVALID != lkdi)
1474 if (pool_is_free_index(lookup_dpo_pool, lkdi))
1475 vlib_cli_output (vm, "no such index %d", lkdi);
1477 vlib_cli_output (vm, "%U", format_lookup_dpo, lkdi);
1483 pool_foreach(lkd, lookup_dpo_pool,
1485 vlib_cli_output (vm, "[@%d] %U",
1486 lookup_dpo_get_index(lkd),
1488 lookup_dpo_get_index(lkd));
1495 VLIB_CLI_COMMAND (replicate_show_command, static) = {
1496 .path = "show lookup-dpo",
1497 .short_help = "show lookup-dpo [<index>]",
1498 .function = lookup_dpo_show,
1502 lookup_dpo_module_init (void)
1504 dpo_register(DPO_LOOKUP, &lkd_vft_w_mem_show, NULL);
1507 * There are various sorts of lookup; src or dst addr v4 /v6 etc.
1508 * there isn't an object type for each (there is only the lookup_dpo_t),
1509 * but, for performance reasons, there is a data plane function, and hence
1510 * VLIB node for each. VLIB graph node construction is based on DPO types
1511 * so we create sub-types.
1513 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC] =
1514 dpo_register_new_type(&lkd_vft, lookup_src_nodes);
1515 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST] =
1516 dpo_register_new_type(&lkd_vft, lookup_dst_nodes);
1517 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST] =
1518 dpo_register_new_type(&lkd_vft, lookup_dst_mcast_nodes);
1519 lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE] =
1520 dpo_register_new_type(&lkd_vft, lookup_dst_from_interface_nodes);