2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/ip/ip.h>
17 #include <vnet/dpo/lookup_dpo.h>
18 #include <vnet/dpo/load_balance_map.h>
19 #include <vnet/mpls/mpls_lookup.h>
20 #include <vnet/fib/fib_table.h>
21 #include <vnet/fib/ip4_fib.h>
22 #include <vnet/fib/ip6_fib.h>
23 #include <vnet/fib/mpls_fib.h>
24 #include <vnet/mfib/mfib_table.h>
25 #include <vnet/mfib/ip4_mfib.h>
26 #include <vnet/mfib/ip6_mfib.h>
/* Printable names for the lookup input/cast enums; the LOOKUP_INPUTS and
 * LOOKUP_CASTS list macros are declared in lookup_dpo.h (not visible here). */
28 static const char *const lookup_input_names[] = LOOKUP_INPUTS;
29 static const char *const lookup_cast_names[] = LOOKUP_CASTS;
32 * If a packet encounters a lookup DPO more than this many times
33 * then we assume there is a loop in the forwarding graph and drop the packet
35 #define MAX_LUKPS_PER_PACKET 4
38 * @brief Enumeration of the lookup subtypes
/* NOTE(review): the opening enumerators (presumably ..._SRC and ..._DST) are
 * elided from this chunk; the FOR_EACH macro below implies the first member
 * is LOOKUP_SUB_TYPE_IP4_SRC — confirm against the full file. */
40 typedef enum lookup_sub_type_t_
44     LOOKUP_SUB_TYPE_DST_MCAST,
45     LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE,
/* Number of sub-types = last enumerator + 1 */
47 #define LOOKUP_SUB_TYPE_NUM (LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE+1)
/* Iterate over all lookup sub-types */
49 #define FOR_EACH_LOOKUP_SUB_TYPE(_st)                                   \
50     for (_st = LOOKUP_SUB_TYPE_IP4_SRC; _st < LOOKUP_SUB_TYPE_NUM; _st++)
/* NOTE(review): comment says "MPLS Label DPOs" but this is the lookup DPO
 * pool — looks like a copy-paste from mpls_label_dpo.c; verify and fix. */
53 * @brief pool of all MPLS Label DPOs
55 lookup_dpo_t *lookup_dpo_pool;
58 * @brief An array of registered DPO type values for the sub-types
/* Filled in at init by dpo_register_new_type(); indexed by lookup_sub_type_t_ */
60 static dpo_type_t lookup_dpo_sub_types[LOOKUP_SUB_TYPE_NUM];
/* Allocate a new lookup DPO from the pool.
 * NOTE(review): the return type, locals and return statement are elided in
 * this chunk; only the pool-get is visible. */
63 lookup_dpo_alloc (void)
/* Serialize pool expansion against worker threads, then take a
 * cache-line-aligned element. */
69     dpo_pool_barrier_sync (vm, lookup_dpo_pool, did_barrier_sync);
70     pool_get_aligned(lookup_dpo_pool, lkd, CLIB_CACHE_LINE_BYTES);
71     dpo_pool_barrier_release (vm, did_barrier_sync);
/* Return the pool index of a lookup DPO (pointer arithmetic into the pool). */
77 lookup_dpo_get_index (lookup_dpo_t *lkd)
79     return (lkd - lookup_dpo_pool);
/* Internal worker: allocate and populate a lookup DPO, select the DPO
 * sub-type from (input, table_config, cast), and bind it into *dpo.
 * NOTE(review): several parameter lines, braces and break statements are
 * elided in this chunk. */
83 lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
87                           lookup_table_t table_config,
93     lkd = lookup_dpo_alloc();
94     lkd->lkd_fib_index = fib_index;
95     lkd->lkd_proto     = proto;
96     lkd->lkd_input     = input;
97     lkd->lkd_table     = table_config;
101      * use the input type to select the lookup sub-type
107     case LOOKUP_INPUT_SRC_ADDR:
108         type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC];
110     case LOOKUP_INPUT_DST_ADDR:
/* Destination lookups further split on where the FIB index comes from */
111         switch (table_config)
113         case LOOKUP_TABLE_FROM_INPUT_INTERFACE:
114             type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE];
116         case LOOKUP_TABLE_FROM_CONFIG:
117             type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST];
/* multicast dst lookup overrides the sub-type chosen above */
120         if (LOOKUP_MULTICAST == cast)
122             type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST];
/* dpo_set() takes a reference on the lookup DPO */
132     dpo_set(dpo, type, proto, lookup_dpo_get_index(lkd));
/* Create a lookup DPO against an already-known FIB index. If the table was
 * chosen by configuration, take a lock on the (m)fib table so it cannot be
 * deleted while this DPO references it; the matching unlock is in
 * lookup_dpo_unlock(). */
137 lookup_dpo_add_or_lock_w_fib_index (fib_node_index_t fib_index,
140                                     lookup_input_t input,
141                                     lookup_table_t table_config,
144     if (LOOKUP_TABLE_FROM_CONFIG == table_config)
146         if (LOOKUP_UNICAST == cast)
148             fib_table_lock(fib_index,
149                            dpo_proto_to_fib(proto),
/* multicast: lock the mfib table instead */
154             mfib_table_lock(fib_index,
155                             dpo_proto_to_fib(proto),
159     lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
/* As above but keyed by user-visible table-id: find-or-create the (m)fib
 * table (which also locks it), then build the DPO on the resulting index. */
163 lookup_dpo_add_or_lock_w_table_id (u32 table_id,
166                                    lookup_input_t input,
167                                    lookup_table_t table_config,
170     fib_node_index_t fib_index = FIB_NODE_INDEX_INVALID;
172     if (LOOKUP_TABLE_FROM_CONFIG == table_config)
174         if (LOOKUP_UNICAST == cast)
177                 fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
184                 mfib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
/* table_config==FROM_INPUT_INTERFACE leaves fib_index INVALID here;
 * NOTE(review): the ASSERT below would then fire — presumably that path is
 * never taken with this API; confirm against callers. */
190     ASSERT(FIB_NODE_INDEX_INVALID != fib_index);
191     lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
/* vlib format function: render a lookup DPO (by pool index) as
 * "<input>,<cast> lookup in <table>". */
195 format_lookup_dpo (u8 *s, va_list *args)
197     index_t index = va_arg (*args, index_t);
200     lkd = lookup_dpo_get(index);
202     if (LOOKUP_TABLE_FROM_INPUT_INTERFACE == lkd->lkd_table)
/* per-interface table: no fixed fib index to name, show the proto only */
204         s = format(s, "%s,%s lookup in interface's %U table",
205                    lookup_input_names[lkd->lkd_input],
206                    lookup_cast_names[lkd->lkd_cast],
207                    format_dpo_proto, lkd->lkd_proto);
211         if (LOOKUP_UNICAST == lkd->lkd_cast)
213             s = format(s, "%s,%s lookup in %U",
214                        lookup_input_names[lkd->lkd_input],
215                        lookup_cast_names[lkd->lkd_cast],
216                        format_fib_table_name, lkd->lkd_fib_index,
217                        dpo_proto_to_fib(lkd->lkd_proto));
/* multicast: name the mfib table instead */
221             s = format(s, "%s,%s lookup in %U",
222                        lookup_input_names[lkd->lkd_input],
223                        lookup_cast_names[lkd->lkd_cast],
224                        format_mfib_table_name, lkd->lkd_fib_index,
225                        dpo_proto_to_fib(lkd->lkd_proto));
/* DPO vft lock: take a reference on the lookup DPO.
 * NOTE(review): the lkd_locks increment is elided from this chunk. */
232 lookup_dpo_lock (dpo_id_t *dpo)
236     lkd = lookup_dpo_get(dpo->dpoi_index);
/* DPO vft unlock: drop a reference; on the last reference release the
 * (m)fib table lock taken at create time and return the DPO to the pool. */
242 lookup_dpo_unlock (dpo_id_t *dpo)
246     lkd = lookup_dpo_get(dpo->dpoi_index);
250     if (0 == lkd->lkd_locks)
/* only config-selected tables were locked at creation */
252         if (LOOKUP_TABLE_FROM_CONFIG == lkd->lkd_table)
254             if (LOOKUP_UNICAST == lkd->lkd_cast)
256                 fib_table_unlock(lkd->lkd_fib_index,
257                                  dpo_proto_to_fib(lkd->lkd_proto),
262                 mfib_table_unlock(lkd->lkd_fib_index,
263                                   dpo_proto_to_fib(lkd->lkd_proto),
267         pool_put(lookup_dpo_pool, lkd);
272  * @brief Lookup trace data
/* Per-packet trace record shared by all lookup nodes: the MPLS header (for
 * the mpls nodes), the FIB index looked in, and (elided here) the address
 * and resulting load-balance index. */
274 typedef struct lookup_trace_t_
278     mpls_unicast_header_t hdr;
280     fib_node_index_t fib_index;
/* IPv4 lookup-DPO data-plane function, specialized by the node wrappers via
 * the (elided) src/dst flag and table_from_interface. For each packet:
 * recover the lookup DPO from adj_index[VLIB_TX], pick a FIB index, do an
 * ip4 FIB lookup on src or dst address, resolve a load-balance bucket, and
 * enqueue to that DPO's next node. A per-buffer loop counter bounds the
 * number of lookups to MAX_LUKPS_PER_PACKET to break forwarding loops.
 * NOTE(review): many structural lines (braces, dual-loop buffer fetch, the
 * src/dst if) are elided from this chunk. */
286 lookup_dpo_ip4_inline (vlib_main_t * vm,
287                        vlib_node_runtime_t * node,
288                        vlib_frame_t * from_frame,
290                        int table_from_interface)
292     u32 n_left_from, next_index, * from, * to_next;
293     u32 thread_index = vlib_get_thread_index();
294     vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
296     from = vlib_frame_vector_args (from_frame);
297     n_left_from = from_frame->n_vectors;
299     next_index = node->cached_next_index;
301     while (n_left_from > 0)
305         vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
/* dual-loop: process packets two at a time */
307         while (n_left_from >= 4 && n_left_to_next > 2)
309             u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
310             flow_hash_config_t flow_hash_config0;
311             const ip4_address_t *input_addr0;
312             const load_balance_t *lb0;
313             const lookup_dpo_t * lkd0;
314             const ip4_header_t * ip0;
315             const dpo_id_t *dpo0;
317             u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
318             flow_hash_config_t flow_hash_config1;
319             const ip4_address_t *input_addr1;
320             const load_balance_t *lb1;
321             const lookup_dpo_t * lkd1;
322             const ip4_header_t * ip1;
323             const dpo_id_t *dpo1;
326             /* Prefetch next iteration. */
328                 vlib_buffer_t * p2, * p3;
330                 p2 = vlib_get_buffer (vm, from[2]);
331                 p3 = vlib_get_buffer (vm, from[3]);
333                 vlib_prefetch_buffer_header (p2, LOAD);
334                 vlib_prefetch_buffer_header (p3, LOAD);
336                 clib_prefetch_store (p2->data);
337                 clib_prefetch_store (p3->data);
349             b0 = vlib_get_buffer (vm, bi0);
350             ip0 = vlib_buffer_get_current (b0);
351             b1 = vlib_get_buffer (vm, bi1);
352             ip1 = vlib_buffer_get_current (b1);
354             /* dst lookup was done by ip4 lookup */
355             lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
356             lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
357             lkd0 = lookup_dpo_get(lkdi0);
358             lkd1 = lookup_dpo_get(lkdi1);
361              * choose between a lookup using the fib index in the DPO
362              * or getting the FIB index from the interface.
364             if (table_from_interface)
367                     ip4_fib_table_get_index_for_sw_if_index(
368                         vnet_buffer(b0)->sw_if_index[VLIB_RX]);
370                     ip4_fib_table_get_index_for_sw_if_index(
371                         vnet_buffer(b1)->sw_if_index[VLIB_RX]);
375                 fib_index0 = lkd0->lkd_fib_index;
376                 fib_index1 = lkd1->lkd_fib_index;
380              * choose between a source or destination address lookup in the table
384                 input_addr0 = &ip0->src_address;
385                 input_addr1 = &ip1->src_address;
389                 input_addr0 = &ip0->dst_address;
390                 input_addr1 = &ip1->dst_address;
/* paired mtrie lookup for both packets */
394             ip4_fib_forwarding_lookup_x2 (fib_index0, fib_index1, input_addr0,
395                                           input_addr1, &lbi0, &lbi1);
396             lb0 = load_balance_get(lbi0);
397             lb1 = load_balance_get(lbi1);
/* stash the fib index for downstream nodes */
399             vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
400             vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
402             /* Use flow hash to compute multipath adjacency. */
403             hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
404             hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
406             if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
408                 flow_hash_config0 = lb0->lb_hash_config;
409                 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
410                     ip4_compute_flow_hash (ip0, flow_hash_config0);
413             if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
415                 flow_hash_config1 = lb1->lb_hash_config;
416                 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
417                     ip4_compute_flow_hash (ip1, flow_hash_config1);
/* select a bucket; bucket count is a power of two so mask with n-1 */
420             dpo0 = load_balance_get_bucket_i(lb0,
422                                               (lb0->lb_n_buckets_minus_1)));
423             dpo1 = load_balance_get_bucket_i(lb1,
425                                               (lb1->lb_n_buckets_minus_1)));
427             next0 = dpo0->dpoi_next_node;
428             next1 = dpo1->dpoi_next_node;
429             vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
430             vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
432             vlib_increment_combined_counter
433                 (cm, thread_index, lbi0, 1,
434                  vlib_buffer_length_in_chain (vm, b0));
435             vlib_increment_combined_counter
436                 (cm, thread_index, lbi1, 1,
437                  vlib_buffer_length_in_chain (vm, b1));
/* lazily initialize the per-buffer loop counter */
439             if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
440                 vnet_buffer2(b0)->loop_counter = 0;
441                 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
443             if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
444                 vnet_buffer2(b1)->loop_counter = 0;
445                 b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
448             vnet_buffer2(b0)->loop_counter++;
449             vnet_buffer2(b1)->loop_counter++;
/* drop packets that have looped through too many lookups */
451             if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
452                 next0 = IP_LOOKUP_NEXT_DROP;
453             if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
454                 next1 = IP_LOOKUP_NEXT_DROP;
456             if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
458                 lookup_trace_t *tr = vlib_add_trace (vm, node,
460                 tr->fib_index = fib_index0;
462                 tr->addr.ip4 = *input_addr0;
464             if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
466                 lookup_trace_t *tr = vlib_add_trace (vm, node,
468                 tr->fib_index = fib_index1;
470                 tr->addr.ip4 = *input_addr1;
473             vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
474                                              to_next, n_left_to_next,
475                                              bi0, bi1, next0, next1);
/* single-loop: same logic, one packet at a time */
478         while (n_left_from > 0 && n_left_to_next > 0)
480             u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
481             flow_hash_config_t flow_hash_config0;
482             const ip4_address_t *input_addr;
483             const load_balance_t *lb0;
484             const lookup_dpo_t * lkd0;
485             const ip4_header_t * ip0;
486             const dpo_id_t *dpo0;
496             b0 = vlib_get_buffer (vm, bi0);
497             ip0 = vlib_buffer_get_current (b0);
499             /* dst lookup was done by ip4 lookup */
500             lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
501             lkd0 = lookup_dpo_get(lkdi0);
504              * choose between a lookup using the fib index in the DPO
505              * or getting the FIB index from the interface.
507             if (table_from_interface)
510                     ip4_fib_table_get_index_for_sw_if_index(
511                         vnet_buffer(b0)->sw_if_index[VLIB_RX]);
515                 fib_index0 = lkd0->lkd_fib_index;
519              * choose between a source or destination address lookup in the table
523                 input_addr = &ip0->src_address;
527                 input_addr = &ip0->dst_address;
531             lbi0 = ip4_fib_forwarding_lookup (fib_index0, input_addr);
532             lb0 = load_balance_get(lbi0);
534             vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
536             /* Use flow hash to compute multipath adjacency. */
537             hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
539             if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
541                 flow_hash_config0 = lb0->lb_hash_config;
542                 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
543                     ip4_compute_flow_hash (ip0, flow_hash_config0);
546             dpo0 = load_balance_get_bucket_i(lb0,
548                                               (lb0->lb_n_buckets_minus_1)));
550             next0 = dpo0->dpoi_next_node;
551             vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
553             vlib_increment_combined_counter
554                 (cm, thread_index, lbi0, 1,
555                  vlib_buffer_length_in_chain (vm, b0));
557             if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
558                 vnet_buffer2(b0)->loop_counter = 0;
559                 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
562             vnet_buffer2(b0)->loop_counter++;
564             if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
565                 next0 = IP_LOOKUP_NEXT_DROP;
567             if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
569                 lookup_trace_t *tr = vlib_add_trace (vm, node,
571                 tr->fib_index = fib_index0;
573                 tr->addr.ip4 = *input_addr;
576             vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
577                                             n_left_to_next, bi0, next0);
579         vlib_put_next_frame (vm, node, next_index, n_left_to_next);
581     return from_frame->n_vectors;
/* Trace formatter shared by the ip4/ip6 lookup nodes: fib index, the
 * address looked up (printed as ip46), and the resulting load-balance. */
585 format_lookup_trace (u8 * s, va_list * args)
587     CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
588     CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
589     lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
590     u32 indent = format_get_indent (s);
591     s = format (s, "%U fib-index:%d addr:%U load-balance:%d",
592                 format_white_space, indent,
594                 format_ip46_address, &t->addr, IP46_TYPE_ANY,
/* Node "lookup-ip4-dst": ip4 dst-address lookup in the DPO's configured FIB
 * (args: src=0, table_from_interface=0). Sibling of ip4-lookup so it shares
 * that node's next-node arcs. */
599 VLIB_NODE_FN (lookup_ip4_dst_node) (vlib_main_t * vm,
600                                     vlib_node_runtime_t * node,
601                                     vlib_frame_t * from_frame)
603     return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 0));
606 VLIB_REGISTER_NODE (lookup_ip4_dst_node) = {
607     .name = "lookup-ip4-dst",
608     .vector_size = sizeof (u32),
609     .sibling_of = "ip4-lookup",
610     .format_trace = format_lookup_trace,
/* Node "lookup-ip4-dst-itf": ip4 dst lookup in the RX interface's FIB
 * (args: src=0, table_from_interface=1). */
613 VLIB_NODE_FN (lookup_ip4_dst_itf_node) (vlib_main_t * vm,
614                                         vlib_node_runtime_t * node,
615                                         vlib_frame_t * from_frame)
617     return (lookup_dpo_ip4_inline(vm, node, from_frame, 0, 1));
620 VLIB_REGISTER_NODE (lookup_ip4_dst_itf_node) = {
621     .name = "lookup-ip4-dst-itf",
622     .vector_size = sizeof (u32),
623     .sibling_of = "ip4-lookup",
624     .format_trace = format_lookup_trace,
/* Node "lookup-ip4-src": ip4 src-address lookup in the DPO's FIB
 * (args: src=1, table_from_interface=0) — used e.g. for uRPF-style paths. */
627 VLIB_NODE_FN (lookup_ip4_src_node) (vlib_main_t * vm,
628                                     vlib_node_runtime_t * node,
629                                     vlib_frame_t * from_frame)
631     return (lookup_dpo_ip4_inline(vm, node, from_frame, 1, 0));
634 VLIB_REGISTER_NODE (lookup_ip4_src_node) = {
635     .name = "lookup-ip4-src",
636     .vector_size = sizeof (u32),
637     .format_trace = format_lookup_trace,
638     .sibling_of = "ip4-lookup",
/* IPv6 twin of lookup_dpo_ip4_inline: same structure (dual + single loop,
 * FIB index from DPO or RX interface, src/dst address selection, flow-hash
 * bucket choice, loop-counter drop guard), using the ip6 fwding table.
 * NOTE(review): braces, buffer-fetch lines and the src/dst if are elided. */
642 lookup_dpo_ip6_inline (vlib_main_t * vm,
643                        vlib_node_runtime_t * node,
644                        vlib_frame_t * from_frame,
646                        int table_from_interface)
648     vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
649     u32 n_left_from, next_index, * from, * to_next;
650     u32 thread_index = vlib_get_thread_index();
652     from = vlib_frame_vector_args (from_frame);
653     n_left_from = from_frame->n_vectors;
655     next_index = node->cached_next_index;
657     while (n_left_from > 0)
661         vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
/* dual-loop */
663         while (n_left_from >= 4 && n_left_to_next > 2)
665             u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
666             flow_hash_config_t flow_hash_config0;
667             const ip6_address_t *input_addr0;
668             const load_balance_t *lb0;
669             const lookup_dpo_t * lkd0;
670             const ip6_header_t * ip0;
671             const dpo_id_t *dpo0;
673             u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1;
674             flow_hash_config_t flow_hash_config1;
675             const ip6_address_t *input_addr1;
676             const load_balance_t *lb1;
677             const lookup_dpo_t * lkd1;
678             const ip6_header_t * ip1;
679             const dpo_id_t *dpo1;
682             /* Prefetch next iteration. */
684                 vlib_buffer_t * p2, * p3;
686                 p2 = vlib_get_buffer (vm, from[2]);
687                 p3 = vlib_get_buffer (vm, from[3]);
689                 vlib_prefetch_buffer_header (p2, LOAD);
690                 vlib_prefetch_buffer_header (p3, LOAD);
692                 clib_prefetch_store (p2->data);
693                 clib_prefetch_store (p3->data);
705             b0 = vlib_get_buffer (vm, bi0);
706             ip0 = vlib_buffer_get_current (b0);
707             b1 = vlib_get_buffer (vm, bi1);
708             ip1 = vlib_buffer_get_current (b1);
710             /* dst lookup was done by ip6 lookup */
711             lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
712             lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
713             lkd0 = lookup_dpo_get(lkdi0);
714             lkd1 = lookup_dpo_get(lkdi1);
717              * choose between a lookup using the fib index in the DPO
718              * or getting the FIB index from the interface.
720             if (table_from_interface)
723                     ip6_fib_table_get_index_for_sw_if_index(
724                         vnet_buffer(b0)->sw_if_index[VLIB_RX]);
726                     ip6_fib_table_get_index_for_sw_if_index(
727                         vnet_buffer(b1)->sw_if_index[VLIB_RX]);
731                 fib_index0 = lkd0->lkd_fib_index;
732                 fib_index1 = lkd1->lkd_fib_index;
736              * choose between a source or destination address lookup in the table
740                 input_addr0 = &ip0->src_address;
741                 input_addr1 = &ip1->src_address;
745                 input_addr0 = &ip0->dst_address;
746                 input_addr1 = &ip1->dst_address;
750             lbi0 = ip6_fib_table_fwding_lookup(
753             lbi1 = ip6_fib_table_fwding_lookup(
756             lb0 = load_balance_get(lbi0);
757             lb1 = load_balance_get(lbi1);
759             vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
760             vnet_buffer(b1)->sw_if_index[VLIB_TX] = fib_index1;
762             /* Use flow hash to compute multipath adjacency. */
763             hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
764             hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
/* loop-counter guard; NOTE(review): here it runs before next0/next1 are
 * assigned from the dpo (unlike the ip4 path, which guards after) — the
 * effect is the same since the drop override below wins, but confirm the
 * MAX_LUKPS drop assignments are not later overwritten by dpoi_next_node. */
766             if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
767                 vnet_buffer2(b0)->loop_counter = 0;
768                 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
770             if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
771                 vnet_buffer2(b1)->loop_counter = 0;
772                 b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
775             vnet_buffer2(b0)->loop_counter++;
776             vnet_buffer2(b1)->loop_counter++;
778             if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
779                 next0 = IP_LOOKUP_NEXT_DROP;
780             if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
781                 next1 = IP_LOOKUP_NEXT_DROP;
783             if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
785                 flow_hash_config0 = lb0->lb_hash_config;
786                 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
787                     ip6_compute_flow_hash (ip0, flow_hash_config0);
790             if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
792                 flow_hash_config1 = lb1->lb_hash_config;
793                 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
794                     ip6_compute_flow_hash (ip1, flow_hash_config1);
797             dpo0 = load_balance_get_bucket_i(lb0,
799                                               (lb0->lb_n_buckets_minus_1)));
800             dpo1 = load_balance_get_bucket_i(lb1,
802                                               (lb1->lb_n_buckets_minus_1)));
804             next0 = dpo0->dpoi_next_node;
805             next1 = dpo1->dpoi_next_node;
806             vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
807             vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
809             vlib_increment_combined_counter
810                 (cm, thread_index, lbi0, 1,
811                  vlib_buffer_length_in_chain (vm, b0));
812             vlib_increment_combined_counter
813                 (cm, thread_index, lbi1, 1,
814                  vlib_buffer_length_in_chain (vm, b1));
816             if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
818                 lookup_trace_t *tr = vlib_add_trace (vm, node,
820                 tr->fib_index = fib_index0;
822                 tr->addr.ip6 = *input_addr0;
824             if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
826                 lookup_trace_t *tr = vlib_add_trace (vm, node,
828                 tr->fib_index = fib_index1;
830                 tr->addr.ip6 = *input_addr1;
832             vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
833                                             n_left_to_next, bi0, bi1,
/* single-loop */
836         while (n_left_from > 0 && n_left_to_next > 0)
838             u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0;
839             flow_hash_config_t flow_hash_config0;
840             const ip6_address_t *input_addr0;
841             const load_balance_t *lb0;
842             const lookup_dpo_t * lkd0;
843             const ip6_header_t * ip0;
844             const dpo_id_t *dpo0;
854             b0 = vlib_get_buffer (vm, bi0);
855             ip0 = vlib_buffer_get_current (b0);
857             /* dst lookup was done by ip6 lookup */
858             lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
859             lkd0 = lookup_dpo_get(lkdi0);
862              * choose between a lookup using the fib index in the DPO
863              * or getting the FIB index from the interface.
865             if (table_from_interface)
868                     ip6_fib_table_get_index_for_sw_if_index(
869                         vnet_buffer(b0)->sw_if_index[VLIB_RX]);
873                 fib_index0 = lkd0->lkd_fib_index;
877              * choose between a source or destination address lookup in the table
881                 input_addr0 = &ip0->src_address;
885                 input_addr0 = &ip0->dst_address;
889             lbi0 = ip6_fib_table_fwding_lookup(
892             lb0 = load_balance_get(lbi0);
894             vnet_buffer(b0)->sw_if_index[VLIB_TX] = fib_index0;
896             /* Use flow hash to compute multipath adjacency. */
897             hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
899             if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
901                 flow_hash_config0 = lb0->lb_hash_config;
902                 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
903                     ip6_compute_flow_hash (ip0, flow_hash_config0);
906             dpo0 = load_balance_get_bucket_i(lb0,
908                                               (lb0->lb_n_buckets_minus_1)));
910             next0 = dpo0->dpoi_next_node;
911             vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
913             if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
914                 vnet_buffer2(b0)->loop_counter = 0;
915                 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
918             vnet_buffer2(b0)->loop_counter++;
920             if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
921                 next0 = IP_LOOKUP_NEXT_DROP;
923             vlib_increment_combined_counter
924                 (cm, thread_index, lbi0, 1,
925                  vlib_buffer_length_in_chain (vm, b0));
927             if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
929                 lookup_trace_t *tr = vlib_add_trace (vm, node,
931                 tr->fib_index = fib_index0;
933                 tr->addr.ip6 = *input_addr0;
935             vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
936                                             n_left_to_next, bi0, next0);
938         vlib_put_next_frame (vm, node, next_index, n_left_to_next);
940     return from_frame->n_vectors;
/* Node "lookup-ip6-dst": ip6 dst-address lookup in the DPO's FIB. */
943 VLIB_NODE_FN (lookup_ip6_dst_node) (vlib_main_t * vm,
944                                     vlib_node_runtime_t * node,
945                                     vlib_frame_t * from_frame)
947     return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* use dst, not src */, 0));
950 VLIB_REGISTER_NODE (lookup_ip6_dst_node) = {
951     .name = "lookup-ip6-dst",
952     .vector_size = sizeof (u32),
953     .format_trace = format_lookup_trace,
954     .sibling_of = "ip6-lookup",
/* Node "lookup-ip6-dst-itf": ip6 dst lookup in the RX interface's FIB. */
957 VLIB_NODE_FN (lookup_ip6_dst_itf_node) (vlib_main_t * vm,
958                                         vlib_node_runtime_t * node,
959                                         vlib_frame_t * from_frame)
961     return (lookup_dpo_ip6_inline(vm, node, from_frame, 0 /* use dst, not src */, 1));
964 VLIB_REGISTER_NODE (lookup_ip6_dst_itf_node) = {
965     .name = "lookup-ip6-dst-itf",
966     .vector_size = sizeof (u32),
967     .format_trace = format_lookup_trace,
968     .sibling_of = "ip6-lookup",
/* Node "lookup-ip6-src": ip6 src-address lookup in the DPO's FIB. */
971 VLIB_NODE_FN (lookup_ip6_src_node) (vlib_main_t * vm,
972                                     vlib_node_runtime_t * node,
973                                     vlib_frame_t * from_frame)
975     return (lookup_dpo_ip6_inline(vm, node, from_frame, 1, 0));
978 VLIB_REGISTER_NODE (lookup_ip6_src_node) = {
979     .name = "lookup-ip6-src",
980     .vector_size = sizeof (u32),
981     .format_trace = format_lookup_trace,
982     .sibling_of = "ip6-lookup",
/* MPLS lookup-DPO data-plane function (single-loop only; the dual-loop is a
 * commented-out TODO below). Looks the label up in the chosen MPLS FIB,
 * handles replicate (mcast) results, saves ttl/exp, pops the label, and
 * applies the same loop-counter drop guard as the IP paths.
 * NOTE(review): braces and buffer-fetch lines are elided in this chunk. */
986 lookup_dpo_mpls_inline (vlib_main_t * vm,
987                         vlib_node_runtime_t * node,
988                         vlib_frame_t * from_frame,
989                         int table_from_interface)
991     u32 n_left_from, next_index, * from, * to_next;
992     u32 thread_index = vlib_get_thread_index();
993     vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
995     from = vlib_frame_vector_args (from_frame);
996     n_left_from = from_frame->n_vectors;
998     next_index = node->cached_next_index;
1000     while (n_left_from > 0)
1004         vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
1006         /* while (n_left_from >= 4 && n_left_to_next >= 2) */
1009         while (n_left_from > 0 && n_left_to_next > 0)
1011             u32 bi0, lkdi0, lbi0, fib_index0, next0, hash0;
1012             const mpls_unicast_header_t * hdr0;
1013             const load_balance_t *lb0;
1014             const lookup_dpo_t * lkd0;
1015             const dpo_id_t *dpo0;
1023             n_left_to_next -= 1;
1025             b0 = vlib_get_buffer (vm, bi0);
1026             hdr0 = vlib_buffer_get_current (b0);
1028             /* dst lookup was done by mpls lookup */
1029             lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
1030             lkd0 = lookup_dpo_get(lkdi0);
1033              * choose between a lookup using the fib index in the DPO
1034              * or getting the FIB index from the interface.
1036             if (table_from_interface)
1039                     mpls_fib_table_get_index_for_sw_if_index(
1040                         vnet_buffer(b0)->sw_if_index[VLIB_RX]);
1044                 fib_index0 = lkd0->lkd_fib_index;
1048             lbi0 = mpls_fib_table_forwarding_lookup (fib_index0, hdr0);
1049             lb0 = load_balance_get(lbi0);
1050             dpo0 = load_balance_get_bucket_i(lb0, 0);
1052             next0 = dpo0->dpoi_next_node;
1053             vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
/* the lookup result encodes replicate DPOs in the high bit */
1056             if (MPLS_IS_REPLICATE & lbi0)
1058                 next0 = mpls_lookup_to_replicate_edge;
1059                 vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
1060                     (lbi0 & ~MPLS_IS_REPLICATE);
/* unicast: pick a load-balance bucket, flow-hashing only when needed */
1064                 lb0 = load_balance_get(lbi0);
1065                 ASSERT (lb0->lb_n_buckets > 0);
1066                 ASSERT (is_pow2 (lb0->lb_n_buckets));
1068                 if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
1070                     hash0 = vnet_buffer (b0)->ip.flow_hash =
1071                         mpls_compute_flow_hash(hdr0, lb0->lb_hash_config);
1072                     dpo0 = load_balance_get_fwd_bucket
1074                          (hash0 & (lb0->lb_n_buckets_minus_1)));
1078                     dpo0 = load_balance_get_bucket_i (lb0, 0);
1080                 next0 = dpo0->dpoi_next_node;
1082                 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
1084                 vlib_increment_combined_counter
1085                     (cm, thread_index, lbi0, 1,
1086                      vlib_buffer_length_in_chain (vm, b0));
/* stash ttl/exp from the label entry, then pop it (advance past header) */
1089             vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
1090             vnet_buffer (b0)->mpls.exp = (((char*)hdr0)[2] & 0xe) >> 1;
1091             vnet_buffer (b0)->mpls.first = 1;
1092             vlib_buffer_advance(b0, sizeof(*hdr0));
1094             if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
1095                 vnet_buffer2(b0)->loop_counter = 0;
1096                 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
1099             vnet_buffer2(b0)->loop_counter++;
1101             if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
1102                 next0 = MPLS_LOOKUP_NEXT_DROP;
1104             if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1106                 lookup_trace_t *tr = vlib_add_trace (vm, node,
1108                 tr->fib_index = fib_index0;
1113             vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
1114                                             n_left_to_next, bi0, next0);
1116         vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1118     return from_frame->n_vectors;
/* Trace formatter for the mpls lookup nodes: fib index, the MPLS header
 * (byte-swapped back to host order for printing) and the load-balance. */
1122 format_lookup_mpls_trace (u8 * s, va_list * args)
1124     CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1125     CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1126     lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
1127     u32 indent = format_get_indent (s);
1128     mpls_unicast_header_t hdr;
1130     hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);
1132     s = format (s, "%U fib-index:%d hdr:%U load-balance:%d",
1133                 format_white_space, indent,
1135                 format_mpls_header, hdr,
/* Node "lookup-mpls-dst": label lookup in the DPO's MPLS FIB. */
1140 VLIB_NODE_FN (lookup_mpls_dst_node) (vlib_main_t * vm,
1141                                      vlib_node_runtime_t * node,
1142                                      vlib_frame_t * from_frame)
1144     return (lookup_dpo_mpls_inline(vm, node, from_frame, 0));
1147 VLIB_REGISTER_NODE (lookup_mpls_dst_node) = {
1148     .name = "lookup-mpls-dst",
1149     .vector_size = sizeof (u32),
1150     .sibling_of = "mpls-lookup",
1151     .format_trace = format_lookup_mpls_trace,
/* Node "lookup-mpls-dst-itf": label lookup in the RX interface's MPLS FIB. */
1155 VLIB_NODE_FN (lookup_mpls_dst_itf_node) (vlib_main_t * vm,
1156                                          vlib_node_runtime_t * node,
1157                                          vlib_frame_t * from_frame)
1159     return (lookup_dpo_mpls_inline(vm, node, from_frame, 1));
1162 VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
1163     .name = "lookup-mpls-dst-itf",
1164     .vector_size = sizeof (u32),
1165     .sibling_of = "mpls-lookup",
1166     .format_trace = format_lookup_mpls_trace,
/* Next-node indices for the ip[46]-dst-mcast lookup nodes: drop, or hand
 * off to the mfib-forward-rpf node. */
1170 typedef enum lookup_ip_dst_mcast_next_t_ {
1171     LOOKUP_IP_DST_MCAST_NEXT_DROP,
1172     LOOKUP_IP_DST_MCAST_NEXT_RPF,
1173     LOOKUP_IP_DST_MCAST_N_NEXT,
1174 } mfib_forward_lookup_next_t;
/* Multicast dst lookup: look the group address up in the (m)fib selected by
 * the lookup DPO and pass the matched mfib entry index to the RPF node via
 * adj_index[VLIB_TX]. Compiled twice via the (elided) is_v4 flag.
 * NOTE(review): braces and the v4/v6 if are elided from this chunk. */
1177 lookup_dpo_ip_dst_mcast_inline (vlib_main_t * vm,
1178                                 vlib_node_runtime_t * node,
1179                                 vlib_frame_t * from_frame,
1182     u32 n_left_from, next_index, * from, * to_next;
1184     from = vlib_frame_vector_args (from_frame);
1185     n_left_from = from_frame->n_vectors;
1187     next_index = LOOKUP_IP_DST_MCAST_NEXT_RPF;
1189     while (n_left_from > 0)
1193         vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
1195         /* while (n_left_from >= 4 && n_left_to_next >= 2) */
1198         while (n_left_from > 0 && n_left_to_next > 0)
1200             u32 bi0, lkdi0, fib_index0, next0;
1201             const lookup_dpo_t * lkd0;
1202             fib_node_index_t mfei0;
1210             n_left_to_next -= 1;
1212             b0 = vlib_get_buffer (vm, bi0);
/* recover the lookup DPO left here by the previous lookup node */
1214             /* dst lookup was done by the previous lookup node */
1215             lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
1216             lkd0 = lookup_dpo_get(lkdi0);
1217             fib_index0 = lkd0->lkd_fib_index;
1218             next0 = LOOKUP_IP_DST_MCAST_NEXT_RPF;
/* v4 branch: (*,G) lookup in the ip4 mfib */
1224                 ip0 = vlib_buffer_get_current (b0);
1225                 mfei0 = ip4_mfib_table_lookup(ip4_mfib_get(fib_index0),
1229                 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1231                     lookup_trace_t *tr = vlib_add_trace (vm, node,
1233                     tr->fib_index = fib_index0;
1235                     tr->addr.ip4 = ip0->dst_address;
/* v6 branch: fwd lookup in the ip6 mfib */
1242                 ip0 = vlib_buffer_get_current (b0);
1243                 mfei0 = ip6_mfib_table_fwd_lookup(ip6_mfib_get(fib_index0),
1246                 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
1248                     lookup_trace_t *tr = vlib_add_trace (vm, node,
1250                     tr->fib_index = fib_index0;
1252                     tr->addr.ip6 = ip0->dst_address;
1256             vnet_buffer (b0)->ip.adj_index[VLIB_TX] = mfei0;
1258             if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
1259                 vnet_buffer2(b0)->loop_counter = 0;
1260                 b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
1263             vnet_buffer2(b0)->loop_counter++;
1265             if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
1266                 next0 = LOOKUP_IP_DST_MCAST_NEXT_DROP;
1268             vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
1269                                             n_left_to_next, bi0, next0);
1271         vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1273     return from_frame->n_vectors;
/* Node "lookup-ip4-dst-mcast": ip4 group lookup in the DPO's mfib. */
1276 VLIB_NODE_FN (lookup_ip4_dst_mcast_node) (vlib_main_t * vm,
1277                                           vlib_node_runtime_t * node,
1278                                           vlib_frame_t * from_frame)
1280     return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 1));
1283 VLIB_REGISTER_NODE (lookup_ip4_dst_mcast_node) = {
1284     .name = "lookup-ip4-dst-mcast",
1285     .vector_size = sizeof (u32),
1287     .format_trace = format_lookup_trace,
1288     .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
1290         [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip4-drop",
1291         [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip4-mfib-forward-rpf",
/* Node "lookup-ip6-dst-mcast": ip6 group lookup in the DPO's mfib. */
1295 VLIB_NODE_FN (lookup_ip6_dst_mcast_node) (vlib_main_t * vm,
1296                                           vlib_node_runtime_t * node,
1297                                           vlib_frame_t * from_frame)
1299     return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 0));
1302 VLIB_REGISTER_NODE (lookup_ip6_dst_mcast_node) = {
1303     .name = "lookup-ip6-dst-mcast",
1304     .vector_size = sizeof (u32),
1306     .format_trace = format_lookup_trace,
1307     .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
1309         [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip6-drop",
1310         [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip6-mfib-forward-rpf",
/* DPO vft mem-show callback: report pool occupancy for "show fib memory". */
1315 lookup_dpo_mem_show (void)
1317     fib_show_memory_usage("Lookup",
1318                           pool_elts(lookup_dpo_pool),
1319                           pool_len(lookup_dpo_pool),
1320                           sizeof(lookup_dpo_t));
/* DPO virtual function tables. The sub-types share lkd_vft; only the base
 * DPO_LOOKUP registration carries the mem-show hook so the pool is not
 * reported once per sub-type. */
1323 const static dpo_vft_t lkd_vft = {
1324     .dv_lock = lookup_dpo_lock,
1325     .dv_unlock = lookup_dpo_unlock,
1326     .dv_format = format_lookup_dpo,
1328 const static dpo_vft_t lkd_vft_w_mem_show = {
1329     .dv_lock = lookup_dpo_lock,
1330     .dv_unlock = lookup_dpo_unlock,
1331     .dv_format = format_lookup_dpo,
1332     .dv_mem_show = lookup_dpo_mem_show,
/* Per-protocol graph-node name tables handed to dpo_register_new_type();
 * they bind each lookup sub-type to its data-plane node(s).
 * NOTE(review): the ip4/ip6 src and dst node-name strings are elided in
 * this chunk; only the array shells and the mcast/itf entries are visible. */
1335 const static char* const lookup_src_ip4_nodes[] =
1340 const static char* const lookup_src_ip6_nodes[] =
1345 const static char* const * const lookup_src_nodes[DPO_PROTO_NUM] =
1347     [DPO_PROTO_IP4]  = lookup_src_ip4_nodes,
1348     [DPO_PROTO_IP6]  = lookup_src_ip6_nodes,
/* no src-lookup for MPLS */
1349     [DPO_PROTO_MPLS] = NULL,
1352 const static char* const lookup_dst_ip4_nodes[] =
1357 const static char* const lookup_dst_ip6_nodes[] =
1362 const static char* const lookup_dst_mpls_nodes[] =
1367 const static char* const * const lookup_dst_nodes[DPO_PROTO_NUM] =
1369     [DPO_PROTO_IP4]  = lookup_dst_ip4_nodes,
1370     [DPO_PROTO_IP6]  = lookup_dst_ip6_nodes,
1371     [DPO_PROTO_MPLS] = lookup_dst_mpls_nodes,
1374 const static char* const lookup_dst_mcast_ip4_nodes[] =
1376     "lookup-ip4-dst-mcast",
1379 const static char* const lookup_dst_mcast_ip6_nodes[] =
1381     "lookup-ip6-dst-mcast",
1384 const static char* const * const lookup_dst_mcast_nodes[DPO_PROTO_NUM] =
1386     [DPO_PROTO_IP4]  = lookup_dst_mcast_ip4_nodes,
1387     [DPO_PROTO_IP6]  = lookup_dst_mcast_ip6_nodes,
1390 const static char* const lookup_dst_from_interface_ip4_nodes[] =
1392     "lookup-ip4-dst-itf",
1395 const static char* const lookup_dst_from_interface_ip6_nodes[] =
1397     "lookup-ip6-dst-itf",
1400 const static char* const lookup_dst_from_interface_mpls_nodes[] =
1402     "lookup-mpls-dst-itf",
1405 const static char* const * const lookup_dst_from_interface_nodes[DPO_PROTO_NUM] =
1407     [DPO_PROTO_IP4]  = lookup_dst_from_interface_ip4_nodes,
1408     [DPO_PROTO_IP6]  = lookup_dst_from_interface_ip6_nodes,
1409     [DPO_PROTO_MPLS] = lookup_dst_from_interface_mpls_nodes,
1412 static clib_error_t *
/* CLI handler for "show lookup-dpo [<index>]": with an index, validate and
 * print that DPO; with none, walk the whole pool. */
1413 lookup_dpo_show (vlib_main_t * vm,
1414                  unformat_input_t * input,
1415                  vlib_cli_command_t * cmd)
1417     index_t lkdi = INDEX_INVALID;
1419     while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1421         if (unformat (input, "%d", &lkdi))
1427     if (INDEX_INVALID != lkdi)
/* guard against stale/free indices before formatting */
1429         if (pool_is_free_index(lookup_dpo_pool, lkdi))
1430             vlib_cli_output (vm, "no such index %d", lkdi);
1432             vlib_cli_output (vm, "%U", format_lookup_dpo, lkdi);
1438         pool_foreach (lkd, lookup_dpo_pool)
1440             vlib_cli_output (vm, "[@%d] %U",
1441                              lookup_dpo_get_index(lkd),
1443                              lookup_dpo_get_index(lkd));
1450 VLIB_CLI_COMMAND (replicate_show_command, static) = {
1451 .path = "show lookup-dpo",
1452 .short_help = "show lookup-dpo [<index>]",
1453 .function = lookup_dpo_show,
/* Module init: register the base DPO_LOOKUP type (with mem-show) and one
 * dynamic DPO sub-type per lookup flavour, recording the assigned type
 * values for use in lookup_dpo_add_or_lock_i(). */
1457 lookup_dpo_module_init (void)
1459     dpo_register(DPO_LOOKUP, &lkd_vft_w_mem_show, NULL);
1462      * There are various sorts of lookup; src or dst addr v4 /v6 etc.
1463      * there isn't an object type for each (there is only the lookup_dpo_t),
1464      * but, for performance reasons, there is a data plane function, and hence
1465      * VLIB node for each. VLIB graph node construction is based on DPO types
1466      * so we create sub-types.
1468     lookup_dpo_sub_types[LOOKUP_SUB_TYPE_SRC] =
1469         dpo_register_new_type(&lkd_vft, lookup_src_nodes);
1470     lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST] =
1471         dpo_register_new_type(&lkd_vft, lookup_dst_nodes);
1472     lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST] =
1473         dpo_register_new_type(&lkd_vft, lookup_dst_mcast_nodes);
1474     lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE] =
1475         dpo_register_new_type(&lkd_vft, lookup_dst_from_interface_nodes);