/*
 * mpls_lookup.c: MPLS lookup
 *
 * Copyright (c) 2012-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/mpls/mpls.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/dpo/load_balance.h>

vlib_node_registration_t mpls_lookup_node;
typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 lfib_index;
  u32 label_net_byte_order;
  u32 hash;
} mpls_lookup_trace_t;
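/*
 * label_net_byte_order keeps the 4-byte label stack entry exactly as it
 * arrived on the wire; it is converted to host byte order only when the
 * trace is formatted.
 */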
static u8 *
format_mpls_lookup_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);

  s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %d"
              " label %d eos %d",
              t->next_index, t->lfib_index, t->lb_index, t->hash,
              vnet_mpls_uc_get_label(
                  clib_net_to_host_u32(t->label_net_byte_order)),
              vnet_mpls_uc_get_s(
                  clib_net_to_host_u32(t->label_net_byte_order)));
  return s;
}
/*
 * Compute flow hash.
 * We'll use it to select which adjacency to use for this flow. And other things.
 */
always_inline u32
mpls_compute_flow_hash (const mpls_unicast_header_t * hdr,
                        flow_hash_config_t flow_hash_config)
{
  return (vnet_mpls_uc_get_label(hdr->label_exp_s_ttl));
}
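/*
 * An MPLS label stack entry is a 32-bit word: label:20 | exp:3 | s:1 | ttl:8
 * (RFC 3032).  Hashing on the label value alone means every packet carrying
 * the same top label selects the same load-balance bucket; flow_hash_config
 * is not consulted here.
 */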
static inline uword
mpls_lookup (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
  u32 n_left_from, next_index, * from, * to_next;
  mpls_main_t * mm = &mpls_main;
  u32 cpu_index = os_get_cpu_number();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
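      /*
       * Quad-loop dispatch: while at least eight packets remain and there is
       * room for four in the outgoing frame, four buffers are processed per
       * iteration and buffer/MPLS headers a few packets ahead are prefetched,
       * so the data is (ideally) already in cache when it is read.
       */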
      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;
          u32 lbi1, next1, lfib_index1, bi1, hash_c1;
          const mpls_unicast_header_t * h1;
          const load_balance_t *lb1;
          const dpo_id_t *dpo1;
          vlib_buffer_t * b1;
          u32 lbi2, next2, lfib_index2, bi2, hash_c2;
          const mpls_unicast_header_t * h2;
          const load_balance_t *lb2;
          const dpo_id_t *dpo2;
          vlib_buffer_t * b2;
          u32 lbi3, next3, lfib_index3, bi3, hash_c3;
          const mpls_unicast_header_t * h3;
          const load_balance_t *lb3;
          const dpo_id_t *dpo3;
          vlib_buffer_t * b3;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3, *p4, *p5;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);
            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);
            vlib_prefetch_buffer_header (p4, STORE);
            vlib_prefetch_buffer_header (p5, STORE);

            CLIB_PREFETCH (p2->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p3->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p4->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p5->data, sizeof (h0[0]), STORE);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          to_next += 4;
          n_left_from -= 4;
          n_left_to_next -= 4;
          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);
          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);
          h2 = vlib_buffer_get_current (b2);
          h3 = vlib_buffer_get_current (b3);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);
          lfib_index1 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b1)->sw_if_index[VLIB_RX]);
          lfib_index2 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b2)->sw_if_index[VLIB_RX]);
          lfib_index3 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b3)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
          lbi1 = mpls_fib_table_forwarding_lookup (lfib_index1, h1);
          lbi2 = mpls_fib_table_forwarding_lookup (lfib_index2, h2);
          lbi3 = mpls_fib_table_forwarding_lookup (lfib_index3, h3);
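          /*
           * The forwarding lookup is done in the MPLS FIB bound to the RX
           * interface; the key is the label value plus the end-of-stack bit
           * and the result is the index of a load-balance object.
           */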
          lb0 = load_balance_get(lbi0);
          lb1 = load_balance_get(lbi1);
          lb2 = load_balance_get(lbi2);
          lb3 = load_balance_get(lbi3);

          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
          hash_c2 = vnet_buffer(b2)->ip.flow_hash = 0;
          hash_c3 = vnet_buffer(b3)->ip.flow_hash = 0;
          if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
            {
              hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                  mpls_compute_flow_hash(h0, lb0->lb_hash_config);
            }
          if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
            {
              hash_c1 = vnet_buffer (b1)->ip.flow_hash =
                  mpls_compute_flow_hash(h1, lb1->lb_hash_config);
            }
          if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
            {
              hash_c2 = vnet_buffer (b2)->ip.flow_hash =
                  mpls_compute_flow_hash(h2, lb2->lb_hash_config);
            }
          if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
            {
              hash_c3 = vnet_buffer (b3)->ip.flow_hash =
                  mpls_compute_flow_hash(h3, lb3->lb_hash_config);
            }
          ASSERT (lb0->lb_n_buckets > 0);
          ASSERT (is_pow2 (lb0->lb_n_buckets));
          ASSERT (lb1->lb_n_buckets > 0);
          ASSERT (is_pow2 (lb1->lb_n_buckets));
          ASSERT (lb2->lb_n_buckets > 0);
          ASSERT (is_pow2 (lb2->lb_n_buckets));
          ASSERT (lb3->lb_n_buckets > 0);
          ASSERT (is_pow2 (lb3->lb_n_buckets));
          dpo0 = load_balance_get_bucket_i(lb0,
                                           (hash_c0 &
                                            (lb0->lb_n_buckets_minus_1)));
          dpo1 = load_balance_get_bucket_i(lb1,
                                           (hash_c1 &
                                            (lb1->lb_n_buckets_minus_1)));
          dpo2 = load_balance_get_bucket_i(lb2,
                                           (hash_c2 &
                                            (lb2->lb_n_buckets_minus_1)));
          dpo3 = load_balance_get_bucket_i(lb3,
                                           (hash_c3 &
                                            (lb3->lb_n_buckets_minus_1)));
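          /*
           * lb_n_buckets is a power of two (asserted above), so masking the
           * flow hash with lb_n_buckets_minus_1 is equivalent to taking the
           * hash modulo the number of buckets.
           */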
          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;
          next2 = dpo2->dpoi_next_node;
          next3 = dpo3->dpoi_next_node;

          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
          vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
          vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
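          /*
           * The chosen bucket's DPO supplies both the graph arc to follow
           * (dpoi_next_node) and the object the next node should consume
           * (dpoi_index), which is handed over in adj_index[VLIB_TX].
           */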
          vlib_increment_combined_counter
              (cm, cpu_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, b0));
          vlib_increment_combined_counter
              (cm, cpu_index, lbi1, 1,
               vlib_buffer_length_in_chain (vm, b1));
          vlib_increment_combined_counter
              (cm, cpu_index, lbi2, 1,
               vlib_buffer_length_in_chain (vm, b2));
          vlib_increment_combined_counter
              (cm, cpu_index, lbi3, 1,
               vlib_buffer_length_in_chain (vm, b3));
          /*
           * before we pop the label copy the values we need to maintain.
           * The label header is in network byte order.
           *  the last byte is the TTL.
           *  bits 2 to 4 inclusive of the third byte are the EXP bits.
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;
          vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
          vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
          vnet_buffer (b1)->mpls.first = 1;
          vnet_buffer (b2)->mpls.ttl = ((char*)h2)[3];
          vnet_buffer (b2)->mpls.exp = (((char*)h2)[2] & 0xe) >> 1;
          vnet_buffer (b2)->mpls.first = 1;
          vnet_buffer (b3)->mpls.ttl = ((char*)h3)[3];
          vnet_buffer (b3)->mpls.exp = (((char*)h3)[2] & 0xe) >> 1;
          vnet_buffer (b3)->mpls.first = 1;
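          /*
           * Worked example: for the on-wire entry 0x00 0x01 0x0B 0x40
           * (label 16, EXP 5, EOS set, TTL 64) the saved TTL byte is 0x40 and
           * (0x0B & 0xe) >> 1 yields EXP 5.  The values are kept so that nodes
           * later in the path can, where required, propagate the EXP/TTL of
           * the disposed label.
           */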
          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          vlib_buffer_advance(b1, sizeof(*h1));
          vlib_buffer_advance(b2, sizeof(*h2));
          vlib_buffer_advance(b3, sizeof(*h3));
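          /*
           * Advancing current_data by sizeof(mpls_unicast_header_t), i.e. 4
           * bytes, is what actually pops the outermost label: the next node
           * starts parsing at whatever follows it.
           */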
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
            }
          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->lfib_index = lfib_index1;
              tr->hash = hash_c1;
              tr->label_net_byte_order = h1->label_exp_s_ttl;
            }
          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b2, sizeof (*tr));
              tr->next_index = next2;
              tr->lb_index = lbi2;
              tr->lfib_index = lfib_index2;
              tr->hash = hash_c2;
              tr->label_net_byte_order = h2->label_exp_s_ttl;
            }
          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b3, sizeof (*tr));
              tr->next_index = next3;
              tr->lb_index = lbi3;
              tr->lfib_index = lfib_index3;
              tr->hash = hash_c3;
              tr->label_net_byte_order = h3->label_exp_s_ttl;
            }
          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;
          b0 = vlib_get_buffer (vm, bi0);
          h0 = vlib_buffer_get_current (b0);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
          lb0 = load_balance_get(lbi0);
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
            {
              hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                  mpls_compute_flow_hash(h0, lb0->lb_hash_config);
            }

          ASSERT (lb0->lb_n_buckets > 0);
          ASSERT (is_pow2 (lb0->lb_n_buckets));

          dpo0 = load_balance_get_bucket_i(lb0,
                                           (hash_c0 &
                                            (lb0->lb_n_buckets_minus_1)));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          vlib_increment_combined_counter
              (cm, cpu_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, b0));
          /*
           * before we pop the label copy the values we need to maintain.
           * The label header is in network byte order.
           *  the last byte is the TTL.
           *  bits 2 to 4 inclusive of the third byte are the EXP bits.
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, mpls_lookup_node.index,
                               MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}
static char * mpls_error_strings[] = {
#define mpls_error(n,s) s,
#include "error.def"
#undef mpls_error
};
VLIB_REGISTER_NODE (mpls_lookup_node) = {
  .function = mpls_lookup,
  .name = "mpls-lookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = MPLS_N_ERROR,
  .error_strings = mpls_error_strings,

  .sibling_of = "ip4-lookup",

  .format_buffer = format_mpls_header,
  .format_trace = format_mpls_lookup_trace,
  .unformat_buffer = unformat_mpls_header,
};
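/*
 * Being a sibling of ip4-lookup means mpls-lookup shares ip4-lookup's set of
 * next-node arcs, so a DPO's dpoi_next_node index is valid from either node.
 */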
VLIB_NODE_FUNCTION_MULTIARCH (mpls_lookup_node, mpls_lookup)
typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 hash;
} mpls_load_balance_trace_t;
static u8 *
format_mpls_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_load_balance_trace_t * t = va_arg (*args, mpls_load_balance_trace_t *);

  s = format (s, "MPLS: next [%d], LB index %d hash %d",
              t->next_index, t->lb_index, t->hash);
  return s;
}
static inline uword
mpls_load_balance (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
  u32 n_left_from, n_left_to_next, * from, * to_next;
  u32 cpu_index = os_get_cpu_number();
  u32 next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          const load_balance_t *lb0, *lb1;
          vlib_buffer_t * p0, *p1;
          u32 pi0, lbi0, hc0, pi1, lbi1, hc1, next0, next1;
          const mpls_unicast_header_t *mpls0, *mpls1;
          const dpo_id_t *dpo0, *dpo1;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);

            CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), STORE);
            CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), STORE);
          }

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;
          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          mpls0 = vlib_buffer_get_current (p0);
          mpls1 = vlib_buffer_get_current (p1);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          lb1 = load_balance_get(lbi1);
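          /*
           * This node is reached through a load-balance DPO, so the previous
           * level of the lookup left the load-balance index for this hop in
           * the buffer's adj_index[VLIB_TX] metadata.
           */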
          /*
           * This node handles via-FIBs, so we can re-use the flow hash written
           * by the to-FIB node when it is present.
           * We don't want to use the same hash value at each level in the
           * recursion graph, as that would lead to polarisation.
           */
          hc0 = 0;
          hc1 = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = vnet_buffer(p0)->ip.flow_hash >> 1;
                }
              else
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
                }
            }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p1)->ip.flow_hash))
                {
                  hc1 = vnet_buffer(p1)->ip.flow_hash = vnet_buffer(p1)->ip.flow_hash >> 1;
                }
              else
                {
                  hc1 = vnet_buffer(p1)->ip.flow_hash = mpls_compute_flow_hash(mpls1, hc1);
                }
            }
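          /*
           * When a hash is inherited from the previous level it is shifted
           * right by one bit, so each level of a recursive route consumes a
           * different bit of the hash and successive levels pick decorrelated
           * buckets.
           */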
          dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));
          dpo1 = load_balance_get_bucket_i(lb1, hc1 & (lb1->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;

          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
          vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

          vlib_increment_combined_counter
              (cm, cpu_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter
              (cm, cpu_index, lbi1, 1,
               vlib_buffer_length_in_chain (vm, p1));
          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
            }
          vlib_validate_buffer_enqueue_x2 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          const load_balance_t *lb0;
          vlib_buffer_t * p0;
          u32 pi0, lbi0, hc0, next0;
          const mpls_unicast_header_t *mpls0;
          const dpo_id_t *dpo0;

          pi0 = from[0];
          to_next[0] = pi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;
          p0 = vlib_get_buffer (vm, pi0);

          mpls0 = vlib_buffer_get_current (p0);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          hc0 = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = vnet_buffer(p0)->ip.flow_hash >> 1;
                }
              else
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
                }
            }
          dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          vlib_increment_combined_counter
              (cm, cpu_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_validate_buffer_enqueue_x1 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }

      vlib_put_next_frame (vm, node, next, n_left_to_next);
    }

  return frame->n_vectors;
}
VLIB_REGISTER_NODE (mpls_load_balance_node) = {
  .function = mpls_load_balance,
  .name = "mpls-load-balance",
  .vector_size = sizeof (u32),
  .sibling_of = "mpls-lookup",

  .format_trace = format_mpls_load_balance_trace,
};
VLIB_NODE_FUNCTION_MULTIARCH (mpls_load_balance_node, mpls_load_balance)