Typos. A bunch of typos I've been collecting.
[vpp.git] src/vnet/bier/bier_lookup.c
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/buffer.h>
#include <vnet/vnet.h>

#include <vnet/bier/bier_fmask.h>
#include <vnet/bier/bier_hdr_inlines.h>
#include <vnet/bier/bier_table.h>

/**
 * Struct maintaining the per-worker thread data for BIER lookups
 */
typedef struct bier_lookup_main_t_
{
    /* per-cpu vector of cloned packets */
    u32 **blm_clones;
    /* per-cpu vector of BIER fmasks */
    u32 **blm_fmasks;
} bier_lookup_main_t;

/**
 * Single instance of the lookup main
 */
static bier_lookup_main_t bier_lookup_main;

static char * bier_lookup_error_strings[] = {
#define bier_error(n,s) s,
#include <vnet/bier/bier_lookup_error.def>
#undef bier_error
};

/*
 * Keep these values semantically the same as BIER lookup
 */
#define foreach_bier_lookup_next                \
    _(DROP, "bier-drop")                        \
    _(OUTPUT, "bier-output")

typedef enum {
#define _(s,n) BIER_LOOKUP_NEXT_##s,
    foreach_bier_lookup_next
#undef _
    BIER_LOOKUP_N_NEXT,
} bier_lookup_next_t;

typedef enum {
#define bier_error(n,s) BIER_LOOKUP_ERROR_##n,
#include <vnet/bier/bier_lookup_error.def>
#undef bier_error
    BIER_LOOKUP_N_ERROR,
} bier_lookup_error_t;

vlib_node_registration_t bier_lookup_node;

/**
 * @brief Packet trace record for a BIER lookup
 */
typedef struct bier_lookup_trace_t_
{
    u32 next_index;
    index_t bt_index;
    index_t bfm_index;
} bier_lookup_trace_t;

static uword
bier_lookup (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
    u32 n_left_from, next_index, * from, * to_next;
    bier_lookup_main_t *blm = &bier_lookup_main;
    u32 thread_index = vlib_get_thread_index();
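    /*
     * on-stack scratch space for the packet's bit-string; sized for the
     * largest supported BitStringLength (4096 bits), though only the
     * length indicated by the table's header-length ID is used below
     */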
    bier_bit_mask_bucket_t buckets_copy[BIER_HDR_BUCKETS_4096];

    from = vlib_frame_vector_args (from_frame);
    n_left_from = from_frame->n_vectors;
    next_index = BIER_LOOKUP_NEXT_DROP;

    while (n_left_from > 0)
    {
        u32 n_left_to_next;

        vlib_get_next_frame (vm, node, next_index,
                             to_next, n_left_to_next);

        while (n_left_from > 0 && n_left_to_next > 0)
        {
            u32 next0, bi0, n_bytes, bti0, bfmi0;
            const bier_fmask_t *bfm0;
            const bier_table_t *bt0;
            u16 index, num_buckets;
            const bier_hdr_t *bh0;
            bier_bit_string_t bbs;
            vlib_buffer_t *b0;
            bier_bp_t fbs;
            int bucket;

            bi0 = from[0];
            from += 1;
            n_left_from -= 1;

            b0 = vlib_get_buffer (vm, bi0);
            bh0 = vlib_buffer_get_current (b0);
            bti0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];

            /*
             * default to drop so that if no bits are matched then
             * that is where we go - DROP.
             */
            next0 = BIER_LOOKUP_NEXT_DROP;

            /*
             * At the imposition or input node,
             * we stored the BIER Table index in the TX adjacency
             */
            bt0 = bier_table_get(vnet_buffer(b0)->ip.adj_index[VLIB_TX]);

            /*
             * we should only forward via one of the ECMP tables
             */
            ASSERT(!bier_table_is_main(bt0));

            /*
             * number of bytes in the packet's bit-string; it is walked
             * below in integer sized buckets
             */
            n_bytes = bier_hdr_len_id_to_num_buckets(bt0->bt_id.bti_hdr_len);
            vnet_buffer(b0)->mpls.bier.n_bytes = n_bytes;
            vnet_buffer(b0)->sw_if_index[VLIB_TX] = ~0;
            num_buckets = n_bytes / sizeof(int);
            bier_bit_string_init(&bbs,
                                 bt0->bt_id.bti_hdr_len,
                                 buckets_copy);
            memcpy(bbs.bbs_buckets, bh0->bh_bit_string, bbs.bbs_len);
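            /*
             * bbs now references the on-stack copy of the packet's
             * bit-string, so the bit clearing below does not modify the
             * BIER header in the buffer itself
             */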

            /*
             * reset the fmask and clone storage vectors
             */
            vec_reset_length (blm->blm_fmasks[thread_index]);
            vec_reset_length (blm->blm_clones[thread_index]);

            /*
             * Loop through the buckets in the header
             */
            for (index = 0; index < num_buckets; index++) {
                /*
                 * loop through each bit in the bucket
                 */
                bucket = ((int*)bbs.bbs_buckets)[index];

                while (bucket) {
                    fbs  = bier_find_first_bit_string_set(bucket);
                    fbs += (((num_buckets - 1) - index) *
                            BIER_BIT_MASK_BITS_PER_INT);
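                    /*
                     * fbs is now the absolute bit-position within the
                     * bit-string; the buckets are stored most significant
                     * first, hence the offset from the far end of the string
                     */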

                    bfmi0 = bier_table_fwd_lookup(bt0, fbs);

                    /*
                     * whatever happens, the bit we just looked for
                     * MUST be cleared from the packet
                     * otherwise we could be in this loop a while ...
                     */
                    bier_bit_string_clear_bit(&bbs, fbs);

                    if (PREDICT_TRUE(INDEX_INVALID != bfmi0))
                    {
                        bfm0 = bier_fmask_get(bfmi0);

                        /*
                         * use the bit-string on the fmask to reset
                         * the bits in the header we are walking
                         */
                        bier_bit_string_clear_string(
                            &bfm0->bfm_bits.bfmb_input_reset_string,
                            &bbs);
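                        /*
                         * the reset string may have cleared further bits in
                         * the current bucket, so re-read it before continuing
                         */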
                        bucket = ((int*)bbs.bbs_buckets)[index];

                        /*
                         * the fmask is resolved, so replicate a packet
                         * towards it
                         */
                        next0 = BIER_LOOKUP_NEXT_OUTPUT;

                        vec_add1 (blm->blm_fmasks[thread_index], bfmi0);
                    } else {
                        /*
                         * go to the next bit-position set
                         */
                        vlib_node_increment_counter(
                            vm, node->node_index,
                            BIER_LOOKUP_ERROR_FMASK_UNRES, 1);
                        bucket = ((int*)bbs.bbs_buckets)[index];
                        continue;
                    }
                }
            }

            /*
             * Full mask now processed.
             * Create the number of clones we need based on the number
             * of fmasks we are sending to.
             */
            u16 num_cloned, clone;
            u32 n_clones;

            n_clones = vec_len(blm->blm_fmasks[thread_index]);

            if (PREDICT_TRUE(0 != n_clones))
            {
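                /*
                 * clone the packet once per resolved fmask; the clones
                 * share the packet data, each getting a private copy of
                 * only the buffer head for its own rewrite/meta-data
                 */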
                num_cloned = vlib_buffer_clone(vm, bi0,
                                               blm->blm_clones[thread_index],
                                               n_clones,
                                               VLIB_BUFFER_CLONE_HEAD_SIZE);

                if (num_cloned != vec_len(blm->blm_fmasks[thread_index]))
                {
                    vlib_node_increment_counter
                        (vm, node->node_index,
                         BIER_LOOKUP_ERROR_BUFFER_ALLOCATION_FAILURE, 1);
                }

                for (clone = 0; clone < num_cloned; clone++)
                {
                    vlib_buffer_t *c0;
                    u32 ci0;

                    ci0 = blm->blm_clones[thread_index][clone];
                    c0 = vlib_get_buffer(vm, ci0);
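                    /*
                     * stash the fmask index in the clone's TX adjacency;
                     * the bier-output node uses it to find the replication
                     * data for this clone
                     */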
                    vnet_buffer(c0)->ip.adj_index[VLIB_TX] =
                        blm->blm_fmasks[thread_index][clone];

                    to_next[0] = ci0;
                    to_next += 1;
                    n_left_to_next -= 1;

                    if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                    {
                        bier_lookup_trace_t *tr;

                        if (c0 != b0)
                            vlib_buffer_copy_trace_flag (vm, b0, ci0);

                        tr = vlib_add_trace (vm, node, c0, sizeof (*tr));
                        tr->next_index = next0;
                        tr->bt_index = bti0;
                        tr->bfm_index = blm->blm_fmasks[thread_index][clone];
                    }

                    vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                                    to_next, n_left_to_next,
                                                    ci0, next0);

                    /*
                     * After the enqueue it is possible that we overflow the
                     * frame of the to-next node. When this happens we need to
                     * 'put' that full frame to the node and get a fresh empty
                     * one. Note that these are macros with side effects that
                     * change to_next & n_left_to_next
                     */
                    if (PREDICT_FALSE(0 == n_left_to_next))
                    {
                        vlib_put_next_frame (vm, node, next_index,
                                             n_left_to_next);
                        vlib_get_next_frame (vm, node, next_index,
                                             to_next, n_left_to_next);
                    }
                }
            }
            else
            {
                /*
                 * no clones/replications required. drop this packet
                 */
                next0 = BIER_LOOKUP_NEXT_DROP;
                to_next[0] = bi0;
                to_next += 1;
                n_left_to_next -= 1;

                if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
                {
                    bier_lookup_trace_t *tr;

                    tr = vlib_add_trace (vm, node, b0, sizeof (*tr));

                    tr->next_index = next0;
                    tr->bt_index = bti0;
                    tr->bfm_index = ~0;
                }

                vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                                to_next, n_left_to_next,
                                                bi0, next0);
            }
        }

        vlib_put_next_frame(vm, node, next_index, n_left_to_next);
    }

    vlib_node_increment_counter(vm, bier_lookup_node.index,
                                BIER_LOOKUP_ERROR_NONE,
                                from_frame->n_vectors);
    return (from_frame->n_vectors);
}

static u8 *
format_bier_lookup_trace (u8 * s, va_list * args)
{
    CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
    CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
    bier_lookup_trace_t * t = va_arg (*args, bier_lookup_trace_t *);

    s = format (s, "BIER: next [%d], tbl:%d BFM:%d",
                t->next_index,
                t->bt_index,
                t->bfm_index);
    return s;
}

VLIB_REGISTER_NODE (bier_lookup_node) = {
    .function = bier_lookup,
    .name = "bier-lookup",
    /* Takes a vector of packets. */
    .vector_size = sizeof (u32),

    .n_errors = BIER_LOOKUP_N_ERROR,
    .error_strings = bier_lookup_error_strings,

    .format_trace = format_bier_lookup_trace,
    .n_next_nodes = BIER_LOOKUP_N_NEXT,
    .next_nodes = {
        [BIER_LOOKUP_NEXT_DROP] = "bier-drop",
        [BIER_LOOKUP_NEXT_OUTPUT] = "bier-output",
    },
};

clib_error_t *
bier_lookup_module_init (vlib_main_t * vm)
{
    bier_lookup_main_t *blm = &bier_lookup_main;
    u32 thread_index;

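    /*
     * one clone vector and one fmask vector per thread; index 0 is the
     * main thread, so vlib_num_workers() is the highest valid index
     */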
    vec_validate (blm->blm_clones, vlib_num_workers());
    vec_validate (blm->blm_fmasks, vlib_num_workers());

    for (thread_index = 0;
         thread_index <= vlib_num_workers();
         thread_index++)
    {
        /*
         * 1024 entries is the most we will ever need, to support
         * a Bit-Mask length of 1024
         */
        vec_validate(blm->blm_fmasks[thread_index], 1023);
        vec_validate(blm->blm_clones[thread_index], 1023);
    }

    return 0;
}

VLIB_INIT_FUNCTION (bier_lookup_module_init);