/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @brief The load-balance map object and its public API.
 */

#ifndef __LOAD_BALANCE_MAP_H__
#define __LOAD_BALANCE_MAP_H__

#include <vlib/vlib.h>
#include <vnet/fib/fib_types.h>
#include <vnet/dpo/load_balance.h>

struct load_balance_map_path_t_;

/**
 * A load-balance map. It provides the bucket index to bucket index
 * translation for a load-balance object.
 */
typedef struct load_balance_map_t_ {
    /**
     * Required for pool_get_aligned; members used in the switch path
     * come first! (see the allocation sketch after this struct)
     */
    CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);

    /**
     * The buckets of the map that provide the index to index translation.
     * In the first cacheline.
     */
    u16 *lbm_buckets;

    /**
     * The vector of paths this map represents
     */
    struct load_balance_map_path_t_ *lbm_paths;

    /**
     * The sum of the normalised weights; cached for convenience
     */
    u32 lbm_sum_of_norm_weights;

    /**
     * Number of locks. Maps are shared by a large number of recursive fib_entry_t objects
     */
    u32 lbm_locks;
} load_balance_map_t;
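
/*
 * Allocation sketch (illustrative only; the actual pool allocation is
 * assumed to live in load_balance_map.c): the cache-line alignment marker
 * above is what allows elements to be taken from the pool aligned, e.g.
 *
 *   load_balance_map_t *lbm;
 *   pool_get_aligned (load_balance_map_pool, lbm, CLIB_CACHE_LINE_BYTES);
 */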

extern index_t load_balance_map_add_or_lock(u32 n_buckets,
                                            u32 sum_of_weights,
                                            const load_balance_path_t *norm_paths);

extern void load_balance_map_lock(index_t lbmi);
extern void load_balance_map_unlock(index_t lbmi);
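
/*
 * Lifetime sketch (an assumed caller, for illustration only): maps are
 * reference counted, so a user takes a lock when acquiring a map and
 * releases it when done.
 *
 *   index_t lbmi;
 *   lbmi = load_balance_map_add_or_lock (n_buckets, sum_of_weights, paths);
 *   ...
 *   load_balance_map_unlock (lbmi);
 */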

extern void load_balance_map_path_state_change(fib_node_index_t path_index);

extern u8* format_load_balance_map(u8 *s, va_list *ap);
extern void load_balance_map_show_mem(void);

/**
 * The encapsulation is broken here to allow fast data-plane access to the pool.
 */
extern load_balance_map_t *load_balance_map_pool;

static inline load_balance_map_t*
load_balance_map_get (index_t lbmi)
{
    return (pool_elt_at_index(load_balance_map_pool, lbmi));
}

static inline u16
load_balance_map_translate (index_t lbmi,
                            u16 bucket)
{
    load_balance_map_t *lbm;

    lbm = load_balance_map_get(lbmi);

    return (lbm->lbm_buckets[bucket]);
}
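
/*
 * Translation sketch (illustrative; lb and flow_hash are assumed to come
 * from the caller, and the bucket count is assumed to be a power of two):
 * the bucket chosen by the flow hash is remapped so that buckets of failed
 * paths resolve to buckets of usable ones.
 *
 *   u16 bucket = flow_hash & (lb->lb_n_buckets - 1);
 *   bucket = load_balance_map_translate (lb->lb_map, bucket);
 */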

static inline const dpo_id_t *
load_balance_get_fwd_bucket (const load_balance_t *lb,
                             u16 bucket)
{
    ASSERT(bucket < lb->lb_n_buckets);

    if (INDEX_INVALID != lb->lb_map)
    {
        bucket = load_balance_map_translate(lb->lb_map, bucket);
    }

    if (PREDICT_TRUE(LB_HAS_INLINE_BUCKETS(lb)))
    {
        return (&lb->lb_buckets_inline[bucket]);
    }
    else
    {
        return (&lb->lb_buckets[bucket]);
    }
}
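
/*
 * Caller sketch (illustrative; lb and flow_hash are assumed to be supplied
 * by a forwarding node, with a power-of-two bucket count): fetch the DPO to
 * forward on for a packet's hash bucket.
 *
 *   const dpo_id_t *dpo;
 *   dpo = load_balance_get_fwd_bucket (lb, flow_hash & (lb->lb_n_buckets - 1));
 *   u32 next_index = dpo->dpoi_next_node;
 */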

extern void load_balance_map_module_init(void);

#endif