Harmonize vec/pool_get_aligned object sizes and alignment requests
[vpp.git] / src / vnet / dpo / load_balance_map.h
index 454bf4b..82dd36b 100644 (file)
@@ -28,6 +28,12 @@ struct load_balance_map_path_t_;
 /**
  */
 typedef struct load_balance_map_t_ {
+    /**
+     * required for pool_get_aligned.
+     *  members used in the switch path come first!
+     */
+    CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
     /**
      * The buckets of the map that provide the index to index translation.
      * In the first cacheline.
@@ -59,7 +65,7 @@ extern void load_balance_map_unlock(index_t lbmi);
 
 extern void load_balance_map_path_state_change(fib_node_index_t path_index);
 
-extern u8* format_load_balance_map(u8 *s, va_list ap);
+extern u8* format_load_balance_map(u8 *s, va_list *ap);
 extern void load_balance_map_show_mem(void);
 
 /**
@@ -73,6 +79,37 @@ load_balance_map_get (index_t lbmi)
     return (pool_elt_at_index(load_balance_map_pool, lbmi));
 }
 
+static inline u16
+load_balance_map_translate (index_t lbmi,
+                            u16 bucket)
+{
+    load_balance_map_t*lbm;
+
+    lbm = load_balance_map_get(lbmi);
+
+    return (lbm->lbm_buckets[bucket]);
+}
+
+static inline const dpo_id_t *
+load_balance_get_fwd_bucket (const load_balance_t *lb,
+                             u16 bucket)
+{
+    ASSERT(bucket < lb->lb_n_buckets);
+
+    if (INDEX_INVALID != lb->lb_map)
+    {
+        bucket = load_balance_map_translate(lb->lb_map, bucket);
+    }
+
+    if (PREDICT_TRUE(LB_HAS_INLINE_BUCKETS(lb)))
+    {
+       return (&lb->lb_buckets_inline[bucket]);
+    }
+    else
+    {
+       return (&lb->lb_buckets[bucket]);
+    }
+}
 
 extern void load_balance_map_module_init(void);