X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fipsec.h;fp=src%2Fvnet%2Fipsec%2Fipsec.h;h=968d377cea004e035baae7eb562dade7b967531b;hb=6d7dfcbfa4bc05f1308fc677f19ade44ea699da1;hp=0245c5575e435f47b24a59c220520dfc41934c41;hpb=d9e9870dd941bfb826530815e3196ced0b544b5d;p=vpp.git

diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 0245c5575e4..968d377cea0 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -34,6 +34,26 @@ typedef clib_error_t *(*add_del_sa_sess_cb_t) (u32 sa_index, u8 is_add);
 typedef clib_error_t *(*check_support_cb_t) (ipsec_sa_t * sa);
 typedef clib_error_t *(*enable_disable_cb_t) (int is_enable);
 
+typedef struct
+{
+  u64 key[2];
+  u64 value;
+  i32 bucket_lock;
+  u32 un_used;
+} ipsec4_hash_kv_16_8_t;
+
+typedef union
+{
+  struct
+  {
+    ip4_address_t ip4_addr[2];
+    u16 port[2];
+    u8 proto;
+    u8 pad[3];
+  };
+  ipsec4_hash_kv_16_8_t kv_16_8;
+} ipsec4_spd_5tuple_t;
+
 typedef struct
 {
   u8 *name;
@@ -130,6 +150,7 @@
   uword *ipsec_if_real_dev_by_show_dev;
   uword *ipsec_if_by_sw_if_index;
 
+  ipsec4_hash_kv_16_8_t *ipsec4_out_spd_hash_tbl;
   clib_bihash_8_16_t tun4_protect_by_key;
   clib_bihash_24_16_t tun6_protect_by_key;
 
@@ -206,8 +227,13 @@
   u32 esp4_dec_tun_fq_index;
   u32 esp6_dec_tun_fq_index;
 
+  /* Number of buckets for flow cache */
+  u32 ipsec4_out_spd_hash_num_buckets;
+  u32 ipsec4_out_spd_flow_cache_entries;
+  u32 epoch_count;
   u8 async_mode;
   u16 msg_id_base;
+  u8 flow_cache_flag;
 } ipsec_main_t;
 
 typedef enum ipsec_format_flags_t_
@@ -247,6 +273,51 @@ get_next_output_feature_node_index (vlib_buffer_t * b,
   return node->next_nodes[next];
 }
 
+static_always_inline u64
+ipsec4_hash_16_8 (ipsec4_hash_kv_16_8_t *v)
+{
+#ifdef clib_crc32c_uses_intrinsics
+  return clib_crc32c ((u8 *) v->key, 16);
+#else
+  u64 tmp = v->key[0] ^ v->key[1];
+  return clib_xxhash (tmp);
+#endif
+}
+
+static_always_inline int
+ipsec4_hash_key_compare_16_8 (u64 *a, u64 *b)
+{
+#if defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE)
+  u64x2 v;
+  v = u64x2_load_unaligned (a) ^ u64x2_load_unaligned (b);
+  return u64x2_is_all_zero (v);
+#else
+  return ((a[0] ^ b[0]) | (a[1] ^ b[1])) == 0;
+#endif
+}
+
+/* clib_spinlock_lock is not used to save another memory indirection */
+static_always_inline void
+ipsec_spinlock_lock (i32 *lock)
+{
+  i32 free = 0;
+  while (!clib_atomic_cmp_and_swap_acq_relax_n (lock, &free, 1, 0))
+    {
+      /* atomic load limits number of compare_exchange executions */
+      while (clib_atomic_load_relax_n (lock))
+        CLIB_PAUSE ();
+      /* on failure, compare_exchange writes lock into free */
+      free = 0;
+    }
+}
+
+static_always_inline void
+ipsec_spinlock_unlock (i32 *lock)
+{
+  /* Make sure all reads/writes are complete before releasing the lock */
+  clib_atomic_release (lock);
+}
+
 u32 ipsec_register_ah_backend (vlib_main_t * vm, ipsec_main_t * im,
 			       const char *name,
 			       const char *ah4_encrypt_node_name,
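
Reviewer note (illustrative only, not part of the change): the ipsec4_spd_5tuple_t union overlays the IPv4 5-tuple on the 16-byte key of an ipsec4_hash_kv_16_8_t, so the same bytes can be hashed with ipsec4_hash_16_8() and compared with ipsec4_hash_key_compare_16_8(). The sketch below shows one way those pieces could be combined to probe a bucket of ipsec4_out_spd_hash_tbl; the function name, the bucket-masking scheme and the miss sentinel are assumptions made for the example, and locking is omitted here (see the next sketch).

/* Hypothetical example, not part of the patch. */
#include <vnet/ipsec/ipsec.h>

static_always_inline u64
example_flow_cache_probe (ipsec_main_t *im, ip4_address_t src,
                          ip4_address_t dst, u16 sport, u16 dport, u8 proto)
{
  ipsec4_spd_5tuple_t t = { 0 };
  t.ip4_addr[0] = src;
  t.ip4_addr[1] = dst;
  t.port[0] = sport;
  t.port[1] = dport;
  t.proto = proto;

  /* hash the 16-byte key and mask it down to a bucket index,
     assuming ipsec4_out_spd_hash_num_buckets is a power of two */
  u64 hash = ipsec4_hash_16_8 (&t.kv_16_8);
  u32 bucket = hash & (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec4_hash_kv_16_8_t *kv = &im->ipsec4_out_spd_hash_tbl[bucket];
  if (ipsec4_hash_key_compare_16_8 (kv->key, t.kv_16_8.key))
    return kv->value;   /* hit: cached policy information */
  return ~0ULL;         /* miss sentinel chosen for this example */
}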
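
The per-bucket lock is an i32 embedded directly in each cache entry, so acquiring it avoids the extra pointer indirection a separate clib_spinlock_t would cost, as the comment in the patch notes. Below is a minimal sketch of how an entry update might be guarded with ipsec_spinlock_lock()/ipsec_spinlock_unlock(); the surrounding function and the value being stored are made up for illustration.

/* Hypothetical example, not part of the patch. */
#include <vnet/ipsec/ipsec.h>

static_always_inline void
example_flow_cache_update (ipsec_main_t *im, ipsec4_spd_5tuple_t *t,
                           u32 bucket, u64 new_value)
{
  ipsec4_hash_kv_16_8_t *kv = &im->ipsec4_out_spd_hash_tbl[bucket];

  /* bucket_lock lives inside the entry itself, so no extra cache line
     or pointer chase is needed to serialize writers on this bucket */
  ipsec_spinlock_lock (&kv->bucket_lock);
  kv->key[0] = t->kv_16_8.key[0];
  kv->key[1] = t->kv_16_8.key[1];
  kv->value = new_value;
  ipsec_spinlock_unlock (&kv->bucket_lock);
}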
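
flow_cache_flag, ipsec4_out_spd_hash_num_buckets, ipsec4_out_spd_flow_cache_entries and epoch_count are the knobs and counters added to ipsec_main_t for the flow cache. This header alone does not show how they are consumed; the sketch below is only one plausible reading, assuming epoch_count is bumped whenever SPD policies change and that the epoch in force was recorded in the upper bits of a cached value when the entry was written, so stale entries can be rejected without flushing the whole table.

/* Hypothetical example, not part of the patch. */
#include <vnet/ipsec/ipsec.h>

static_always_inline int
example_cached_value_is_fresh (ipsec_main_t *im, u64 cached_value)
{
  if (!im->flow_cache_flag)
    return 0;   /* cache disabled: always take the slow SPD walk */
  /* assumed layout: epoch stored in the upper 32 bits of the value */
  return (u32) (cached_value >> 32) == im->epoch_count;
}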