vppinfra: refactor test_and_set spinlocks to use clib_spinlock_t 61/20861/5
author: jaszha03 <jason.zhang2@arm.com>
Thu, 11 Jul 2019 20:47:24 +0000 (20:47 +0000)
committer: Dave Barach <openvpp@barachs.net>
Wed, 31 Jul 2019 13:53:55 +0000 (13:53 +0000)
Spinlock performance improved when implemented with compare_and_exchange
instead of test_and_set. All instances of test_and_set locks were refactored
to use clib_spinlock_t when possible. Some locks, e.g. the ssvm locks,
synchronize between processes rather than threads, so they cannot
directly use clib_spinlock_t.

Type: refactor

Change-Id: Ia16b5d4cd49209b2b57b8df6c94615c28b11bb60
Signed-off-by: Jason Zhang <jason.zhang2@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Lijian Zhang <Lijian.Zhang@arm.com>
17 files changed:
src/plugins/ioam/analyse/ioam_analyse.h
src/plugins/ioam/analyse/ioam_summary_export.c
src/plugins/ioam/analyse/ip6/node.c
src/plugins/ioam/export-common/ioam_export.h
src/plugins/lb/lb.c
src/plugins/lb/lb.h
src/plugins/map/map.c
src/plugins/map/map.h
src/vnet/classify/vnet_classify.c
src/vnet/classify/vnet_classify.h
src/vnet/dns/dns.c
src/vnet/dns/dns.h
src/vnet/interface.c
src/vnet/interface.h
src/vnet/ipfix-export/flow_report_classify.c
src/vnet/util/refcount.h
src/vppinfra/lock.h

index 3aec7a7..2a2cc15 100644 (file)
@@ -22,6 +22,7 @@
 #include <ioam/lib-e2e/e2e_util.h>
 #include <ioam/lib-trace/trace_util.h>
 #include <ioam/lib-trace/trace_config.h>
+#include <vppinfra/lock.h>
 
 #define IOAM_FLOW_TEMPLATE_ID    260
 #define IOAM_TRACE_MAX_NODES      10
@@ -123,7 +124,7 @@ typedef struct ioam_analyser_data_t_
   struct ioam_analyser_data_t_ *chached_data_list;
 
   /** Lock to since we use this to export the data in other thread. */
-  volatile u32 *writer_lock;
+  clib_spinlock_t writer_lock;
 } ioam_analyser_data_t;
 
 always_inline f64
@@ -191,8 +192,7 @@ ip6_ioam_analyse_set_paths_down (ioam_analyser_data_t * data)
   ioam_path_map_t *path;
   u8 k, i;
 
-  while (clib_atomic_test_and_set (data->writer_lock))
-    ;
+  clib_spinlock_lock (&data->writer_lock);
 
   trace_data = &data->trace_data;
 
@@ -208,7 +208,7 @@ ip6_ioam_analyse_set_paths_down (ioam_analyser_data_t * data)
       for (k = 0; k < trace_record->num_nodes; k++)
        path[k].state_up = 0;
     }
-  clib_atomic_release (data->writer_lock);
+  clib_spinlock_unlock (&data->writer_lock);
 }
 
 always_inline void
@@ -225,8 +225,7 @@ ip6_ioam_analyse_hbh_trace_loopback (ioam_analyser_data_t * data,
   u16 size_of_traceopt_per_node;
   u16 size_of_all_traceopts;
 
-  while (clib_atomic_test_and_set (data->writer_lock))
-    ;
+  clib_spinlock_lock (&data->writer_lock);
 
   trace_data = &data->trace_data;
 
@@ -277,7 +276,7 @@ ip6_ioam_analyse_hbh_trace_loopback (ioam_analyser_data_t * data,
        }
     }
 end:
-  clib_atomic_release (data->writer_lock);
+  clib_spinlock_unlock (&data->writer_lock);
 }
 
 always_inline int
@@ -295,8 +294,7 @@ ip6_ioam_analyse_hbh_trace (ioam_analyser_data_t * data,
   ioam_path_map_t *path = NULL;
   ioam_analyse_trace_record *trace_record;
 
-  while (clib_atomic_test_and_set (data->writer_lock))
-    ;
+  clib_spinlock_lock (&data->writer_lock);
 
   trace_data = &data->trace_data;
 
@@ -409,7 +407,7 @@ found_match:
        (u32) ((sum + delay) / (data->seqno_data.rx_packets + 1));
     }
 DONE:
-  clib_atomic_release (data->writer_lock);
+  clib_spinlock_unlock (&data->writer_lock);
   return 0;
 }
 
@@ -417,13 +415,12 @@ always_inline int
 ip6_ioam_analyse_hbh_e2e (ioam_analyser_data_t * data,
                          ioam_e2e_packet_t * e2e, u16 len)
 {
-  while (clib_atomic_test_and_set (data->writer_lock))
-    ;
+  clib_spinlock_lock (&data->writer_lock);
 
   ioam_analyze_seqno (&data->seqno_data,
                      (u64) clib_net_to_host_u32 (e2e->e2e_data));
 
-  clib_atomic_release (data->writer_lock);
+  clib_spinlock_unlock (&data->writer_lock);
 
   return 0;
 }
@@ -509,10 +506,7 @@ ioam_analyse_init_data (ioam_analyser_data_t * data)
    * get extended in future to maintain history of data */
   vec_validate_aligned (data->chached_data_list, 0, CLIB_CACHE_LINE_BYTES);
 
-  data->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-                                             CLIB_CACHE_LINE_BYTES);
-
-  clib_atomic_release (data->writer_lock);
+  clib_spinlock_init (&data->writer_lock);
 
   trace_data = &(data->trace_data);
   for (j = 0; j < IOAM_MAX_PATHS_PER_FLOW; j++)
index 4851491..12907d8 100644 (file)
@@ -150,8 +150,7 @@ ioam_analyse_add_ipfix_record (flow_report_t * fr,
                               ip6_address_t * src, ip6_address_t * dst,
                               u16 src_port, u16 dst_port)
 {
-  while (clib_atomic_test_and_set (record->writer_lock))
-    ;
+  clib_spinlock_lock (&record->writer_lock);
 
   int field_index = 0;
   u16 tmp;
@@ -259,7 +258,7 @@ ioam_analyse_add_ipfix_record (flow_report_t * fr,
   *(record->chached_data_list) = *record;
   record->chached_data_list->chached_data_list = NULL;
 
-  clib_atomic_release (record->writer_lock);
+  clib_spinlock_unlock (&record->writer_lock);
   return offset;
 }
 
index 7a8d71d..2568e30 100644 (file)
@@ -256,17 +256,15 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  data0 = ioam_analyse_get_data_from_flow_id (flow_id0);
                  data1 = ioam_analyse_get_data_from_flow_id (flow_id1);
 
-                 while (clib_atomic_test_and_set (data0->writer_lock))
-                   ;
+                 clib_spinlock_lock (&data0->writer_lock);
                  data0->pkt_counter++;
                  data0->bytes_counter += p_len0;
-                 clib_atomic_release (data0->writer_lock);
+                 clib_spinlock_unlock (&data0->writer_lock);
 
-                 while (clib_atomic_test_and_set (data1->writer_lock))
-                   ;
+                 clib_spinlock_lock (&data1->writer_lock);
                  data1->pkt_counter++;
                  data1->bytes_counter += p_len1;
-                 clib_atomic_release (data1->writer_lock);
+                 clib_spinlock_unlock (&data1->writer_lock);
                }
              else if (error0 == 0)
                {
@@ -274,11 +272,10 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  pkts_failed++;
 
                  data0 = ioam_analyse_get_data_from_flow_id (flow_id0);
-                 while (clib_atomic_test_and_set (data0->writer_lock))
-                   ;
+                 clib_spinlock_lock (&data0->writer_lock);
                  data0->pkt_counter++;
                  data0->bytes_counter += p_len0;
-                 clib_atomic_release (data0->writer_lock);
+                 clib_spinlock_unlock (&data0->writer_lock);
                }
              else if (error1 == 0)
                {
@@ -286,11 +283,10 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  pkts_failed++;
 
                  data1 = ioam_analyse_get_data_from_flow_id (flow_id1);
-                 while (clib_atomic_test_and_set (data1->writer_lock))
-                   ;
+                 clib_spinlock_lock (&data1->writer_lock);
                  data1->pkt_counter++;
                  data1->bytes_counter += p_len1;
-                 clib_atomic_release (data1->writer_lock);
+                 clib_spinlock_unlock (&data1->writer_lock);
                }
              else
                pkts_failed += 2;
@@ -327,12 +323,11 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                {
                  pkts_analysed++;
                  data0 = ioam_analyse_get_data_from_flow_id (flow_id0);
-                 while (clib_atomic_test_and_set (data0->writer_lock))
-                   ;
+                 clib_spinlock_lock (&data0->writer_lock);
                  data0->pkt_counter++;
                  data0->bytes_counter +=
                    clib_net_to_host_u16 (ip60->payload_length);
-                 clib_atomic_release (data0->writer_lock);
+                 clib_spinlock_unlock (&data0->writer_lock);
                }
              else
                pkts_failed++;
@@ -393,13 +388,12 @@ ip6_ioam_analyse_hbh_pot (u32 flow_id, ip6_hop_by_hop_option_t * opt0,
   pot_profile = pot_profile_get_active ();
   ret = pot_validate (pot_profile, cumulative, random);
 
-  while (clib_atomic_test_and_set (data->writer_lock))
-    ;
+  clib_spinlock_lock (&data->writer_lock);
 
   (0 == ret) ? (data->pot_data.sfc_validated_count++) :
     (data->pot_data.sfc_invalidated_count++);
 
-  clib_atomic_release (data->writer_lock);
+  clib_spinlock_unlock (&data->writer_lock);
   return 0;
 }
 
index 672576e..b1bca9b 100644 (file)
@@ -28,6 +28,7 @@
 #include <vppinfra/hash.h>
 #include <vppinfra/error.h>
 #include <vppinfra/elog.h>
+#include <vppinfra/lock.h>
 
 #include <vlib/threads.h>
 
@@ -62,7 +63,7 @@ typedef struct
   /* Vector of per thread ioam_export_buffer_t to buffer pool index */
   u32 *buffer_per_thread;
   /* Lock per thread to swap buffers between worker and timer process */
-  volatile u32 **lockp;
+  clib_spinlock_t *lockp;
 
   /* time scale transform */
   u32 unix_time_0;
@@ -194,9 +195,7 @@ ioam_export_thread_buffer_init (ioam_export_main_t * em, vlib_main_t * vm)
          ioam_export_thread_buffer_free (em);
          return (-2);
        }
-      em->lockp[i] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-                                            CLIB_CACHE_LINE_BYTES);
-      clib_memset ((void *) em->lockp[i], 0, CLIB_CACHE_LINE_BYTES);
+      clib_spinlock_init (&em->lockp[i]);
     }
   return (1);
 }
@@ -404,7 +403,7 @@ ioam_export_process_common (ioam_export_main_t * em, vlib_main_t * vm,
       for (i = 0; i < vec_len (em->buffer_per_thread); i++)
        {
          /* If the worker thread is processing export records ignore further checks */
-         if (*em->lockp[i] == 1)
+         if (CLIB_SPINLOCK_IS_LOCKED (&em->lockp[i]))
            continue;
          eb = pool_elt_at_index (em->buffer_pool, em->buffer_per_thread[i]);
          if (eb->records_in_this_buffer > 0
@@ -436,11 +435,10 @@ ioam_export_process_common (ioam_export_main_t * em, vlib_main_t * vm,
           */
          for (i = 0; i < vec_len (thread_index); i++)
            {
-             while (clib_atomic_test_and_set (em->lockp[thread_index[i]]))
-               ;
+             clib_spinlock_lock (&em->lockp[thread_index[i]]);
              em->buffer_per_thread[thread_index[i]] =
                vec_pop (vec_buffer_indices);
-             clib_atomic_release (em->lockp[thread_index[i]]);
+             clib_spinlock_unlock (&em->lockp[thread_index[i]]);
            }
 
          /* Send the buffers */
@@ -479,7 +477,7 @@ do {                                                                           \
   from = vlib_frame_vector_args (F);                                           \
   n_left_from = (F)->n_vectors;                                                \
   next_index = (N)->cached_next_index;                                         \
-  while (clib_atomic_test_and_set ((EM)->lockp[(VM)->thread_index]));         \
+  clib_spinlock_lock (&(EM)->lockp[(VM)->thread_index]);                      \
   my_buf = ioam_export_get_my_buffer (EM, (VM)->thread_index);                 \
   my_buf->touched_at = vlib_time_now (VM);                                     \
   while (n_left_from > 0)                                                      \
@@ -622,7 +620,7 @@ do {                                                                           \
     }                                                                          \
   vlib_node_increment_counter (VM, export_node.index,                          \
                               EXPORT_ERROR_RECORDED, pkts_recorded);          \
-  *(EM)->lockp[(VM)->thread_index] = 0;                                        \
+  clib_spinlock_unlock (&(EM)->lockp[(VM)->thread_index]);                     \
 } while(0)
 
 #endif /* __included_ioam_export_h__ */
index 75ca40f..3ac2b28 100644 (file)
@@ -18,6 +18,7 @@
 #include <vpp/app/version.h>
 #include <vnet/api_errno.h>
 #include <vnet/udp/udp.h>
+#include <vppinfra/lock.h>
 
 //GC runs at most once every so many seconds
 #define LB_GARBAGE_RUN 60
@@ -27,8 +28,8 @@
 
 lb_main_t lb_main;
 
-#define lb_get_writer_lock() do {} while(clib_atomic_test_and_set (lb_main.writer_lock))
-#define lb_put_writer_lock() clib_atomic_release (lb_main.writer_lock)
+#define lb_get_writer_lock() clib_spinlock_lock (&lb_main.writer_lock)
+#define lb_put_writer_lock() clib_spinlock_unlock (&lb_main.writer_lock)
 
 static void lb_as_stack (lb_as_t *as);
 
@@ -289,7 +290,7 @@ static void lb_vip_garbage_collection(lb_vip_t *vip)
   lb_snat6_key_t m_key6;
   clib_bihash_kv_24_8_t kv6, value6;
   lb_snat_mapping_t *m = 0;
-  ASSERT (lbm->writer_lock[0]);
+  CLIB_SPINLOCK_ASSERT_LOCKED (&lbm->writer_lock);
 
   u32 now = (u32) vlib_time_now(vlib_get_main());
   if (!clib_u32_loop_gt(now, vip->last_garbage_collection + LB_GARBAGE_RUN))
@@ -384,7 +385,7 @@ static void lb_vip_update_new_flow_table(lb_vip_t *vip)
   lb_as_t *as;
   lb_pseudorand_t *pr, *sort_arr = 0;
 
-  ASSERT (lbm->writer_lock[0]); //We must have the lock
+  CLIB_SPINLOCK_ASSERT_LOCKED (&lbm->writer_lock); // We must have the lock
 
   //Check if some AS is configured or not
   i = 0;
@@ -496,7 +497,8 @@ int lb_vip_port_find_index(ip46_address_t *prefix, u8 plen,
 {
   lb_main_t *lbm = &lb_main;
   lb_vip_t *vip;
-  ASSERT (lbm->writer_lock[0]); //This must be called with the lock owned
+  /* This must be called with the lock owned */
+  CLIB_SPINLOCK_ASSERT_LOCKED (&lbm->writer_lock);
   ip46_prefix_normalize(prefix, plen);
   pool_foreach(vip, lbm->vips, {
       if ((vip->flags & LB_AS_FLAGS_USED) &&
@@ -560,7 +562,8 @@ int lb_vip_find_index(ip46_address_t *prefix, u8 plen, u8 protocol,
 static int lb_as_find_index_vip(lb_vip_t *vip, ip46_address_t *address, u32 *as_index)
 {
   lb_main_t *lbm = &lb_main;
-  ASSERT (lbm->writer_lock[0]); //This must be called with the lock owned
+  /* This must be called with the lock owned */
+  CLIB_SPINLOCK_ASSERT_LOCKED (&lbm->writer_lock);
   lb_as_t *as;
   u32 *asi;
   pool_foreach(asi, vip->as_indexes, {
@@ -1384,8 +1387,7 @@ lb_init (vlib_main_t * vm)
 
   lbm->per_cpu = 0;
   vec_validate(lbm->per_cpu, tm->n_vlib_mains - 1);
-  lbm->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,  CLIB_CACHE_LINE_BYTES);
-  lbm->writer_lock[0] = 0;
+  clib_spinlock_init (&lbm->writer_lock);
   lbm->per_cpu_sticky_buckets = LB_DEFAULT_PER_CPU_STICKY_BUCKETS;
   lbm->flow_timeout = LB_DEFAULT_FLOW_TIMEOUT;
   lbm->ip4_src_address.as_u32 = 0xffffffff;
index d09ac63..9899eab 100644 (file)
@@ -41,6 +41,7 @@
 #include <vppinfra/bihash_8_8.h>
 #include <vppinfra/bihash_24_8.h>
 #include <lb/lbhash.h>
+#include <vppinfra/lock.h>
 
 #define LB_DEFAULT_PER_CPU_STICKY_BUCKETS 1 << 10
 #define LB_DEFAULT_FLOW_TIMEOUT 40
@@ -561,7 +562,7 @@ typedef struct {
    */
   u16 msg_id_base;
 
-  volatile u32 *writer_lock;
+  clib_spinlock_t writer_lock;
 
   /* convenience */
   vlib_main_t *vlib_main;
index 2f036da..6ec9e72 100644 (file)
@@ -2265,9 +2265,7 @@ map_init (vlib_main_t * vm)
   /* IP4 virtual reassembly */
   mm->ip4_reass_hash_table = 0;
   mm->ip4_reass_pool = 0;
-  mm->ip4_reass_lock =
-    clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
-  *mm->ip4_reass_lock = 0;
+  clib_spinlock_init (&mm->ip4_reass_lock);
   mm->ip4_reass_conf_ht_ratio = MAP_IP4_REASS_HT_RATIO_DEFAULT;
   mm->ip4_reass_conf_lifetime_ms = MAP_IP4_REASS_LIFETIME_DEFAULT;
   mm->ip4_reass_conf_pool_size = MAP_IP4_REASS_POOL_SIZE_DEFAULT;
@@ -2281,9 +2279,7 @@ map_init (vlib_main_t * vm)
   /* IP6 virtual reassembly */
   mm->ip6_reass_hash_table = 0;
   mm->ip6_reass_pool = 0;
-  mm->ip6_reass_lock =
-    clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
-  *mm->ip6_reass_lock = 0;
+  clib_spinlock_init (&mm->ip6_reass_lock);
   mm->ip6_reass_conf_ht_ratio = MAP_IP6_REASS_HT_RATIO_DEFAULT;
   mm->ip6_reass_conf_lifetime_ms = MAP_IP6_REASS_LIFETIME_DEFAULT;
   mm->ip6_reass_conf_pool_size = MAP_IP6_REASS_POOL_SIZE_DEFAULT;
index 6dc5232..a65a16a 100644 (file)
@@ -306,7 +306,7 @@ typedef struct {
   u16 ip4_reass_allocated;
   u16 *ip4_reass_hash_table;
   u16 ip4_reass_fifo_last;
-  volatile u32 *ip4_reass_lock;
+  clib_spinlock_t ip4_reass_lock;
 
   /* Counters */
   u32 ip4_reass_buffered_counter;
@@ -329,7 +329,7 @@ typedef struct {
   u16 ip6_reass_allocated;
   u16 *ip6_reass_hash_table;
   u16 ip6_reass_fifo_last;
-  volatile u32 *ip6_reass_lock;
+  clib_spinlock_t ip6_reass_lock;
 
   /* Counters */
   u32 ip6_reass_buffered_counter;
@@ -502,8 +502,8 @@ map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id,
 void
 map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop);
 
-#define map_ip4_reass_lock() while (clib_atomic_test_and_set (map_main.ip4_reass_lock)) { CLIB_PAUSE (); }
-#define map_ip4_reass_unlock() clib_atomic_release (map_main.ip4_reass_lock)
+#define map_ip4_reass_lock() clib_spinlock_lock (&map_main.ip4_reass_lock)
+#define map_ip4_reass_unlock() clib_spinlock_unlock (&map_main.ip4_reass_lock)
 
 static_always_inline void
 map_ip4_reass_get_fragments(map_ip4_reass_t *r, u32 **pi)
@@ -527,8 +527,8 @@ map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id,
 void
 map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop);
 
-#define map_ip6_reass_lock() while (clib_atomic_test_and_set (map_main.ip6_reass_lock)) { CLIB_PAUSE (); }
-#define map_ip6_reass_unlock() clib_atomic_release (map_main.ip6_reass_lock)
+#define map_ip6_reass_lock() clib_spinlock_lock (&map_main.ip6_reass_lock)
+#define map_ip6_reass_unlock() clib_spinlock_unlock (&map_main.ip6_reass_lock)
 
 int
 map_ip6_reass_add_fragment(map_ip6_reass_t *r, u32 pi,
@@ -589,6 +589,7 @@ map_domain_counter_lock (map_main_t *mm)
     while (clib_atomic_test_and_set (mm->counter_lock))
       /* zzzz */ ;
 }
+
 static inline void
 map_domain_counter_unlock (map_main_t *mm)
 {
index f5e4949..b807a26 100755 (executable)
@@ -152,10 +152,7 @@ vnet_classify_new_table (vnet_classify_main_t * cm,
   vec_validate_aligned (t->buckets, nbuckets - 1, CLIB_CACHE_LINE_BYTES);
   oldheap = clib_mem_set_heap (t->mheap);
 
-  t->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-                                          CLIB_CACHE_LINE_BYTES);
-  t->writer_lock[0] = 0;
-
+  clib_spinlock_init (&t->writer_lock);
   clib_mem_set_heap (oldheap);
   return (t);
 }
@@ -193,7 +190,7 @@ vnet_classify_entry_alloc (vnet_classify_table_t * t, u32 log2_pages)
   u32 required_length;
   void *oldheap;
 
-  ASSERT (t->writer_lock[0]);
+  CLIB_SPINLOCK_ASSERT_LOCKED (&t->writer_lock);
   required_length =
     (sizeof (vnet_classify_entry_t) + (t->match_n_vectors * sizeof (u32x4)))
     * t->entries_per_page * (1 << log2_pages);
@@ -222,7 +219,7 @@ static void
 vnet_classify_entry_free (vnet_classify_table_t * t,
                          vnet_classify_entry_t * v, u32 log2_pages)
 {
-  ASSERT (t->writer_lock[0]);
+  CLIB_SPINLOCK_ASSERT_LOCKED (&t->writer_lock);
 
   ASSERT (vec_len (t->freelists) > log2_pages);
 
@@ -447,8 +444,7 @@ vnet_classify_add_del (vnet_classify_table_t * t,
 
   hash >>= t->log2_nbuckets;
 
-  while (clib_atomic_test_and_set (t->writer_lock))
-    CLIB_PAUSE ();
+  clib_spinlock_lock (&t->writer_lock);
 
   /* First elt in the bucket? */
   if (b->offset == 0)
@@ -640,7 +636,7 @@ expand_ok:
   vnet_classify_entry_free (t, v, old_log2_pages);
 
 unlock:
-  clib_atomic_release (&t->writer_lock[0]);
+  clib_spinlock_unlock (&t->writer_lock);
   return rv;
 }
 
index 2bc1224..986e0a6 100644 (file)
@@ -187,7 +187,7 @@ typedef struct
   void *mheap;
 
   /* Writer (only) lock for this table */
-  volatile u32 *writer_lock;
+  clib_spinlock_t writer_lock;
 
 } vnet_classify_table_t;
 
index bae6cb4..471728b 100644 (file)
@@ -103,8 +103,7 @@ dns_enable_disable (dns_main_t * dm, int is_enable)
       if (dm->cache_entry_by_name == 0)
        {
          if (n_vlib_mains > 1)
-           dm->cache_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-                                                    CLIB_CACHE_LINE_BYTES);
+           clib_spinlock_init (&dm->cache_lock);
 
          dm->cache_entry_by_name = hash_create_string (0, sizeof (uword));
        }
index d5029e5..494ac67 100644 (file)
@@ -97,7 +97,7 @@ typedef struct
 
   /** Find cached record by name */
   uword *cache_entry_by_name;
-  uword *cache_lock;
+  clib_spinlock_t cache_lock;
 
   /** enable / disable flag */
   int is_enabled;
@@ -196,8 +196,7 @@ dns_cache_lock (dns_main_t * dm)
 {
   if (dm->cache_lock)
     {
-      while (clib_atomic_test_and_set (dm->cache_lock))
-       CLIB_PAUSE ();
+      clib_spinlock_lock (&dm->cache_lock);
     }
 }
 
@@ -206,7 +205,7 @@ dns_cache_unlock (dns_main_t * dm)
 {
   if (dm->cache_lock)
     {
-      clib_atomic_release (dm->cache_lock);
+      clib_spinlock_unlock (&dm->cache_lock);
     }
 }
 
index 1702cdc..889ba50 100644 (file)
@@ -1269,9 +1269,8 @@ vnet_interface_init (vlib_main_t * vm)
         sizeof (b->opaque), sizeof (vnet_buffer_opaque_t));
     }
 
-  im->sw_if_counter_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-                                                  CLIB_CACHE_LINE_BYTES);
-  im->sw_if_counter_lock[0] = 1;       /* should be no need */
+  clib_spinlock_init (&im->sw_if_counter_lock);
+  clib_spinlock_lock (&im->sw_if_counter_lock);        /* should be no need */
 
   vec_validate (im->sw_if_counters, VNET_N_SIMPLE_INTERFACE_COUNTER - 1);
 #define _(E,n,p)                                                       \
@@ -1286,7 +1285,7 @@ vnet_interface_init (vlib_main_t * vm)
   im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_##E].stat_segment_name = "/" #p "/" #n;
   foreach_combined_interface_counter_name
 #undef _
-    im->sw_if_counter_lock[0] = 0;
+    clib_spinlock_unlock (&im->sw_if_counter_lock);
 
   im->device_class_by_name = hash_create_string ( /* size */ 0,
                                                 sizeof (uword));
index c6400ce..d3065dc 100644 (file)
@@ -43,6 +43,7 @@
 #include <vlib/vlib.h>
 #include <vppinfra/pcap.h>
 #include <vnet/l3_types.h>
+#include <vppinfra/lock.h>
 
 struct vnet_main_t;
 struct vnet_hw_interface_t;
@@ -836,7 +837,7 @@ typedef struct
 
   /* Software interface counters both simple and combined
      packet and byte counters. */
-  volatile u32 *sw_if_counter_lock;
+  clib_spinlock_t sw_if_counter_lock;
   vlib_simple_counter_main_t *sw_if_counters;
   vlib_combined_counter_main_t *combined_sw_if_counters;
 
@@ -868,15 +869,14 @@ static inline void
 vnet_interface_counter_lock (vnet_interface_main_t * im)
 {
   if (im->sw_if_counter_lock)
-    while (clib_atomic_test_and_set (im->sw_if_counter_lock))
-      /* zzzz */ ;
+    clib_spinlock_lock (&im->sw_if_counter_lock);
 }
 
 static inline void
 vnet_interface_counter_unlock (vnet_interface_main_t * im)
 {
   if (im->sw_if_counter_lock)
-    clib_atomic_release (im->sw_if_counter_lock);
+    clib_spinlock_unlock (&im->sw_if_counter_lock);
 }
 
 void vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add);
index 6bddb14..15118f9 100644 (file)
@@ -197,8 +197,7 @@ ipfix_classify_send_flows (flow_report_main_t * frm,
 
   t = pool_elt_at_index (vcm->tables, table->classify_table_index);
 
-  while (clib_atomic_test_and_set (t->writer_lock))
-    ;
+  clib_spinlock_lock (&t->writer_lock);
 
   for (i = 0; i < t->nbuckets; i++)
     {
@@ -385,7 +384,7 @@ flush:
       bi0 = ~0;
     }
 
-  clib_atomic_release (t->writer_lock);
+  clib_spinlock_unlock (&t->writer_lock);
   return f;
 }
 
index 873ab6d..4c7d7bd 100644 (file)
@@ -30,6 +30,7 @@
  */
 
 #include <vnet/vnet.h>
+#include <vppinfra/lock.h>
 
 /*
  * Reference counting
@@ -41,7 +42,7 @@
 */
 typedef struct {
   u32 *counters;
-  volatile u32 *counter_lock;
+  clib_spinlock_t counter_lock;
   CLIB_CACHE_LINE_ALIGN_MARK(o);
 } vlib_refcount_per_cpu_t;
 
@@ -50,16 +51,15 @@ typedef struct {
 } vlib_refcount_t;
 
 static_always_inline
-void vlib_refcount_lock (volatile u32 *counter_lock)
+void vlib_refcount_lock (clib_spinlock_t counter_lock)
 {
-  while (clib_atomic_test_and_set (counter_lock))
-    ;
+  clib_spinlock_lock (&counter_lock);
 }
 
 static_always_inline
-void vlib_refcount_unlock (volatile u32 *counter_lock)
+void vlib_refcount_unlock (clib_spinlock_t counter_lock)
 {
-  clib_atomic_release(counter_lock);
+  clib_spinlock_unlock (&counter_lock);
 }
 
 void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
@@ -86,9 +86,7 @@ void vlib_refcount_init(vlib_refcount_t *r)
 
   for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
     {
-      r->per_cpu[thread_index].counter_lock =
-         clib_mem_alloc_aligned(CLIB_CACHE_LINE_BYTES,CLIB_CACHE_LINE_BYTES);
-      r->per_cpu[thread_index].counter_lock[0] = 0;
+      clib_spinlock_init (&r->per_cpu[thread_index].counter_lock);
     }
 }
 
index 59ab0e3..cc6a7f0 100644 (file)
@@ -42,6 +42,9 @@ do {                                                  \
 #define CLIB_LOCK_DBG_CLEAR(_p)
 #endif
 
+#define CLIB_SPINLOCK_IS_LOCKED(_p) (*(_p))->lock
+#define CLIB_SPINLOCK_ASSERT_LOCKED(_p) ASSERT(CLIB_SPINLOCK_IS_LOCKED((_p)))
+
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);