vppinfra: add atomic macros for __sync builtins 81/15181/5
authorSirshak Das <sirshak.das@arm.com>
Wed, 3 Oct 2018 22:53:51 +0000 (22:53 +0000)
committerDamjan Marion <dmarion@me.com>
Fri, 19 Oct 2018 07:10:47 +0000 (07:10 +0000)
This is the first part of the addition of atomic macros, covering only the
macros that wrap the __sync builtins.

- Based on earlier patch by Damjan (https://gerrit.fd.io/r/#/c/10729/)
Additionally:
- clib_atomic_release macro added and used to replace plain
  "*lock = 0" stores that had no accompanying memory barrier.
- clib_atomic_bool_cmp_and_swap macro added.

Change-Id: Ie4e48c1e184a652018d1d0d87c4be80ddd180a3b
Original-patch-by: Damjan Marion <damarion@cisco.com>
Signed-off-by: Sirshak Das <sirshak.das@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Steve Capper <steve.capper@arm.com>
40 files changed:
extras/japi/java/jvpp-common/jvpp_common.h
src/plugins/acl/session_inlines.h
src/plugins/dpdk/device/device.c
src/plugins/ioam/analyse/ioam_analyse.h
src/plugins/ioam/analyse/ioam_summary_export.c
src/plugins/ioam/analyse/ip6/node.c
src/plugins/ioam/export-common/ioam_export.h
src/plugins/lb/lb.c
src/plugins/map/map.h
src/plugins/nat/nat_det.h
src/svm/message_queue.c
src/svm/ssvm.h
src/svm/svm_fifo.c
src/svm/svm_fifo.h
src/vlib/buffer_funcs.h
src/vlib/threads.c
src/vlib/threads.h
src/vlib/unix/cj.c
src/vnet/classify/vnet_classify.c
src/vnet/devices/virtio/vhost_user_output.c
src/vnet/dns/dns.h
src/vnet/gre/gre.c
src/vnet/interface.h
src/vnet/ip/ip4_mtrie.c
src/vnet/ipfix-export/flow_report_classify.c
src/vnet/mfib/mfib_forward.c
src/vnet/mfib/mfib_signal.c
src/vnet/pg/output.c
src/vnet/session-apps/echo_client.c
src/vnet/util/refcount.h
src/vpp/stats/stats_to_be_deprecated.c
src/vppinfra/CMakeLists.txt
src/vppinfra/atomics.h [new file with mode: 0644]
src/vppinfra/clib.h
src/vppinfra/elog.c
src/vppinfra/elog.h
src/vppinfra/lock.h
src/vppinfra/maplog.h
src/vppinfra/mheap.c
src/vppinfra/smp.h

index 14027a9..dd48138 100644 (file)
@@ -43,11 +43,11 @@ typedef struct {
 extern jvpp_main_t jvpp_main __attribute__((aligned (64)));
 
 static_always_inline u32 vppjni_get_context_id(jvpp_main_t * jm) {
-    return __sync_add_and_fetch(&jm->context_id, 1);
+    return clib_atomic_add_fetch(&jm->context_id, 1);
 }
 
 static_always_inline void vppjni_lock(jvpp_main_t * jm, u32 tag) {
-    while (__sync_lock_test_and_set(&jm->lock, 1))
+    while (clib_atomic_test_and_set(&jm->lock))
         ;
     jm->tag = tag;
 }
index 6ac5983..cd23f39 100644 (file)
@@ -410,7 +410,7 @@ acl_fa_deactivate_session (acl_main_t * am, u32 sw_if_index,
     }
 
   sess->deleted = 1;
-  clib_smp_atomic_add (&am->fa_session_total_deactivations, 1);
+  clib_atomic_fetch_add (&am->fa_session_total_deactivations, 1);
   clib_mem_set_heap (oldheap);
 }
 
@@ -432,7 +432,7 @@ acl_fa_put_session (acl_main_t * am, u32 sw_if_index,
   vec_validate (pw->fa_session_dels_by_sw_if_index, sw_if_index);
   clib_mem_set_heap (oldheap);
   pw->fa_session_dels_by_sw_if_index[sw_if_index]++;
-  clib_smp_atomic_add (&am->fa_session_total_dels, 1);
+  clib_atomic_fetch_add (&am->fa_session_total_dels, 1);
 }
 
 always_inline int
@@ -571,7 +571,7 @@ acl_fa_add_session (acl_main_t * am, int is_input, int is_ip6,
   vec_validate (pw->fa_session_adds_by_sw_if_index, sw_if_index);
   clib_mem_set_heap (oldheap);
   pw->fa_session_adds_by_sw_if_index[sw_if_index]++;
-  clib_smp_atomic_add (&am->fa_session_total_adds, 1);
+  clib_atomic_fetch_add (&am->fa_session_total_adds, 1);
   return sess;
 }
 
index b400133..ea00df2 100644 (file)
@@ -162,7 +162,7 @@ static_always_inline
       if (PREDICT_FALSE (xd->lockp != 0))
        {
          queue_id = queue_id % xd->tx_q_used;
-         while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
+         while (clib_atomic_test_and_set (xd->lockp[queue_id]))
            /* zzzz */
            queue_id = (queue_id + 1) % xd->tx_q_used;
        }
@@ -191,7 +191,7 @@ static_always_inline
        }
 
       if (PREDICT_FALSE (xd->lockp != 0))
-       *xd->lockp[queue_id] = 0;
+       clib_atomic_release (xd->lockp[queue_id]);
 
       if (PREDICT_FALSE (n_sent < 0))
        {
index 881b045..3aec7a7 100644 (file)
@@ -191,7 +191,7 @@ ip6_ioam_analyse_set_paths_down (ioam_analyser_data_t * data)
   ioam_path_map_t *path;
   u8 k, i;
 
-  while (__sync_lock_test_and_set (data->writer_lock, 1))
+  while (clib_atomic_test_and_set (data->writer_lock))
     ;
 
   trace_data = &data->trace_data;
@@ -208,7 +208,7 @@ ip6_ioam_analyse_set_paths_down (ioam_analyser_data_t * data)
       for (k = 0; k < trace_record->num_nodes; k++)
        path[k].state_up = 0;
     }
-  *(data->writer_lock) = 0;
+  clib_atomic_release (data->writer_lock);
 }
 
 always_inline void
@@ -225,7 +225,7 @@ ip6_ioam_analyse_hbh_trace_loopback (ioam_analyser_data_t * data,
   u16 size_of_traceopt_per_node;
   u16 size_of_all_traceopts;
 
-  while (__sync_lock_test_and_set (data->writer_lock, 1))
+  while (clib_atomic_test_and_set (data->writer_lock))
     ;
 
   trace_data = &data->trace_data;
@@ -277,7 +277,7 @@ ip6_ioam_analyse_hbh_trace_loopback (ioam_analyser_data_t * data,
        }
     }
 end:
-  *(data->writer_lock) = 0;
+  clib_atomic_release (data->writer_lock);
 }
 
 always_inline int
@@ -295,7 +295,7 @@ ip6_ioam_analyse_hbh_trace (ioam_analyser_data_t * data,
   ioam_path_map_t *path = NULL;
   ioam_analyse_trace_record *trace_record;
 
-  while (__sync_lock_test_and_set (data->writer_lock, 1))
+  while (clib_atomic_test_and_set (data->writer_lock))
     ;
 
   trace_data = &data->trace_data;
@@ -409,7 +409,7 @@ found_match:
        (u32) ((sum + delay) / (data->seqno_data.rx_packets + 1));
     }
 DONE:
-  *(data->writer_lock) = 0;
+  clib_atomic_release (data->writer_lock);
   return 0;
 }
 
@@ -417,13 +417,14 @@ always_inline int
 ip6_ioam_analyse_hbh_e2e (ioam_analyser_data_t * data,
                          ioam_e2e_packet_t * e2e, u16 len)
 {
-  while (__sync_lock_test_and_set (data->writer_lock, 1))
+  while (clib_atomic_test_and_set (data->writer_lock))
     ;
 
   ioam_analyze_seqno (&data->seqno_data,
                      (u64) clib_net_to_host_u32 (e2e->e2e_data));
 
-  *(data->writer_lock) = 0;
+  clib_atomic_release (data->writer_lock);
+
   return 0;
 }
 
@@ -510,7 +511,8 @@ ioam_analyse_init_data (ioam_analyser_data_t * data)
 
   data->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
                                              CLIB_CACHE_LINE_BYTES);
-  *(data->writer_lock) = 0;
+
+  clib_atomic_release (data->writer_lock);
 
   trace_data = &(data->trace_data);
   for (j = 0; j < IOAM_MAX_PATHS_PER_FLOW; j++)
index c118bfa..440dcca 100644 (file)
@@ -150,7 +150,7 @@ ioam_analyse_add_ipfix_record (flow_report_t * fr,
                               ip6_address_t * src, ip6_address_t * dst,
                               u16 src_port, u16 dst_port)
 {
-  while (__sync_lock_test_and_set (record->writer_lock, 1))
+  while (clib_atomic_test_and_set (record->writer_lock))
     ;
 
   int field_index = 0;
@@ -259,7 +259,7 @@ ioam_analyse_add_ipfix_record (flow_report_t * fr,
   *(record->chached_data_list) = *record;
   record->chached_data_list->chached_data_list = NULL;
 
-  *(record->writer_lock) = 0;
+  clib_atomic_release (record->writer_lock);
   return offset;
 }
 
index 902fb9b..7a8d71d 100644 (file)
@@ -256,17 +256,17 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  data0 = ioam_analyse_get_data_from_flow_id (flow_id0);
                  data1 = ioam_analyse_get_data_from_flow_id (flow_id1);
 
-                 while (__sync_lock_test_and_set (data0->writer_lock, 1))
+                 while (clib_atomic_test_and_set (data0->writer_lock))
                    ;
                  data0->pkt_counter++;
                  data0->bytes_counter += p_len0;
-                 *(data0->writer_lock) = 0;
+                 clib_atomic_release (data0->writer_lock);
 
-                 while (__sync_lock_test_and_set (data1->writer_lock, 1))
+                 while (clib_atomic_test_and_set (data1->writer_lock))
                    ;
                  data1->pkt_counter++;
                  data1->bytes_counter += p_len1;
-                 *(data1->writer_lock) = 0;
+                 clib_atomic_release (data1->writer_lock);
                }
              else if (error0 == 0)
                {
@@ -274,11 +274,11 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  pkts_failed++;
 
                  data0 = ioam_analyse_get_data_from_flow_id (flow_id0);
-                 while (__sync_lock_test_and_set (data0->writer_lock, 1))
+                 while (clib_atomic_test_and_set (data0->writer_lock))
                    ;
                  data0->pkt_counter++;
                  data0->bytes_counter += p_len0;
-                 *(data0->writer_lock) = 0;
+                 clib_atomic_release (data0->writer_lock);
                }
              else if (error1 == 0)
                {
@@ -286,11 +286,11 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  pkts_failed++;
 
                  data1 = ioam_analyse_get_data_from_flow_id (flow_id1);
-                 while (__sync_lock_test_and_set (data1->writer_lock, 1))
+                 while (clib_atomic_test_and_set (data1->writer_lock))
                    ;
                  data1->pkt_counter++;
                  data1->bytes_counter += p_len1;
-                 *(data1->writer_lock) = 0;
+                 clib_atomic_release (data1->writer_lock);
                }
              else
                pkts_failed += 2;
@@ -327,12 +327,12 @@ ip6_ioam_analyse_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                {
                  pkts_analysed++;
                  data0 = ioam_analyse_get_data_from_flow_id (flow_id0);
-                 while (__sync_lock_test_and_set (data0->writer_lock, 1))
+                 while (clib_atomic_test_and_set (data0->writer_lock))
                    ;
                  data0->pkt_counter++;
                  data0->bytes_counter +=
                    clib_net_to_host_u16 (ip60->payload_length);
-                 *(data0->writer_lock) = 0;
+                 clib_atomic_release (data0->writer_lock);
                }
              else
                pkts_failed++;
@@ -393,13 +393,13 @@ ip6_ioam_analyse_hbh_pot (u32 flow_id, ip6_hop_by_hop_option_t * opt0,
   pot_profile = pot_profile_get_active ();
   ret = pot_validate (pot_profile, cumulative, random);
 
-  while (__sync_lock_test_and_set (data->writer_lock, 1))
+  while (clib_atomic_test_and_set (data->writer_lock))
     ;
 
   (0 == ret) ? (data->pot_data.sfc_validated_count++) :
     (data->pot_data.sfc_invalidated_count++);
 
-  *(data->writer_lock) = 0;
+  clib_atomic_release (data->writer_lock);
   return 0;
 }
 
index 36d71d2..6e64095 100644 (file)
@@ -436,11 +436,11 @@ ioam_export_process_common (ioam_export_main_t * em, vlib_main_t * vm,
           */
          for (i = 0; i < vec_len (thread_index); i++)
            {
-             while (__sync_lock_test_and_set (em->lockp[thread_index[i]], 1))
+             while (clib_atomic_test_and_set (em->lockp[thread_index[i]]))
                ;
              em->buffer_per_thread[thread_index[i]] =
                vec_pop (vec_buffer_indices);
-             *em->lockp[thread_index[i]] = 0;
+             clib_atomic_release (em->lockp[thread_index[i]]);
            }
 
          /* Send the buffers */
@@ -479,7 +479,7 @@ do {                                                                           \
   from = vlib_frame_vector_args (F);                                           \
   n_left_from = (F)->n_vectors;                                                \
   next_index = (N)->cached_next_index;                                         \
-  while (__sync_lock_test_and_set ((EM)->lockp[(VM)->thread_index], 1));       \
+  while (clib_atomic_test_and_set ((EM)->lockp[(VM)->thread_index]));         \
   my_buf = ioam_export_get_my_buffer (EM, (VM)->thread_index);                 \
   my_buf->touched_at = vlib_time_now (VM);                                     \
   while (n_left_from > 0)                                                      \
index 1936c19..76bbfa0 100644 (file)
@@ -27,8 +27,8 @@
 
 lb_main_t lb_main;
 
-#define lb_get_writer_lock() do {} while(__sync_lock_test_and_set (lb_main.writer_lock, 1))
-#define lb_put_writer_lock() lb_main.writer_lock[0] = 0
+#define lb_get_writer_lock() do {} while(clib_atomic_test_and_set (lb_main.writer_lock))
+#define lb_put_writer_lock() clib_atomic_release (lb_main.writer_lock)
 
 static void lb_as_stack (lb_as_t *as);
 
index 6587a8a..1e07b59 100644 (file)
@@ -474,7 +474,7 @@ map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id,
 void
 map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop);
 
-#define map_ip4_reass_lock() while (__sync_lock_test_and_set(map_main.ip4_reass_lock, 1)) {}
+#define map_ip4_reass_lock() while (clib_atomic_test_and_set (map_main.ip4_reass_lock)) {}
 #define map_ip4_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip4_reass_lock = 0;} while(0)
 
 static_always_inline void
@@ -499,7 +499,7 @@ map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id,
 void
 map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop);
 
-#define map_ip6_reass_lock() while (__sync_lock_test_and_set(map_main.ip6_reass_lock, 1)) {}
+#define map_ip6_reass_lock() while (clib_atomic_test_and_set (map_main.ip6_reass_lock)) {}
 #define map_ip6_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip6_reass_lock = 0;} while(0)
 
 int
@@ -555,14 +555,14 @@ static inline void
 map_domain_counter_lock (map_main_t *mm)
 {
   if (mm->counter_lock)
-    while (__sync_lock_test_and_set(mm->counter_lock, 1))
+    while (clib_atomic_test_and_set (mm->counter_lock))
       /* zzzz */ ;
 }
 static inline void
 map_domain_counter_unlock (map_main_t *mm)
 {
   if (mm->counter_lock)
-    *mm->counter_lock = 0;
+    clib_atomic_release (mm->counter_lock);
 }
 
 
index ce876bc..7878a4c 100644 (file)
@@ -159,13 +159,13 @@ snat_det_ses_create (snat_det_map_t * dm, ip4_address_t * in_addr,
     {
       if (!dm->sessions[i + user_offset].in_port)
        {
-         if (__sync_bool_compare_and_swap
+         if (clib_atomic_bool_cmp_and_swap
              (&dm->sessions[i + user_offset].in_port, 0, in_port))
            {
              dm->sessions[i + user_offset].out.as_u64 = out->as_u64;
              dm->sessions[i + user_offset].state = SNAT_SESSION_UNKNOWN;
              dm->sessions[i + user_offset].expire = 0;
-             __sync_add_and_fetch (&dm->ses_num, 1);
+             clib_atomic_add_fetch (&dm->ses_num, 1);
              return &dm->sessions[i + user_offset];
            }
        }
@@ -179,10 +179,10 @@ snat_det_ses_create (snat_det_map_t * dm, ip4_address_t * in_addr,
 always_inline void
 snat_det_ses_close (snat_det_map_t * dm, snat_det_session_t * ses)
 {
-  if (__sync_bool_compare_and_swap (&ses->in_port, ses->in_port, 0))
+  if (clib_atomic_bool_cmp_and_swap (&ses->in_port, ses->in_port, 0))
     {
       ses->out.as_u64 = 0;
-      __sync_add_and_fetch (&dm->ses_num, -1);
+      clib_atomic_add_fetch (&dm->ses_num, -1);
     }
 }
 
index a73a56d..13d089a 100644 (file)
@@ -108,7 +108,7 @@ svm_msg_q_alloc_msg_w_ring (svm_msg_q_t * mq, u32 ring_index)
   msg.ring_index = ring - mq->rings;
   msg.elt_index = ring->tail;
   ring->tail = (ring->tail + 1) % ring->nitems;
-  __sync_fetch_and_add (&ring->cursize, 1);
+  clib_atomic_fetch_add (&ring->cursize, 1);
   return msg;
 }
 
@@ -155,7 +155,7 @@ svm_msg_q_alloc_msg (svm_msg_q_t * mq, u32 nbytes)
     msg.ring_index = ring - mq->rings;
     msg.elt_index = ring->tail;
     ring->tail = (ring->tail + 1) % ring->nitems;
-    __sync_fetch_and_add (&ring->cursize, 1);
+    clib_atomic_fetch_add (&ring->cursize, 1);
     break;
   }
   return msg;
@@ -185,7 +185,7 @@ svm_msg_q_free_msg (svm_msg_q_t * mq, svm_msg_q_msg_t * msg)
       /* for now, expect messages to be processed in order */
       ASSERT (0);
     }
-  __sync_fetch_and_sub (&ring->cursize, 1);
+  clib_atomic_fetch_sub (&ring->cursize, 1);
 }
 
 static int
index 8677f56..5b2bf0d 100644 (file)
@@ -103,7 +103,7 @@ ssvm_lock (ssvm_shared_header_t * h, u32 my_pid, u32 tag)
       return;
     }
 
-  while (__sync_lock_test_and_set (&h->lock, 1))
+  while (clib_atomic_test_and_set (&h->lock))
     ;
 
   h->owner_pid = my_pid;
@@ -114,7 +114,7 @@ ssvm_lock (ssvm_shared_header_t * h, u32 my_pid, u32 tag)
 always_inline void
 ssvm_lock_non_recursive (ssvm_shared_header_t * h, u32 tag)
 {
-  while (__sync_lock_test_and_set (&h->lock, 1))
+  while (clib_atomic_test_and_set (&h->lock))
     ;
 
   h->tag = tag;
index 4eae0a1..aa523c6 100644 (file)
@@ -513,7 +513,7 @@ CLIB_MARCH_FN (svm_fifo_enqueue_nowait, int, svm_fifo_t * f, u32 max_bytes,
 
   /* Atomically increase the queue length */
   ASSERT (cursize + total_copy_bytes <= nitems);
-  __sync_fetch_and_add (&f->cursize, total_copy_bytes);
+  clib_atomic_fetch_add (&f->cursize, total_copy_bytes);
 
   return (total_copy_bytes);
 }
@@ -659,7 +659,7 @@ CLIB_MARCH_FN (svm_fifo_dequeue_nowait, int, svm_fifo_t * f, u32 max_bytes,
 
   ASSERT (f->head <= nitems);
   ASSERT (cursize >= total_copy_bytes);
-  __sync_fetch_and_sub (&f->cursize, total_copy_bytes);
+  clib_atomic_fetch_sub (&f->cursize, total_copy_bytes);
 
   return (total_copy_bytes);
 }
@@ -757,7 +757,7 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes)
 
   ASSERT (f->head <= nitems);
   ASSERT (cursize >= total_drop_bytes);
-  __sync_fetch_and_sub (&f->cursize, total_drop_bytes);
+  clib_atomic_fetch_sub (&f->cursize, total_drop_bytes);
 
   return total_drop_bytes;
 }
@@ -766,7 +766,7 @@ void
 svm_fifo_dequeue_drop_all (svm_fifo_t * f)
 {
   f->head = f->tail;
-  __sync_fetch_and_sub (&f->cursize, f->cursize);
+  clib_atomic_fetch_sub (&f->cursize, f->cursize);
 }
 
 int
@@ -813,7 +813,7 @@ svm_fifo_segments_free (svm_fifo_t * f, svm_fifo_segment_t * fs)
       f->head = (f->head + fs[0].len) % f->nitems;
       total_drop_bytes = fs[0].len;
     }
-  __sync_fetch_and_sub (&f->cursize, total_drop_bytes);
+  clib_atomic_fetch_sub (&f->cursize, total_drop_bytes);
 }
 
 u32
index d7852a7..e049d3e 100644 (file)
@@ -169,7 +169,7 @@ svm_fifo_set_event (svm_fifo_t * f)
 always_inline void
 svm_fifo_unset_event (svm_fifo_t * f)
 {
-  __sync_lock_release (&f->has_event);
+  clib_atomic_release (&f->has_event);
 }
 
 static inline void
index d8abdf3..438bf7e 100644 (file)
@@ -923,7 +923,7 @@ vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
     tail->total_length_not_including_first_buffer;
 
 next_segment:
-  __sync_add_and_fetch (&tail->n_add_refs, 1);
+  clib_atomic_add_fetch (&tail->n_add_refs, 1);
 
   if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
     {
@@ -1153,7 +1153,7 @@ vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
 
   oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
 
-  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
+  while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
     ;
 
   p = hash_get (vlib_buffer_state_validation_hash, b);
@@ -1196,7 +1196,7 @@ vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
 
   oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
 
-  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
+  while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
     ;
 
   hash_set (vlib_buffer_state_validation_hash, b, expected);
index 981209b..7f407e9 100644 (file)
@@ -516,7 +516,7 @@ vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
 
   ASSERT (fq);
 
-  new_tail = __sync_add_and_fetch (&fq->tail, 1);
+  new_tail = clib_atomic_add_fetch (&fq->tail, 1);
 
   /* Wait until a ring slot is available */
   while (new_tail >= fq->head + fq->nelts)
@@ -576,12 +576,12 @@ vlib_worker_thread_init (vlib_worker_thread_t * w)
     {
 
       /* Initial barrier sync, for both worker and i/o threads */
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
 
       while (*vlib_worker_threads->wait_at_barrier)
        ;
 
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
     }
 }
 
@@ -1310,22 +1310,6 @@ cpu_config (vlib_main_t * vm, unformat_input_t * input)
 
 VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
 
-#if !defined (__x86_64__) && !defined (__i386__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__)
-void
-__sync_fetch_and_add_8 (void)
-{
-  fformat (stderr, "%s called\n", __FUNCTION__);
-  abort ();
-}
-
-void
-__sync_add_and_fetch_8 (void)
-{
-  fformat (stderr, "%s called\n", __FUNCTION__);
-  abort ();
-}
-#endif
-
 void vnet_main_fixup (vlib_fork_fixup_t which) __attribute__ ((weak));
 void
 vnet_main_fixup (vlib_fork_fixup_t which)
@@ -1493,8 +1477,8 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm)
 
       /* Do per thread rebuilds in parallel */
       refork_needed = 1;
-      clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
-                          (vec_len (vlib_mains) - 1));
+      clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
+                            (vec_len (vlib_mains) - 1));
       now = vlib_time_now (vm);
       t_update_main = now - vm->barrier_epoch;
     }
index bb7c164..0e9cba5 100644 (file)
@@ -414,7 +414,7 @@ vlib_worker_thread_barrier_check (void)
          ed->thread_index = thread_index;
        }
 
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
       if (CLIB_DEBUG > 0)
        {
          vm = vlib_get_main ();
@@ -424,7 +424,7 @@ vlib_worker_thread_barrier_check (void)
        ;
       if (CLIB_DEBUG > 0)
        vm->parked_at_barrier = 0;
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
 
       if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
        {
@@ -450,8 +450,8 @@ vlib_worker_thread_barrier_check (void)
            }
 
          vlib_worker_thread_node_refork ();
-         clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
-                              -1);
+         clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
+                                -1);
          while (*vlib_worker_threads->node_reforks_required)
            ;
        }
@@ -519,7 +519,7 @@ vlib_get_frame_queue_elt (u32 frame_queue_index, u32 index)
   fq = fqm->vlib_frame_queues[index];
   ASSERT (fq);
 
-  new_tail = __sync_add_and_fetch (&fq->tail, 1);
+  new_tail = clib_atomic_add_fetch (&fq->tail, 1);
 
   /* Wait until a ring slot is available */
   while (new_tail >= fq->head_hint + fq->nelts)
index 0232ea2..7757146 100644 (file)
@@ -44,7 +44,7 @@ cj_log (u32 type, void *data0, void *data1)
   if (cjm->enable == 0)
     return;
 
-  new_tail = __sync_add_and_fetch (&cjm->tail, 1);
+  new_tail = clib_atomic_add_fetch (&cjm->tail, 1);
 
   r = (cj_record_t *) & (cjm->records[new_tail & (cjm->num_records - 1)]);
   r->time = vlib_time_now (cjm->vlib_main);
index 9d8694a..52cabbc 100644 (file)
@@ -444,7 +444,7 @@ vnet_classify_add_del (vnet_classify_table_t * t,
 
   hash >>= t->log2_nbuckets;
 
-  while (__sync_lock_test_and_set (t->writer_lock, 1))
+  while (clib_atomic_test_and_set (t->writer_lock))
     ;
 
   /* First elt in the bucket? */
index c77cdb6..dab8fa5 100644 (file)
@@ -122,7 +122,7 @@ vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
 static_always_inline int
 vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
 {
-  return __sync_lock_test_and_set (vui->vring_locks[qid], 1);
+  return clib_atomic_test_and_set (vui->vring_locks[qid]);
 }
 
 /**
@@ -141,7 +141,7 @@ vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
 static_always_inline void
 vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
 {
-  *vui->vring_locks[qid] = 0;
+  clib_atomic_release (vui->vring_locks[qid]);
 }
 
 static_always_inline void
index f0edd8c..59a61ed 100644 (file)
@@ -187,7 +187,7 @@ dns_cache_lock (dns_main_t * dm)
 {
   if (dm->cache_lock)
     {
-      while (__sync_lock_test_and_set (dm->cache_lock, 1))
+      while (clib_atomic_test_and_set (dm->cache_lock))
        ;
     }
 }
index e82befe..070c78e 100644 (file)
@@ -406,7 +406,7 @@ gre_interface_tx (vlib_main_t * vm,
              /* Encap GRE seq# and ERSPAN type II header */
              vlib_buffer_advance (b0, -sizeof (erspan_t2_t));
              erspan_t2_t *h0 = vlib_buffer_get_current (b0);
-             u32 seq_num = clib_smp_atomic_add (&gt0->gre_sn->seq_num, 1);
+             u32 seq_num = clib_atomic_fetch_add (&gt0->gre_sn->seq_num, 1);
              u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
              h0->seq_num = clib_host_to_net_u32 (seq_num);
              h0->t2_u64 = hdr;
@@ -418,7 +418,7 @@ gre_interface_tx (vlib_main_t * vm,
              /* Encap GRE seq# and ERSPAN type II header */
              vlib_buffer_advance (b1, -sizeof (erspan_t2_t));
              erspan_t2_t *h1 = vlib_buffer_get_current (b1);
-             u32 seq_num = clib_smp_atomic_add (&gt1->gre_sn->seq_num, 1);
+             u32 seq_num = clib_atomic_fetch_add (&gt1->gre_sn->seq_num, 1);
              u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
              h1->seq_num = clib_host_to_net_u32 (seq_num);
              h1->t2_u64 = hdr;
@@ -473,7 +473,7 @@ gre_interface_tx (vlib_main_t * vm,
              /* Encap GRE seq# and ERSPAN type II header */
              vlib_buffer_advance (b0, -sizeof (erspan_t2_t));
              erspan_t2_t *h0 = vlib_buffer_get_current (b0);
-             u32 seq_num = clib_smp_atomic_add (&gt0->gre_sn->seq_num, 1);
+             u32 seq_num = clib_atomic_fetch_add (&gt0->gre_sn->seq_num, 1);
              u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
              h0->seq_num = clib_host_to_net_u32 (seq_num);
              h0->t2_u64 = hdr;
index 6ca2b0d..7ce6aaf 100644 (file)
@@ -872,7 +872,7 @@ static inline void
 vnet_interface_counter_lock (vnet_interface_main_t * im)
 {
   if (im->sw_if_counter_lock)
-    while (__sync_lock_test_and_set (im->sw_if_counter_lock, 1))
+    while (clib_atomic_test_and_set (im->sw_if_counter_lock))
       /* zzzz */ ;
 }
 
@@ -880,7 +880,7 @@ static inline void
 vnet_interface_counter_unlock (vnet_interface_main_t * im)
 {
   if (im->sw_if_counter_lock)
-    *im->sw_if_counter_lock = 0;
+    clib_atomic_release (im->sw_if_counter_lock);
 }
 
 void vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add);
index fbb8a74..e6425ca 100755 (executable)
@@ -254,7 +254,7 @@ set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m,
       else if (new_leaf_dst_address_bits >=
               ply->dst_address_bits_of_leaves[i])
        {
-         __sync_val_compare_and_swap (&ply->leaves[i], old_leaf, new_leaf);
+         clib_atomic_cmp_and_swap (&ply->leaves[i], old_leaf, new_leaf);
          ASSERT (ply->leaves[i] == new_leaf);
          ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
          ply->n_non_empty_leafs += ip4_fib_mtrie_leaf_is_non_empty (ply, i);
@@ -319,8 +319,8 @@ set_leaf (ip4_fib_mtrie_t * m,
 
                  old_ply->dst_address_bits_of_leaves[i] =
                    a->dst_address_length;
-                 __sync_val_compare_and_swap (&old_ply->leaves[i], old_leaf,
-                                              new_leaf);
+                 clib_atomic_cmp_and_swap (&old_ply->leaves[i], old_leaf,
+                                           new_leaf);
                  ASSERT (old_ply->leaves[i] == new_leaf);
 
                  old_ply->n_non_empty_leafs +=
@@ -378,8 +378,8 @@ set_leaf (ip4_fib_mtrie_t * m,
          /* Refetch since ply_create may move pool. */
          old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);
 
-         __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf,
-                                      new_leaf);
+         clib_atomic_cmp_and_swap (&old_ply->leaves[dst_byte], old_leaf,
+                                   new_leaf);
          ASSERT (old_ply->leaves[dst_byte] == new_leaf);
          old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
 
@@ -451,8 +451,8 @@ set_root_leaf (ip4_fib_mtrie_t * m,
                   * the new one */
                  old_ply->dst_address_bits_of_leaves[slot] =
                    a->dst_address_length;
-                 __sync_val_compare_and_swap (&old_ply->leaves[slot],
-                                              old_leaf, new_leaf);
+                 clib_atomic_cmp_and_swap (&old_ply->leaves[slot],
+                                           old_leaf, new_leaf);
                  ASSERT (old_ply->leaves[slot] == new_leaf);
                }
              else
@@ -498,8 +498,8 @@ set_root_leaf (ip4_fib_mtrie_t * m,
                        ply_base_len);
          new_ply = get_next_ply_for_leaf (m, new_leaf);
 
-         __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf,
-                                      new_leaf);
+         clib_atomic_cmp_and_swap (&old_ply->leaves[dst_byte], old_leaf,
+                                   new_leaf);
          ASSERT (old_ply->leaves[dst_byte] == new_leaf);
          old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
        }
index 8fb73fc..196cb72 100644 (file)
@@ -197,7 +197,7 @@ ipfix_classify_send_flows (flow_report_main_t * frm,
 
   t = pool_elt_at_index (vcm->tables, table->classify_table_index);
 
-  while (__sync_lock_test_and_set (t->writer_lock, 1))
+  while (clib_atomic_test_and_set (t->writer_lock))
     ;
 
   for (i = 0; i < t->nbuckets; i++)
@@ -385,7 +385,7 @@ flush:
       bi0 = ~0;
     }
 
-  *(t->writer_lock) = 0;
+  clib_atomic_release (t->writer_lock);
   return f;
 }
 
index 3d0e0d4..4b12132 100644 (file)
@@ -300,8 +300,8 @@ mfib_forward_itf_signal (vlib_main_t *vm,
 {
     mfib_itf_flags_t old_flags;
 
-    old_flags = __sync_fetch_and_or(&mfi->mfi_flags,
-                                    MFIB_ITF_FLAG_SIGNAL_PRESENT);
+    old_flags = clib_atomic_fetch_or(&mfi->mfi_flags,
+                                    MFIB_ITF_FLAG_SIGNAL_PRESENT);
 
     if (!(old_flags & MFIB_ITF_FLAG_SIGNAL_PRESENT))
     {
index ce9a664..176e8ec 100644 (file)
@@ -71,14 +71,14 @@ mfib_signal_module_init (void)
 static inline void
 mfib_signal_lock_aquire (void)
 {
-    while (__sync_lock_test_and_set (&mfib_signal_pending.mip_lock, 1))
+    while (clib_atomic_test_and_set (&mfib_signal_pending.mip_lock))
         ;
 }
 
 static inline void
 mfib_signal_lock_release (void)
 {
-    mfib_signal_pending.mip_lock = 0;
+  clib_atomic_release(&mfib_signal_pending.mip_lock);
 }
 
 #define MFIB_SIGNAL_CRITICAL_SECTION(_body) \
@@ -117,8 +117,8 @@ mfib_signal_send_one (struct vl_api_registration_ *reg,
         mfs = pool_elt_at_index(mfib_signal_pool, si);
         mfi = mfib_itf_get(mfs->mfs_itf);
         mfi->mfi_si = INDEX_INVALID;
-        __sync_fetch_and_and(&mfi->mfi_flags,
-                             ~MFIB_ITF_FLAG_SIGNAL_PRESENT);
+        clib_atomic_fetch_and(&mfi->mfi_flags,
+                             ~MFIB_ITF_FLAG_SIGNAL_PRESENT);
 
 
         vl_mfib_signal_send_one(reg, context, mfs);
index ab57dee..016e5b3 100644 (file)
@@ -54,7 +54,7 @@ pg_output (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
   pg_interface_t *pif = pool_elt_at_index (pg->interfaces, rd->dev_instance);
 
   if (PREDICT_FALSE (pif->lockp != 0))
-    while (__sync_lock_test_and_set (pif->lockp, 1))
+    while (clib_atomic_test_and_set (pif->lockp))
       ;
 
   while (n_left > 0)
@@ -82,7 +82,8 @@ pg_output (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
 
   vlib_buffer_free (vm, vlib_frame_args (frame), n_buffers);
   if (PREDICT_FALSE (pif->lockp != 0))
-    *pif->lockp = 0;
+    clib_atomic_release (pif->lockp);
+
   return n_buffers;
 }
 
index 1ece019..c3d838d 100644 (file)
@@ -263,8 +263,8 @@ echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
        {
          stream_session_t *s;
 
-         __sync_fetch_and_add (&ecm->tx_total, sp->bytes_sent);
-         __sync_fetch_and_add (&ecm->rx_total, sp->bytes_received);
+         clib_atomic_fetch_add (&ecm->tx_total, sp->bytes_sent);
+         clib_atomic_fetch_add (&ecm->rx_total, sp->bytes_received);
          s = session_get_from_handle_if_valid (sp->vpp_session_handle);
 
          if (s)
@@ -276,7 +276,7 @@ echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 
              vec_delete (connections_this_batch, 1, i);
              i--;
-             __sync_fetch_and_add (&ecm->ready_connections, -1);
+             clib_atomic_fetch_add (&ecm->ready_connections, -1);
            }
          else
            {
@@ -408,7 +408,7 @@ echo_clients_session_connected_callback (u32 app_index, u32 api_context,
     }
 
   vec_add1 (ecm->connection_index_by_thread[thread_index], session_index);
-  __sync_fetch_and_add (&ecm->ready_connections, 1);
+  clib_atomic_fetch_add (&ecm->ready_connections, 1);
   if (ecm->ready_connections == ecm->expected_connections)
     {
       ecm->run_test = ECHO_CLIENTS_RUNNING;
index ea92148..873ab6d 100644 (file)
@@ -52,14 +52,14 @@ typedef struct {
 static_always_inline
 void vlib_refcount_lock (volatile u32 *counter_lock)
 {
-  while (__sync_lock_test_and_set (counter_lock, 1))
+  while (clib_atomic_test_and_set (counter_lock))
     ;
 }
 
 static_always_inline
 void vlib_refcount_unlock (volatile u32 *counter_lock)
 {
-  *counter_lock = 0;
+  clib_atomic_release (counter_lock);
 }
 
 void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
index 94d68c8..241bdd9 100644 (file)
@@ -221,7 +221,7 @@ dslock (stats_main_t * sm, int release_hint, int tag)
   if (release_hint)
     l->release_hint++;
 
-  while (__sync_lock_test_and_set (&l->lock, 1))
+  while (clib_atomic_test_and_set (&l->lock))
     /* zzzz */ ;
   l->tag = tag;
   l->thread_index = thread_index;
index b279f90..7103d60 100644 (file)
@@ -158,6 +158,7 @@ set(VPPINFRA_HEADERS
   tw_timer_template.c
   tw_timer_template.h
   types.h
+  atomics.h
   unix.h
   valgrind.h
   valloc.h
diff --git a/src/vppinfra/atomics.h b/src/vppinfra/atomics.h
new file mode 100644 (file)
index 0000000..8ddf138
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Copyright (c) 2018 Arm Limited. and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_clib_atomics_h
+#define included_clib_atomics_h
+
+/* Legacy __sync builtins */
+
+/* Full Barrier */
+#define clib_atomic_fetch_add(a, b) __sync_fetch_and_add(a, b)
+#define clib_atomic_fetch_sub(a, b) __sync_fetch_and_sub(a, b)
+#define clib_atomic_fetch_and(a, b) __sync_fetch_and_and(a, b)
+#define clib_atomic_fetch_xor(a, b) __sync_fetch_and_xor(a, b)
+#define clib_atomic_fetch_or(a, b) __sync_fetch_and_or(a, b)
+#define clib_atomic_fetch_nand(a, b) __sync_fetch_and_nand(a, b)
+
+#define clib_atomic_add_fetch(a, b) __sync_add_and_fetch(a, b)
+#define clib_atomic_sub_fetch(a, b) __sync_sub_and_fetch(a, b)
+#define clib_atomic_and_fetch(a, b) __sync_and_and_fetch(a, b)
+#define clib_atomic_xor_fetch(a, b) __sync_xor_and_fetch(a, b)
+#define clib_atomic_or_fetch(a, b) __sync_or_and_fetch(a, b)
+#define clib_atomic_nand_fetch(a, b) __sync_nand_and_fetch(a, b)
+
+#define clib_atomic_cmp_and_swap(addr,old,new) __sync_val_compare_and_swap(addr, old, new)
+#define clib_atomic_bool_cmp_and_swap(addr,old,new) __sync_bool_compare_and_swap(addr, old, new)
+
+/* Acquire Barrier */
+#define clib_atomic_test_and_set(a) __sync_lock_test_and_set(a, 1)
+/* Release Barrier */
+#define clib_atomic_release(a) __sync_lock_release(a)
+
+#endif /* included_clib_atomics_h */
index a6f8824..95dadd9 100644 (file)
@@ -46,6 +46,7 @@
 #endif
 
 #include <vppinfra/types.h>
+#include <vppinfra/atomics.h>
 
 /* Global DEBUG flag.  Setting this to 1 or 0 turns off
    ASSERT (see vppinfra/error.h) & other debugging code. */
index a86fade..c6902eb 100644 (file)
@@ -46,7 +46,7 @@ static inline void
 elog_lock (elog_main_t * em)
 {
   if (PREDICT_FALSE (em->lock != 0))
-    while (__sync_lock_test_and_set (em->lock, 1))
+    while (clib_atomic_test_and_set (em->lock))
       ;
 }
 
index d50c9a6..322c2c6 100644 (file)
@@ -313,7 +313,7 @@ elog_event_data_inline (elog_main_t * em,
   ASSERT (is_pow2 (vec_len (em->event_ring)));
 
   if (em->lock)
-    ei = clib_smp_atomic_add (&em->n_total_events, 1);
+    ei = clib_atomic_fetch_add (&em->n_total_events, 1);
   else
     ei = em->n_total_events++;
 
index dd79c40..4645378 100644 (file)
@@ -73,7 +73,7 @@ clib_spinlock_free (clib_spinlock_t * p)
 static_always_inline void
 clib_spinlock_lock (clib_spinlock_t * p)
 {
-  while (__sync_lock_test_and_set (&(*p)->lock, 1))
+  while (clib_atomic_test_and_set (&(*p)->lock))
     CLIB_PAUSE ();
   CLIB_LOCK_DBG (p);
 }
@@ -138,13 +138,13 @@ clib_rwlock_free (clib_rwlock_t * p)
 always_inline void
 clib_rwlock_reader_lock (clib_rwlock_t * p)
 {
-  while (__sync_lock_test_and_set (&(*p)->n_readers_lock, 1))
+  while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
     CLIB_PAUSE ();
 
   (*p)->n_readers += 1;
   if ((*p)->n_readers == 1)
     {
-      while (__sync_lock_test_and_set (&(*p)->writer_lock, 1))
+      while (clib_atomic_test_and_set (&(*p)->writer_lock))
        CLIB_PAUSE ();
     }
   CLIB_MEMORY_BARRIER ();
@@ -159,7 +159,7 @@ clib_rwlock_reader_unlock (clib_rwlock_t * p)
   ASSERT ((*p)->n_readers > 0);
   CLIB_LOCK_DBG_CLEAR (p);
 
-  while (__sync_lock_test_and_set (&(*p)->n_readers_lock, 1))
+  while (clib_atomic_test_and_set (&(*p)->n_readers_lock))
     CLIB_PAUSE ();
 
   (*p)->n_readers -= 1;
@@ -176,7 +176,7 @@ clib_rwlock_reader_unlock (clib_rwlock_t * p)
 always_inline void
 clib_rwlock_writer_lock (clib_rwlock_t * p)
 {
-  while (__sync_lock_test_and_set (&(*p)->writer_lock, 1))
+  while (clib_atomic_test_and_set (&(*p)->writer_lock))
     CLIB_PAUSE ();
   CLIB_LOCK_DBG (p);
 }
index f7d2f75..ea6a835 100644 (file)
@@ -137,7 +137,7 @@ clib_maplog_get_entry (clib_maplog_main_t * mm)
 
   ASSERT (mm->flags & CLIB_MAPLOG_FLAG_INIT);
 
-  my_record_index = __sync_fetch_and_add (&mm->next_record_index, 1);
+  my_record_index = clib_atomic_fetch_add (&mm->next_record_index, 1);
 
   /* Time to unmap and create a new logfile? */
   if (PREDICT_FALSE ((my_record_index & (mm->file_size_in_records - 1)) == 0))
index 5b71873..0a62943 100644 (file)
@@ -63,7 +63,7 @@ mheap_maybe_lock (void *v)
          return;
        }
 
-      while (__sync_lock_test_and_set (&h->lock, 1))
+      while (clib_atomic_test_and_set (&h->lock))
        ;
 
       h->owner_cpu = my_cpu;
index e4ab66a..7146e51 100644 (file)
@@ -41,9 +41,7 @@
 #include <vppinfra/cache.h>
 #include <vppinfra/os.h>       /* for os_panic */
 
-#define clib_smp_compare_and_swap(addr,new,old) __sync_val_compare_and_swap(addr,old,new)
 #define clib_smp_swap(addr,new) __sync_lock_test_and_set(addr,new)
-#define clib_smp_atomic_add(addr,increment) __sync_fetch_and_add(addr,increment)
 
 #if defined (i386) || defined (__x86_64__)
 #define clib_smp_pause() do { asm volatile ("pause"); } while (0)