return s;
}
-static_always_inline void
-memif_interface_lock (memif_if_t * mif)
-{
- if (PREDICT_FALSE (mif->lockp != 0))
- {
- while (__sync_lock_test_and_set (mif->lockp, 1))
- ;
- }
-}
-
-static_always_inline void
-memif_interface_unlock (memif_if_t * mif)
-{
- if (PREDICT_FALSE (mif->lockp != 0))
- *mif->lockp = 0;
-}
-
static_always_inline void
memif_prefetch_buffer_and_data (vlib_main_t * vm, u32 bi)
{
u16 head, tail;
u16 free_slots;
- memif_interface_lock (mif);
+ clib_spinlock_lock_if_init (&mif->lockp);
/* free consumed buffers */
CLIB_MEMORY_STORE_BARRIER ();
ring->head = head;
- memif_interface_unlock (mif);
+ clib_spinlock_unlock_if_init (&mif->lockp);
if (n_left)
{
}
}
- if (mif->lockp != 0)
- {
- clib_mem_free ((void *) mif->lockp);
- mif->lockp = 0;
- }
+ clib_spinlock_free (&mif->lockp);
mhash_unset (&mm->if_index_by_key, &mif->key, &mif->if_index);
vec_free (mif->socket_filename);
mif->connection.fd = mif->interrupt_line.fd = -1;
if (tm->n_vlib_mains > 1)
- {
- mif->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- memset ((void *) mif->lockp, 0, CLIB_CACHE_LINE_BYTES);
- }
+ clib_spinlock_init (&mif->lockp);
if (!args->hw_addr_set)
{
*------------------------------------------------------------------
*/
+#include <vppinfra/lock.h>
+
typedef struct
{
u16 version;
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
- volatile u32 *lockp;
+ clib_spinlock_t lockp;
u32 flags;
#define MEMIF_IF_FLAG_ADMIN_UP (1 << 0)
#define MEMIF_IF_FLAG_IS_SLAVE (1 << 1)
apif->next_rx_frame = 0;
if (tm->n_vlib_mains > 1)
- {
- apif->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- memset ((void *) apif->lockp, 0, CLIB_CACHE_LINE_BYTES);
- }
+ clib_spinlock_init (&apif->lockp);
{
unix_file_t template = { 0 };
*------------------------------------------------------------------
*/
+#include <vppinfra/lock.h>
+
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
- volatile u32 *lockp;
+ clib_spinlock_t lockp;
u8 *host_if_name;
int fd;
struct tpacket_req *rx_req;
struct tpacket2_hdr *tph;
u32 frame_not_ready = 0;
- if (PREDICT_FALSE (apif->lockp != 0))
- {
- while (__sync_lock_test_and_set (apif->lockp, 1))
- ;
- }
+ clib_spinlock_lock_if_init (&apif->lockp);
while (n_left > 0)
{
}
}
- if (PREDICT_FALSE (apif->lockp != 0))
- *apif->lockp = 0;
+ clib_spinlock_unlock_if_init (&apif->lockp);
if (PREDICT_FALSE (frame_not_ready))
vlib_error_count (vm, node->node_index,
netmap_if_t *nif = pool_elt_at_index (nm->interfaces, rd->dev_instance);
int cur_ring;
- if (PREDICT_FALSE (nif->lockp != 0))
- {
- while (__sync_lock_test_and_set (nif->lockp, 1))
- ;
- }
+ clib_spinlock_lock_if_init (&nif->lockp);
cur_ring = nif->first_tx_ring;
if (n_left < frame->n_vectors)
ioctl (nif->fd, NIOCTXSYNC, NULL);
- if (PREDICT_FALSE (nif->lockp != 0))
- *nif->lockp = 0;
+ clib_spinlock_unlock_if_init (&nif->lockp);
if (n_left)
vlib_error_count (vm, node->node_index,
nif->per_interface_next_index = ~0;
if (tm->n_vlib_mains > 1)
- {
- nif->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- memset ((void *) nif->lockp, 0, CLIB_CACHE_LINE_BYTES);
- }
+ clib_spinlock_init (&nif->lockp);
{
unix_file_t template = { 0 };
* SUCH DAMAGE.
*/
+#include <vppinfra/lock.h>
+
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
- volatile u32 *lockp;
+ clib_spinlock_t lockp;
u8 *host_if_name;
uword if_index;
u32 hw_if_index;
vppinfra/graph.h \
vppinfra/hash.h \
vppinfra/heap.h \
+ vppinfra/lock.h \
vppinfra/longjmp.h \
vppinfra/macros.h \
vppinfra/math.h \
--- /dev/null
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_clib_lock_h
+#define included_clib_lock_h
+
+#include <string.h>
+#include <unistd.h>
+#include <vppinfra/clib.h>
+#include <vppinfra/mem.h>
+#include <vppinfra/os.h>
+
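+/* Spinlock allocated on its own cache line to avoid false sharing
+ * with neighboring data.  In debug builds the current owner is also
+ * recorded to help troubleshoot deadlocks. */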
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u32 lock;
+#if CLIB_DEBUG > 0
+ pid_t pid;
+ uword cpu_index;
+ void *frame_address;
+#endif
+} *clib_spinlock_t;
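+
+/* Usage sketch (mirrors the driver changes in this patch): initialize
+ * the lock only when worker threads exist, then use the _if_init
+ * variants so the single-threaded case stays lock-free:
+ *
+ *   clib_spinlock_t lockp = 0;
+ *   if (tm->n_vlib_mains > 1)
+ *     clib_spinlock_init (&lockp);
+ *   ...
+ *   clib_spinlock_lock_if_init (&lockp);
+ *   ... critical section ...
+ *   clib_spinlock_unlock_if_init (&lockp);
+ */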
+
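+/* Allocate and zero a full cache line for the lock; a NULL
+ * clib_spinlock_t means the lock was never initialized. */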
+static inline void
+clib_spinlock_init (clib_spinlock_t * p)
+{
+ *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
+ memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
+}
+
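+/* Free the lock's storage; safe to call on an uninitialized lock. */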
+static inline void
+clib_spinlock_free (clib_spinlock_t * p)
+{
+ if (*p)
+ {
+ clib_mem_free ((void *) *p);
+ *p = 0;
+ }
+}
+
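+/* Spin until the test-and-set succeeds; on x86 the pause hint reduces
+ * bus traffic and power consumption while spinning. */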
+static_always_inline void
+clib_spinlock_lock (clib_spinlock_t * p)
+{
+  while (__sync_lock_test_and_set (&(*p)->lock, 1))
+    {
+#if __x86_64__
+      __builtin_ia32_pause ();
+#endif
+    }
+#if CLIB_DEBUG > 0
+  (*p)->frame_address = __builtin_frame_address (0);
+  (*p)->pid = getpid ();
+  (*p)->cpu_index = os_get_cpu_number ();
+#endif
+}
+
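+/* Take the lock only if it was initialized (i.e. multiple threads
+ * exist); a no-op in the single-threaded case. */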
+static_always_inline void
+clib_spinlock_lock_if_init (clib_spinlock_t * p)
+{
+ if (PREDICT_FALSE (*p != 0))
+ clib_spinlock_lock (p);
+}
+
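+/* Clear the debug ownership info, then release the lock behind a
+ * barrier so the critical section's writes are visible first. */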
+static_always_inline void
+clib_spinlock_unlock (clib_spinlock_t * p)
+{
+#if CLIB_DEBUG > 0
+  (*p)->frame_address = 0;
+  (*p)->pid = 0;
+  (*p)->cpu_index = 0;
+#endif
+  /* Make sure all pending reads/writes complete before we release */
+  CLIB_MEMORY_BARRIER ();
+  (*p)->lock = 0;
+}
+
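+/* Unlock counterpart of clib_spinlock_lock_if_init. */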
+static_always_inline void
+clib_spinlock_unlock_if_init (clib_spinlock_t * p)
+{
+ if (PREDICT_FALSE (*p != 0))
+ clib_spinlock_unlock (p);
+}
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */