* impacts observed timings.
*/
-static u32
-elog_id_for_msg_name (const char *msg_name)
+u32
+elog_global_id_for_msg_name (const char *msg_name)
{
uword *p, r;
static uword *h;
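/*
 * The rest of this function's body is elided by the hunk above.  A
 * minimal sketch of an interning helper of this shape, assuming the
 * usual hash_create_string () / elog_string () pattern (the control
 * flow below is an illustration, not the committed code):
 */
#if 0 /* illustrative sketch, not part of the patch */
u32
elog_global_id_for_msg_name_sketch (const char *msg_name)
{
  static uword *h;
  uword *p, r;

  if (!h)
    h = hash_create_string (0, sizeof (uword));

  p = hash_get_mem (h, msg_name);
  if (p)
    return (u32) p[0];

  /* Intern the name in the global elog string table, remember the id */
  r = elog_string (&vlib_global_main.elog_main, "%s", msg_name);
  hash_set_mem (h, format (0, "%s%c", msg_name, 0), r);
  return (u32) r;
}
#endif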
ed = ELOG_DATA (&vlib_global_main.elog_main, e);
ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
- ed->caller = elog_id_for_msg_name (vlib_worker_threads[0].barrier_caller);
+ ed->caller = elog_global_id_for_msg_name
+ (vlib_worker_threads[0].barrier_caller);
ed->t_entry = (int) (1000000.0 * t_entry);
ed->t_open = (int) (1000000.0 * t_open);
ed->t_closed = (int) (1000000.0 * t_closed);
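/* ELOG_DATA () logs one event of type e in the given elog_main_t and
   returns a pointer to that event's data area; the 1000000.0 scaling
   above records the barrier timings in microseconds. */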
ed = ELOG_DATA (&vlib_global_main.elog_main, e);
ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
- ed->caller = elog_id_for_msg_name (vlib_worker_threads[0].barrier_caller);
+ ed->caller = elog_global_id_for_msg_name
+ (vlib_worker_threads[0].barrier_caller);
}
static inline void
vlib_frame_queue_t *fq;
fq = clib_mem_alloc_aligned (sizeof (*fq), CLIB_CACHE_LINE_BYTES);
- memset (fq, 0, sizeof (*fq));
+ clib_memset (fq, 0, sizeof (*fq));
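/* clib_memset () is VPP's wrapper around memset (), and
   clib_memcpy_fast () selects an architecture-optimized copy where one
   is available; the memset/clib_memcpy renames throughout this patch
   are mechanical and behavior-preserving. */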
fq->nelts = nelts;
fq->vector_threshold = 128; // packets
vec_validate_aligned (fq->elts, nelts - 1, CLIB_CACHE_LINE_BYTES);
ASSERT (fq);
- new_tail = __sync_add_and_fetch (&fq->tail, 1);
+ new_tail = clib_atomic_add_fetch (&fq->tail, 1);
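/* clib_atomic_add_fetch () is defined over the compiler's
   __atomic_add_fetch () builtin, replacing the legacy, deprecated
   __sync_add_and_fetch () intrinsic used on the removed line; here it
   atomically reserves the next ring slot index. */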
/* Wait until a ring slot is available */
while (new_tail >= fq->head + fq->nelts)
{
/* Initial barrier sync, for both worker and i/o threads */
- clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
+ clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
while (*vlib_worker_threads->wait_at_barrier)
;
- clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+ clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
}
}
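/*
 * The other half of this handshake is the main thread's barrier
 * sync/release pair.  A minimal sketch of that side, assuming the
 * protocol is exactly the flag/counter pair spun on above (the real
 * code lives in vlib_worker_thread_barrier_sync () and friends):
 */
#if 0 /* illustrative sketch, not part of the patch */
static void
barrier_sync_sketch (u32 n_workers)
{
  *vlib_worker_threads->wait_at_barrier = 1;	/* park the workers */
  while (*vlib_worker_threads->workers_at_barrier != n_workers)
    ;						/* wait for check-in */
  /* ... main thread now owns all shared state ... */
  *vlib_worker_threads->wait_at_barrier = 0;	/* release */
  while (*vlib_worker_threads->workers_at_barrier > 0)
    ;						/* wait for drain */
}
#endif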
vlib_worker_threads->node_reforks_required =
clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
+ /* We'll need the rpc vector lock... */
+ clib_spinlock_init (&vm->pending_rpc_lock);
+
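/*
 * The lock added above guards vm->pending_rpc_requests against
 * concurrent producers.  A minimal sketch of the intended enqueue
 * pattern (the function name is illustrative, not from the patch):
 */
#if 0 /* illustrative sketch, not part of the patch */
static void
pending_rpc_enqueue_sketch (vlib_main_t * vm, uword rpc_request)
{
  clib_spinlock_lock_if_init (&vm->pending_rpc_lock);
  vec_add1 (vm->pending_rpc_requests, rpc_request);
  clib_spinlock_unlock_if_init (&vm->pending_rpc_lock);
}
#endif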
/* Ask for an initial barrier sync */
*vlib_worker_threads->workers_at_barrier = 0;
*vlib_worker_threads->wait_at_barrier = 1;
vm_clone->pending_rpc_requests = 0;
vec_validate (vm_clone->pending_rpc_requests, 0);
_vec_len (vm_clone->pending_rpc_requests) = 0;
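/* vec_validate () above forces an initial allocation; resetting
   _vec_len () to 0 then presents an empty vector without freeing the
   storage, so the first real enqueue does not hit the allocator. */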
- memset (&vm_clone->random_buffer, 0,
- sizeof (vm_clone->random_buffer));
+ clib_memset (&vm_clone->random_buffer, 0,
+ sizeof (vm_clone->random_buffer));
nm = &vlib_mains[0]->node_main;
nm_clone = &vm_clone->node_main;
clib_memcpy (n, nm->nodes[j], sizeof (*n));
/* none of the copied nodes have enqueue rights given out */
n->owner_node_index = VLIB_INVALID_NODE_INDEX;
- memset (&n->stats_total, 0, sizeof (n->stats_total));
- memset (&n->stats_last_clear, 0,
- sizeof (n->stats_last_clear));
+ clib_memset (&n->stats_total, 0, sizeof (n->stats_total));
+ clib_memset (&n->stats_last_clear, 0,
+ sizeof (n->stats_last_clear));
vec_add1 (nm_clone->nodes, n);
n++;
}
u64 *old_counters = vm_clone->error_main.counters;
u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;
- clib_memcpy (&vm_clone->error_main, &vm->error_main,
- sizeof (vm->error_main));
+ clib_memcpy_fast (&vm_clone->error_main, &vm->error_main,
+ sizeof (vm->error_main));
j = vec_len (vm->error_main.counters) - 1;
vec_validate_aligned (old_counters, j, CLIB_CACHE_LINE_BYTES);
vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
new_n = nm->nodes[j];
old_n_clone = old_nodes_clone[j];
- clib_memcpy (new_n_clone, new_n, sizeof (*new_n));
+ clib_memcpy_fast (new_n_clone, new_n, sizeof (*new_n));
/* none of the copied nodes have enqueue rights given out */
new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;
if (j >= vec_len (old_nodes_clone))
{
/* new node, set to zero */
- memset (&new_n_clone->stats_total, 0,
- sizeof (new_n_clone->stats_total));
- memset (&new_n_clone->stats_last_clear, 0,
- sizeof (new_n_clone->stats_last_clear));
+ clib_memset (&new_n_clone->stats_total, 0,
+ sizeof (new_n_clone->stats_total));
+ clib_memset (&new_n_clone->stats_last_clear, 0,
+ sizeof (new_n_clone->stats_last_clear));
}
else
{
/* Copy stats if the old data is valid */
- clib_memcpy (&new_n_clone->stats_total,
- &old_n_clone->stats_total,
- sizeof (new_n_clone->stats_total));
- clib_memcpy (&new_n_clone->stats_last_clear,
- &old_n_clone->stats_last_clear,
- sizeof (new_n_clone->stats_last_clear));
+ clib_memcpy_fast (&new_n_clone->stats_total,
+ &old_n_clone->stats_total,
+ sizeof (new_n_clone->stats_total));
+ clib_memcpy_fast (&new_n_clone->stats_last_clear,
+ &old_n_clone->stats_last_clear,
+ sizeof (new_n_clone->stats_last_clear));
/* keep previous node state */
new_n_clone->state = old_n_clone->state;
rt->thread_index = vm_clone->thread_index;
/* copy runtime_data, will be overwritten later for existing rt */
if (n->runtime_data && n->runtime_data_bytes > 0)
- clib_memcpy (rt->runtime_data, n->runtime_data,
- clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
- n->runtime_data_bytes));
+ clib_memcpy_fast (rt->runtime_data, n->runtime_data,
+ clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
+ n->runtime_data_bytes));
}
for (j = 0; j < vec_len (old_rt); j++)
{
rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
rt->state = old_rt[j].state;
- clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
- VLIB_NODE_RUNTIME_DATA_SIZE);
+ clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
+ VLIB_NODE_RUNTIME_DATA_SIZE);
}
vec_free (old_rt);
rt->thread_index = vm_clone->thread_index;
/* copy runtime_data, will be overwritten later for existing rt */
if (n->runtime_data && n->runtime_data_bytes > 0)
- clib_memcpy (rt->runtime_data, n->runtime_data,
- clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
- n->runtime_data_bytes));
+ clib_memcpy_fast (rt->runtime_data, n->runtime_data,
+ clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
+ n->runtime_data_bytes));
}
for (j = 0; j < vec_len (old_rt); j++)
{
rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
rt->state = old_rt[j].state;
- clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
- VLIB_NODE_RUNTIME_DATA_SIZE);
+ clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
+ VLIB_NODE_RUNTIME_DATA_SIZE);
}
vec_free (old_rt);
VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
-#if !defined (__x86_64__) && !defined (__i386__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__)
-void
-__sync_fetch_and_add_8 (void)
-{
- fformat (stderr, "%s called\n", __FUNCTION__);
- abort ();
-}
-
-void
-__sync_add_and_fetch_8 (void)
-{
- fformat (stderr, "%s called\n", __FUNCTION__);
- abort ();
-}
-#endif
-
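/*
 * The removed stubs existed only to trap direct __sync_*_8 calls on
 * 32-bit targets without native 64-bit atomics; with all call sites
 * migrated to the clib_atomic_* wrappers, they no longer have callers.
 */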
void vnet_main_fixup (vlib_fork_fixup_t which) __attribute__ ((weak));
void
vnet_main_fixup (vlib_fork_fixup_t which)
/* Do per thread rebuilds in parallel */
refork_needed = 1;
- clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
- (vec_len (vlib_mains) - 1));
+ clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
+ (vec_len (vlib_mains) - 1));
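/* One refork per worker: each worker presumably decrements this counter
   with the matching clib_atomic_fetch_add (..., -1) once its node
   refork completes, and the main thread waits for it to drain to zero. */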
now = vlib_time_now (vm);
t_update_main = now - vm->barrier_epoch;
}