u8 * src_mac, u8 * dst_mac,
u16 ethertype, u64 timestamp, u16 length, int do_flush)
{
- u32 my_cpu_number = vm->thread_index;
+ u32 my_thread_index = vm->thread_index;
flow_report_main_t *frm = &flow_report_main;
ip4_header_t *ip;
udp_header_t *udp;
vlib_buffer_free_list_t *fl;
/* Find or allocate a buffer */
- b0 = fm->l2_buffers_per_worker[my_cpu_number];
+ b0 = fm->l2_buffers_per_worker[my_thread_index];
/* Need to allocate a buffer? */
if (PREDICT_FALSE (b0 == 0))
return;
/* Initialize the buffer */
- b0 = fm->l2_buffers_per_worker[my_cpu_number] =
+ b0 = fm->l2_buffers_per_worker[my_thread_index] =
vlib_get_buffer (vm, bi0);
fl =
vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
{
/* use the current buffer */
bi0 = vlib_get_buffer_index (vm, b0);
- offset = fm->l2_next_record_offset_per_worker[my_cpu_number];
+ offset = fm->l2_next_record_offset_per_worker[my_thread_index];
}
/* Find or allocate a frame */
- f = fm->l2_frames_per_worker[my_cpu_number];
+ f = fm->l2_frames_per_worker[my_thread_index];
if (PREDICT_FALSE (f == 0))
{
u32 *to_next;
f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
- fm->l2_frames_per_worker[my_cpu_number] = f;
+ fm->l2_frames_per_worker[my_thread_index] = f;
/* Enqueue the buffer */
to_next = vlib_frame_vector_args (f);
}
vlib_put_frame_to_node (vm, ip4_lookup_node.index,
- fm->l2_frames_per_worker[my_cpu_number]);
- fm->l2_frames_per_worker[my_cpu_number] = 0;
- fm->l2_buffers_per_worker[my_cpu_number] = 0;
+ fm->l2_frames_per_worker[my_thread_index]);
+ fm->l2_frames_per_worker[my_thread_index] = 0;
+ fm->l2_buffers_per_worker[my_thread_index] = 0;
offset = 0;
}
- fm->l2_next_record_offset_per_worker[my_cpu_number] = offset;
+ fm->l2_next_record_offset_per_worker[my_thread_index] = offset;
}
void
u32 src_address, u32 dst_address,
u8 tos, u64 timestamp, u16 length, int do_flush)
{
- u32 my_cpu_number = vm->thread_index;
+ u32 my_thread_index = vm->thread_index;
flow_report_main_t *frm = &flow_report_main;
ip4_header_t *ip;
udp_header_t *udp;
vlib_buffer_free_list_t *fl;
/* Find or allocate a buffer */
- b0 = fm->ipv4_buffers_per_worker[my_cpu_number];
+ b0 = fm->ipv4_buffers_per_worker[my_thread_index];
/* Need to allocate a buffer? */
if (PREDICT_FALSE (b0 == 0))
return;
/* Initialize the buffer */
- b0 = fm->ipv4_buffers_per_worker[my_cpu_number] =
+ b0 = fm->ipv4_buffers_per_worker[my_thread_index] =
vlib_get_buffer (vm, bi0);
fl =
vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
{
/* use the current buffer */
bi0 = vlib_get_buffer_index (vm, b0);
- offset = fm->ipv4_next_record_offset_per_worker[my_cpu_number];
+ offset = fm->ipv4_next_record_offset_per_worker[my_thread_index];
}
/* Find or allocate a frame */
- f = fm->ipv4_frames_per_worker[my_cpu_number];
+ f = fm->ipv4_frames_per_worker[my_thread_index];
if (PREDICT_FALSE (f == 0))
{
u32 *to_next;
f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
- fm->ipv4_frames_per_worker[my_cpu_number] = f;
+ fm->ipv4_frames_per_worker[my_thread_index] = f;
/* Enqueue the buffer */
to_next = vlib_frame_vector_args (f);
}
vlib_put_frame_to_node (vm, ip4_lookup_node.index,
- fm->ipv4_frames_per_worker[my_cpu_number]);
- fm->ipv4_frames_per_worker[my_cpu_number] = 0;
- fm->ipv4_buffers_per_worker[my_cpu_number] = 0;
+ fm->ipv4_frames_per_worker[my_thread_index]);
+ fm->ipv4_frames_per_worker[my_thread_index] = 0;
+ fm->ipv4_buffers_per_worker[my_thread_index] = 0;
offset = 0;
}
- fm->ipv4_next_record_offset_per_worker[my_cpu_number] = offset;
+ fm->ipv4_next_record_offset_per_worker[my_thread_index] = offset;
}
void
u32 pkts_processed = 0;
snat_main_t * sm = &snat_main;
u32 now = (u32) vlib_time_now (vm);
- u32 thread_index = os_get_cpu_number ();
+ u32 thread_index = vlib_get_thread_index ();
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
snat_out2in_next_t next_index;
u32 pkts_processed = 0;
snat_main_t * sm = &snat_main;
- u32 thread_index = os_get_cpu_number ();
+ u32 thread_index = vlib_get_thread_index ();
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
/* Main routine. */
int vlib_main (vlib_main_t * vm, unformat_input_t * input);
-/* Thread stacks, for os_get_cpu_number */
+/* Thread stacks, for os_get_thread_index */
extern u8 **vlib_thread_stacks;
/* Number of thread stacks that the application needs */
vlib_worker_thread_t *vlib_worker_threads;
vlib_thread_main_t vlib_thread_main;
-__thread uword vlib_thread_index = 0;
-
-uword
-os_get_cpu_number (void)
-{
- return vlib_thread_index;
-}
-
uword
-os_get_ncpus (void)
+os_get_nthreads (void)
{
u32 len;
w->lwp = syscall (SYS_gettid);
w->thread_id = pthread_self ();
- vlib_thread_index = w - vlib_worker_threads;
+ __os_thread_index = w - vlib_worker_threads;
rv = (void *) clib_calljmp
((uword (*)(uword)) w->thread_function,
void vlib_worker_thread_barrier_sync (vlib_main_t * vm);
void vlib_worker_thread_barrier_release (vlib_main_t * vm);
-extern __thread uword vlib_thread_index;
static_always_inline uword
vlib_get_thread_index (void)
{
- return vlib_thread_index;
+ return __os_thread_index;
}
always_inline void
vlib_thread_stack_init (0);
- vlib_thread_index = 0;
+ __os_thread_index = 0;
i = clib_calljmp (thread0, (uword) vm,
(void *) (vlib_thread_stacks[0] +
unix_shared_memory_queue_t *q;
/* Main thread: call the function directly */
- if (os_get_cpu_number () == 0)
+ if (vlib_get_thread_index () == 0)
{
vlib_main_t *vm = vlib_get_main ();
void (*call_fp) (void *);
vlib_frame_t * from_frame)
{
u32 n_left_from, next_index, * from, * to_next;
- u32 cpu_index = os_get_cpu_number();
+ u32 thread_index = vlib_get_thread_index ();
vnet_interface_main_t *im;
im = &vnet_get_main ()->interface_main;
vlib_increment_combined_counter (im->combined_sw_if_counters
+ VNET_INTERFACE_COUNTER_RX,
- cpu_index,
+ thread_index,
ido0->ido_sw_if_index,
1,
vlib_buffer_length_in_chain (vm, b0));
vlib_increment_combined_counter (im->combined_sw_if_counters
+ VNET_INTERFACE_COUNTER_RX,
- cpu_index,
+ thread_index,
ido1->ido_sw_if_index,
1,
vlib_buffer_length_in_chain (vm, b1));
/* Bump the interface's RX counters */
vlib_increment_combined_counter (im->combined_sw_if_counters
+ VNET_INTERFACE_COUNTER_RX,
- cpu_index,
+ thread_index,
ido0->ido_sw_if_index,
1,
vlib_buffer_length_in_chain (vm, b0));
/* compute payload length starting after GPE */
u32 bytes = b->current_length - (lisp_data - b->data - b->current_data);
- vlib_increment_combined_counter (&lgm->counters, os_get_cpu_number (),
+ vlib_increment_combined_counter (&lgm->counters, vlib_get_thread_index (),
p[0], 1, bytes);
}
clib_bihash_bucket_t working_bucket __attribute__ ((aligned (8)));
void *oldheap;
BVT (clib_bihash_value) * working_copy;
- u32 cpu_number = os_get_cpu_number ();
+ u32 thread_index = os_get_thread_index ();
- if (cpu_number >= vec_len (h->working_copies))
+ if (thread_index >= vec_len (h->working_copies))
{
oldheap = clib_mem_set_heap (h->mheap);
- vec_validate (h->working_copies, cpu_number);
+ vec_validate (h->working_copies, thread_index);
clib_mem_set_heap (oldheap);
}
* updates from multiple threads will not result in sporadic, spurious
* lookup failures.
*/
- working_copy = h->working_copies[cpu_number];
+ working_copy = h->working_copies[thread_index];
h->saved_bucket.as_u64 = b->as_u64;
oldheap = clib_mem_set_heap (h->mheap);
{
vec_validate_aligned (working_copy, (1 << b->log2_pages) - 1,
sizeof (u64));
- h->working_copies[cpu_number] = working_copy;
+ h->working_copies[thread_index] = working_copy;
}
_vec_len (working_copy) = 1 << b->log2_pages;
working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
CLIB_MEMORY_BARRIER ();
b->as_u64 = working_bucket.as_u64;
- h->working_copies[cpu_number] = working_copy;
+ h->working_copies[thread_index] = working_copy;
}
static
int i, limit;
u64 hash, new_hash;
u32 new_log2_pages;
- u32 cpu_number = os_get_cpu_number ();
+ u32 thread_index = os_get_thread_index ();
int mark_bucket_linear;
int resplit_once;
new_log2_pages = h->saved_bucket.log2_pages + 1;
mark_bucket_linear = 0;
- working_copy = h->working_copies[cpu_number];
+ working_copy = h->working_copies[thread_index];
resplit_once = 0;
new_v = BV (split_and_rehash) (h, working_copy, new_log2_pages);
u32 lock;
#if CLIB_DEBUG > 0
pid_t pid;
- uword cpu_index;
+ uword thread_index;
void *frame_address;
#endif
} *clib_spinlock_t;
#if CLIB_DEBUG > 0
(*p)->frame_address = __builtin_frame_address (0);
(*p)->pid = getpid ();
- (*p)->cpu_index = os_get_cpu_number ();
+ (*p)->thread_index = os_get_thread_index ();
#endif
}
#if CLIB_DEBUG > 0
(*p)->frame_address = 0;
(*p)->pid = 0;
- (*p)->cpu_index = 0;
+ (*p)->thread_index = 0;
#endif
}
always_inline void *
clib_mem_get_per_cpu_heap (void)
{
- int cpu = os_get_cpu_number ();
+ int cpu = os_get_thread_index ();
return clib_per_cpu_mheaps[cpu];
}
always_inline void *
clib_mem_set_per_cpu_heap (u8 * new_heap)
{
- int cpu = os_get_cpu_number ();
+ int cpu = os_get_thread_index ();
void *old = clib_per_cpu_mheaps[cpu];
clib_per_cpu_mheaps[cpu] = new_heap;
return old;
align_offset = align;
}
- cpu = os_get_cpu_number ();
+ cpu = os_get_thread_index ();
heap = clib_per_cpu_mheaps[cpu];
heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
clib_per_cpu_mheaps[cpu] = heap;
mhash_set_tmp_key (mhash_t * h, const void *key)
{
u8 *key_tmp;
- int my_cpu = os_get_cpu_number ();
+ int my_cpu = os_get_thread_index ();
vec_validate (h->key_tmps, my_cpu);
key_tmp = h->key_tmps[my_cpu];
{
u8 *key_tmp;
- int my_cpu = os_get_cpu_number ();
+ int my_cpu = os_get_thread_index ();
vec_validate (h->key_tmps, my_cpu);
key_tmp = h->key_tmps[my_cpu];
return key_tmp;
mheap_t *h = mheap_header (v);
if (v && (h->flags & MHEAP_FLAG_THREAD_SAFE))
{
- u32 my_cpu = os_get_cpu_number ();
+ u32 my_cpu = os_get_thread_index ();
if (h->owner_cpu == my_cpu)
{
h->recursion_count++;
mheap_t *h = mheap_header (v);
if (v && h->flags & MHEAP_FLAG_THREAD_SAFE)
{
- ASSERT (os_get_cpu_number () == h->owner_cpu);
+ ASSERT (os_get_thread_index () == h->owner_cpu);
if (--h->recursion_count == 0)
{
h->owner_cpu = ~0;
/* Estimate, measure or divine CPU timestamp clock frequency. */
f64 os_cpu_clock_frequency (void);
-uword os_get_cpu_number (void);
-uword os_get_ncpus (void);
+extern __thread uword __os_thread_index;
+
+static_always_inline uword
+os_get_thread_index (void)
+{
+ return __os_thread_index;
+}
+
+static_always_inline uword
+os_get_cpu_number (void) __attribute__ ((deprecated));
+
+static_always_inline uword
+os_get_cpu_number (void)
+{
+ return __os_thread_index;
+}
+
+uword os_get_nthreads (void);
#include <vppinfra/smp.h>
void *heap;
uword vm_size, stack_size, mheap_flags;
- ASSERT (os_get_cpu_number () == cpu);
+ ASSERT (os_get_thread_index () == cpu);
vm_size = (uword) 1 << m->log2_n_per_cpu_vm_bytes;
stack_size = (uword) 1 << m->log2_n_per_cpu_stack_bytes;
#include <fcntl.h>
#include <stdio.h> /* for sprintf */
+__thread uword __os_thread_index = 0;
+
clib_error_t *
unix_file_n_bytes (char *file, uword * result)
{
void
os_puts (u8 * string, uword string_length, uword is_error)
{
- int cpu = os_get_cpu_number ();
- int ncpus = os_get_ncpus ();
+ int cpu = os_get_thread_index ();
+ int nthreads = os_get_nthreads ();
char buf[64];
int fd = is_error ? 2 : 1;
struct iovec iovs[2];
int n_iovs = 0;
- if (ncpus > 1)
+ if (nthreads > 1)
{
snprintf (buf, sizeof (buf), "%d: ", cpu);
os_panic ();
}
-uword os_get_cpu_number (void) __attribute__ ((weak));
-uword
-os_get_cpu_number (void)
-{
- return 0;
-}
-
-uword os_get_ncpus (void) __attribute__ ((weak));
+uword os_get_nthreads (void) __attribute__ ((weak));
uword
-os_get_ncpus (void)
+os_get_nthreads (void)
{
return 1;
}