* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#define _GNU_SOURCE
+#include <sched.h>
+
#include <signal.h>
#include <math.h>
#include <vppinfra/format.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
-#include <vlib/unix/physmem.h>
-
#include <vlib/unix/cj.h>
+
#if DPDK==1
#include <rte_config.h>
#include <rte_common.h>
if (!tm->cpu_socket_bitmap)
tm->cpu_socket_bitmap = clib_bitmap_set(0, 0, 1);
+ /* pin main thread to main_lcore */
+#if DPDK==0
+ {
+ cpu_set_t cpuset;
+ CPU_ZERO(&cpuset);
+ CPU_SET(tm->main_lcore, &cpuset);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
+ }
+#endif
+
/* as many threads as stacks... */
vec_validate_aligned (vlib_worker_threads, vec_len(vlib_thread_stacks)-1,
CLIB_CACHE_LINE_BYTES);
static int
vlib_launch_thread (void *fp, vlib_worker_thread_t *w, unsigned lcore_id)
{
- pthread_t dummy;
 void *(*fp_arg)(void *) = fp;
#if DPDK==1
 return -1;
 else
#endif
- return pthread_create (&dummy, NULL /* attr */, fp_arg, (void *)w);
+ {
+ /* Non-DPDK path: launch the worker as a plain pthread, then pin it to
+ * the requested lcore so the kernel scheduler cannot migrate it. */
+ int ret;
+ pthread_t worker;
+ cpu_set_t cpuset;
+ CPU_ZERO(&cpuset);
+ CPU_SET(lcore_id, &cpuset);
+
+ ret = pthread_create (&worker, NULL /* attr */, fp_arg, (void *)w);
+ if(ret == 0)
+ /* NOTE(review): affinity is applied after the thread has started, so
+ * it may briefly run on an arbitrary CPU before being pinned; using
+ * pthread_attr_setaffinity_np() on a pthread_attr_t passed to
+ * pthread_create would pin it from birth — TODO confirm acceptable.
+ * Returns 0 on success, else the pthread_setaffinity_np error code. */
+ return pthread_setaffinity_np(worker, sizeof(cpu_set_t), &cpuset);
+ else
+ /* pthread_create failed; propagate its error code to the caller. */
+ return ret;
+ }
}
static clib_error_t * start_workers (vlib_main_t * vm)
u32 save_flags;
save_node_runtime_index = nf->node_runtime_index;
- save_flags = nf->flags;
+ save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
vlib_next_frame_init (nf);
nf->node_runtime_index = save_node_runtime_index;
nf->flags = save_flags;
u32 save_flags;
save_node_runtime_index = nf->node_runtime_index;
- save_flags = nf->flags;
+ save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
vlib_next_frame_init (nf);
nf->node_runtime_index = save_node_runtime_index;
nf->flags = save_flags;
VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
-#if !defined (__x86_64__) && !defined (__aarch64__) && !defined (__powerpc64__)
+#if !defined (__x86_64__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__)
void __sync_fetch_and_add_8 (void)
{
fformat(stderr, "%s called\n", __FUNCTION__);
if (++vlib_worker_threads[0].recursion_level > 1)
return;
+ vlib_worker_threads[0].barrier_sync_count++;
+
ASSERT (os_get_cpu_number() == 0);
deadline = vlib_time_now (vm) + BARRIER_SYNC_TIMEOUT;