/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 Intel Corporation.
 * Copyright 2012 Hasan Alayli <halayli@gmail.com>
 */

#ifndef LTHREAD_SCHED_H_
#define LTHREAD_SCHED_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "lthread_int.h"
#include "lthread_queue.h"
#include "lthread_objcache.h"
#include "lthread_diag.h"
#include "ctx.h"

/*
 * Insert an lthread into a scheduler's ready queue.
 *
 * The local scheduler's ready queue has a single producer (the owning
 * scheduler), so the single-producer insert can be used; another
 * scheduler's pready queue may be written by several schedulers, so the
 * multi-producer insert is required.
 */
static inline void
_ready_queue_insert(struct lthread_sched *sched, struct lthread *lt)
{
	if (sched == THIS_SCHED)
		_lthread_queue_insert_sp((THIS_SCHED)->ready, lt);
	else
		_lthread_queue_insert_mp(sched->pready, lt);
}

/*
 * remove an lthread from a queue
 */
static inline struct lthread *_ready_queue_remove(struct lthread_queue *q)
{
	return _lthread_queue_remove(q);
}

/**
 * Return true if the ready queue is empty
 */
static inline int _ready_queue_empty(struct lthread_queue *q)
{
	return _lthread_queue_empty(q);
}

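/*
 * Scheduler time: the absolute difference between the current TSC value
 * and this scheduler's birth timestamp.  Never returns 0, since 0 means
 * sleep forever.
 */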
static inline uint64_t _sched_now(void)
{
	uint64_t now = rte_rdtsc();

	if (now > (THIS_SCHED)->birth)
		return now - (THIS_SCHED)->birth;
	if (now < (THIS_SCHED)->birth)
		return (THIS_SCHED)->birth - now;
	/* never return 0 because this means sleep forever */
	return 1;
}

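/*
 * Switch from the current lthread back into the scheduler context.  Used
 * when an lthread is being migrated to another scheduler (see
 * lthread_set_affinity()); the scheduler completes the move once it
 * regains control.
 */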
static __rte_always_inline void
_affinitize(void);
static inline void
_affinitize(void)
{
	struct lthread *lt = THIS_LTHREAD;

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_SUSPENDED, 0, 0);
	ctx_switch(&(THIS_SCHED)->ctx, &lt->ctx);
}

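/*
 * Suspend the current lthread: count it as blocked on this scheduler and
 * switch into the scheduler context.  The blocked count is dropped again
 * when the lthread is eventually resumed.
 */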
static __rte_always_inline void
_suspend(void);
static inline void
_suspend(void)
{
	struct lthread *lt = THIS_LTHREAD;

	(THIS_SCHED)->nb_blocked_threads++;
	DIAG_EVENT(lt, LT_DIAG_LTHREAD_SUSPENDED, 0, 0);
	ctx_switch(&(THIS_SCHED)->ctx, &lt->ctx);
	(THIS_SCHED)->nb_blocked_threads--;
}

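/*
 * Voluntarily yield: put the current lthread back on this scheduler's
 * ready queue and switch into the scheduler context so that other ready
 * lthreads get a chance to run.
 */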
static __rte_always_inline void
_reschedule(void);
static inline void
_reschedule(void)
{
	struct lthread *lt = THIS_LTHREAD;

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_RESCHEDULED, 0, 0);
	_ready_queue_insert(THIS_SCHED, lt);
	ctx_switch(&(THIS_SCHED)->ctx, &lt->ctx);
}

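/*
 * Table of schedulers, indexed by lcore id, together with the sleep-timer
 * expiry callback and the scheduler shutdown hook (both implemented in
 * lthread_sched.c).
 */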
extern struct lthread_sched *schedcore[];
void _sched_timer_cb(struct rte_timer *tim, void *arg);
void _sched_shutdown(__rte_unused void *arg);

#ifdef __cplusplus
}
#endif

#endif                          /* LTHREAD_SCHED_H_ */