/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "test_perf_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

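/*
 * Example invocation (core list illustrative, not taken from this file):
 *	dpdk-test-eventdev -l 0-3 -- --test=perf_atq --plcores=1 \
 *		--wlcores=2,3 --stlist=a
 */

/*
 * All types queue (ATQ) model: every pipeline stage runs on the same event
 * queue, so one queue per producer is enough; an event's current stage is
 * tracked in its sub_event_type rather than in the queue id.
 */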
static inline int
atq_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers */
	return evt_nr_active_lcores(opt->plcores);
}

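/*
 * At the first stage (sub_event_type == 0), record the current timer cycle
 * count in the perf_elt carried by the event; the last stage uses it to
 * compute the end-to-end forwarding latency.
 */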
static __rte_always_inline void
atq_mark_fwd_latency(struct rte_event *const ev)
{
	if (unlikely(ev->sub_event_type == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

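/*
 * Move an event to its next pipeline stage: advance sub_event_type, select
 * the scheduling type configured for that stage and mark the event to be
 * enqueued with RTE_EVENT_OP_FORWARD.
 */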
static __rte_always_inline void
atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->sub_event_type++;
	ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

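/* Non-burst worker: dequeue, process and enqueue one event at a time. */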
static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		if (enable_fwd_latency) {
			/* prefetch only after a successful dequeue; ev is
			 * uninitialized on an empty poll
			 */
			rte_prefetch0(ev.event_ptr);
			/* first stage in pipeline, mark ts to compute fwd latency */
			atq_mark_fwd_latency(&ev);
		}

		/* last stage in pipeline */
		if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool, &ev, w,
					bufs, sz, cnt);
		} else {
			atq_fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}

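/*
 * Burst worker: dequeue up to BURST_SIZE events, process each one in place
 * and enqueue the whole batch back, retrying until the device accepts every
 * event.
 */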
static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first stage in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				atq_mark_fwd_latency(&ev[i]);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].sub_event_type % nb_stages)
						== laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

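				/* the event is consumed at the last stage;
				 * release its schedule context instead of
				 * forwarding it
				 */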
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				atq_fwd_event(&ev[i], sched_type_list,
						nb_stages);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

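/*
 * Resolve the burst capability and the latency option once at launch time so
 * each specialized worker loop runs with compile-time constant conditions.
 */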
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_atq_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_atq_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_atq_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_atq_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

static int
perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;
	uint8_t queue;

	const struct rte_event_dev_config config = {
			.nb_event_queues = atq_nb_event_queues(opt),
			.nb_event_ports = perf_nb_event_ports(opt),
			.nb_events_limit = 4096,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

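	/* RTE_EVENT_QUEUE_CFG_ALL_TYPES lets atomic, ordered and parallel
	 * events share each queue, which is the point of the ATQ test.
	 */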
	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < atq_nb_event_queues(opt); queue++) {
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */,
					atq_nb_event_queues(opt));
	if (ret)
		return ret;

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
perf_atq_opt_dump(struct evt_options *opt)
{
	perf_opt_dump(opt, atq_nb_event_queues(opt));
}

static int
perf_atq_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, atq_nb_event_queues(opt));
}

static bool
perf_atq_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			atq_nb_event_queues(opt), dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}
	if (!evt_has_all_types_queue(opt->dev_id))
		return false;

	return true;
}

static const struct evt_test_ops perf_atq = {
	.cap_check          = perf_atq_capability_check,
	.opt_check          = perf_atq_opt_check,
	.opt_dump           = perf_atq_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.eventdev_setup     = perf_atq_eventdev_setup,
	.launch_lcores      = perf_atq_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_atq);