New upstream version 17.11-rc3
[deb_dpdk.git] app/test-eventdev/test_perf_queue.c
/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "test_perf_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

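/*
 * Each producer lcore drives its own chain of nb_stages queues, so the
 * device is configured with nb_producers * nb_stages event queues in
 * total; fwd_event() below advances an event to the next queue of its
 * chain until the last stage is reached.
 */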
static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers * number of stages */
	return evt_nr_active_lcores(opt->plcores) * opt->nb_stages;
}

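/*
 * Stamp the sample when the event sits in the first queue of its chain
 * (queue_id % nb_stages == 0); the last stage uses this timestamp to
 * derive the forwarding latency.
 */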
static inline __attribute__((always_inline)) void
mark_fwd_latency(struct rte_event *const ev,
		const uint8_t nb_stages)
{
	if (unlikely((ev->queue_id % nb_stages) == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

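/*
 * Move the event to the next queue of the pipeline, apply the schedule
 * type configured for that stage and re-submit it as a FORWARD operation.
 */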
static inline __attribute__((always_inline)) void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->queue_id++;
	ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

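/* Non-burst worker: dequeue, process and enqueue one event at a time. */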
static int
perf_queue_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}
		/* first q in pipeline, mark timestamp to compute fwd latency */
		if (enable_fwd_latency)
			mark_fwd_latency(&ev, nb_stages);

		/* last stage in pipeline */
		if (unlikely((ev.queue_id % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool,
					&ev, w, bufs, sz, cnt);
		} else {
			fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}

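/*
 * Burst worker: dequeue up to BURST_SIZE events, process them and enqueue
 * the whole batch back; events that completed their last stage are turned
 * into RELEASE operations so their scheduler contexts are freed.
 */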
static int
perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 so the ev[i + 1] prefetch below never reads past the array */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first queue in pipeline:
				 * mark timestamp to compute fwd latency
				 */
				mark_fwd_latency(&ev[i], nb_stages);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].queue_id % nb_stages) ==
						 laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				fwd_event(&ev[i], sched_type_list, nb_stages);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

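/*
 * Select the worker variant once at launch time; passing burst and
 * fwd_latency as constants lets the compiler strip the unused branches
 * from the fast-path loops.
 */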
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_queue_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_queue_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_queue_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_queue_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

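/*
 * Launch the test through the common perf launcher, using the wrapper above
 * as the worker function.
 */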
static int
perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

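/*
 * Configure the event device for this test: one queue per stage per
 * producer, optional per-stage queue priorities, the event ports, an event
 * service core when the PMD needs one, and finally start the device.
 */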
static int
perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t queue;
	int nb_stages = opt->nb_stages;
	int ret;

	const struct rte_event_dev_config config = {
			.nb_event_queues = perf_queue_nb_event_queues(opt),
			.nb_event_ports = perf_nb_event_ports(opt),
			.nb_events_limit = 4096,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < perf_queue_nb_event_queues(opt); queue++) {
		q_conf.schedule_type =
			(opt->sched_type_list[queue % nb_stages]);

		if (opt->q_priority) {
			uint8_t stage_pos = queue % nb_stages;
			/* Configure event queues (stage 0 to stage n) with
			 * RTE_EVENT_DEV_PRIORITY_LOWEST to
			 * RTE_EVENT_DEV_PRIORITY_HIGHEST.
			 */
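			/*
			 * Example (illustrative numbers): with nb_stages == 3
			 * and RTE_EVENT_DEV_PRIORITY_LOWEST == 255, step is
			 * 255 / 2 == 127 and the stage queues get priorities
			 * 255, 128 and 1, i.e. the last-stage queue is
			 * scheduled first (lower value means higher priority).
			 */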
			/* avoid a divide-by-zero when there is only one stage */
			uint8_t step = nb_stages > 1 ?
					RTE_EVENT_DEV_PRIORITY_LOWEST /
					(nb_stages - 1) : 0;
			/* Higher prio for the queues closer to last stage */
			q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
					(step * stage_pos);
		}
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
					perf_queue_nb_event_queues(opt));
	if (ret)
		return ret;

	ret = evt_service_setup(opt->dev_id);
	if (ret) {
		evt_err("No service lcore found to run event dev.");
		return ret;
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}


static void
perf_queue_opt_dump(struct evt_options *opt)
{
	evt_dump_fwd_latency(opt);
	perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
}

static int
perf_queue_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
}

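/*
 * Check that the selected event device exposes enough queues and ports for
 * the requested producer/worker/stage combination.
 */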
static bool
perf_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			perf_queue_nb_event_queues(opt),
			dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		/* fail the capability check instead of running anyway */
		return false;
	}

	return true;
}


static const struct evt_test_ops perf_queue = {
	.cap_check          = perf_queue_capability_check,
	.opt_check          = perf_queue_opt_check,
	.opt_dump           = perf_queue_opt_dump,
	.test_setup         = perf_test_setup,
	.mempool_setup      = perf_mempool_setup,
	.eventdev_setup     = perf_queue_eventdev_setup,
	.launch_lcores      = perf_queue_launch_lcores,
	.eventdev_destroy   = perf_eventdev_destroy,
	.mempool_destroy    = perf_mempool_destroy,
	.test_result        = perf_test_result,
	.test_destroy       = perf_test_destroy,
};

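/* Register the test; it is selected on the command line with --test=perf_queue. */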
EVT_TEST_REGISTER(perf_queue);