/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

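/*
 * With all-types queues (ATQ), each producer gets its own event queue: one
 * per ethdev port when the Rx adapter produces, otherwise one per producer
 * lcore.
 */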
static inline int
atq_nb_event_queues(struct evt_options *opt)
{
        /* nb_queues = number of producers */
        return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
                rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores);
}

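/*
 * On the first pipeline stage (sub_event_type == 0), stamp the perf element
 * with the current timer cycles so the last stage can compute forward
 * latency.
 */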
static inline __attribute__((always_inline)) void
atq_mark_fwd_latency(struct rte_event *const ev)
{
        if (unlikely(ev->sub_event_type == 0)) {
                struct perf_elt *const m = ev->event_ptr;

                m->timestamp = rte_get_timer_cycles();
        }
}

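/*
 * Advance the event to the next pipeline stage: bump the stage counter
 * (sub_event_type), pick that stage's scheduling type, and mark the event
 * for re-enqueue with RTE_EVENT_OP_FORWARD on the same all-types queue.
 */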
static inline __attribute__((always_inline)) void
atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
                const uint8_t nb_stages)
{
        ev->sub_event_type++;
        ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
        ev->op = RTE_EVENT_OP_FORWARD;
        ev->event_type = RTE_EVENT_TYPE_CPU;
}

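/*
 * Non-burst worker: dequeue one event at a time; consume it on the last
 * stage (optionally recording latency), otherwise forward it to the next
 * stage.
 */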
static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
        PERF_WORKER_INIT;
        struct rte_event ev;

        while (t->done == false) {
                uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

                if (enable_fwd_latency)
                        rte_prefetch0(ev.event_ptr);

                if (!event) {
                        rte_pause();
                        continue;
                }

                /* first stage in pipeline, mark ts to compute fwd latency */
                if (enable_fwd_latency)
                        atq_mark_fwd_latency(&ev);

                /* last stage in pipeline */
                if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
                        if (enable_fwd_latency)
                                cnt = perf_process_last_stage_latency(pool,
                                        &ev, w, bufs, sz, cnt);
                        else
                                cnt = perf_process_last_stage(pool, &ev, w,
                                        bufs, sz, cnt);
                } else {
                        atq_fwd_event(&ev, sched_type_list, nb_stages);
                        while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
                                rte_pause();
                }
        }
        return 0;
}

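/*
 * Burst worker: same stage machine as above, but dequeues and enqueues up
 * to BURST_SIZE events at a time; last-stage events are released as part of
 * the enqueue burst via RTE_EVENT_OP_RELEASE.
 */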
static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
        PERF_WORKER_INIT;
        uint16_t i;
        /* +1 so the ev[i + 1] prefetch below never reads past the array */
        struct rte_event ev[BURST_SIZE + 1];

        while (t->done == false) {
                uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
                                BURST_SIZE, 0);

                if (!nb_rx) {
                        rte_pause();
                        continue;
                }

                for (i = 0; i < nb_rx; i++) {
                        if (enable_fwd_latency) {
                                rte_prefetch0(ev[i+1].event_ptr);
                                /* first stage in pipeline.
                                 * mark time stamp to compute fwd latency
                                 */
                                atq_mark_fwd_latency(&ev[i]);
                        }
                        /* last stage in pipeline */
                        if (unlikely((ev[i].sub_event_type % nb_stages)
                                                == laststage)) {
                                if (enable_fwd_latency)
                                        cnt = perf_process_last_stage_latency(
                                                pool, &ev[i], w, bufs, sz, cnt);
                                else
                                        cnt = perf_process_last_stage(pool,
                                                &ev[i], w, bufs, sz, cnt);

                                ev[i].op = RTE_EVENT_OP_RELEASE;
                        } else {
                                atq_fwd_event(&ev[i], sched_type_list,
                                                nb_stages);
                        }
                }

                uint16_t enq;

                enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
                while (enq < nb_rx) {
                        enq += rte_event_enqueue_burst(dev, port,
                                                        ev + enq, nb_rx - enq);
                }
        }
        return 0;
}

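/*
 * Select the worker variant once at launch time, so each (burst,
 * fwd_latency) combination compiles to a loop with constant-folded
 * branches.
 */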
static int
worker_wrapper(void *arg)
{
        struct worker_data *w = arg;
        struct evt_options *opt = w->t->opt;

        const bool burst = evt_has_burst_mode(w->dev_id);
        const int fwd_latency = opt->fwd_latency;

        /* allow compiler to optimize */
        if (!burst && !fwd_latency)
                return perf_atq_worker(arg, 0);
        else if (!burst && fwd_latency)
                return perf_atq_worker(arg, 1);
        else if (burst && !fwd_latency)
                return perf_atq_worker_burst(arg, 0);
        else if (burst && fwd_latency)
                return perf_atq_worker_burst(arg, 1);

        rte_panic("invalid worker\n");
}

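/* Hand the specialized worker dispatch to the common perf launcher. */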
static int
perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
        return perf_launch_lcores(test, opt, worker_wrapper);
}

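/*
 * Configure the event device: one all-types queue per producer, one port
 * per worker lcore (plus one per producer lcore unless the Rx adapter
 * produces), then start the device, setting up a service lcore when the
 * scheduler is not distributed.
 */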
static int
perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
        int ret;
        uint8_t queue;
        uint8_t nb_queues;
        uint8_t nb_ports;
        struct rte_event_dev_info dev_info;

        nb_ports = evt_nr_active_lcores(opt->wlcores);
        nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
                evt_nr_active_lcores(opt->plcores);

        nb_queues = atq_nb_event_queues(opt);

        memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
        ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
        if (ret) {
                evt_err("failed to get eventdev info %d", opt->dev_id);
                return ret;
        }

        const struct rte_event_dev_config config = {
                        .nb_event_queues = nb_queues,
                        .nb_event_ports = nb_ports,
                        .nb_events_limit = dev_info.max_num_events,
                        .nb_event_queue_flows = opt->nb_flows,
                        .nb_event_port_dequeue_depth =
                                dev_info.max_event_port_dequeue_depth,
                        .nb_event_port_enqueue_depth =
                                dev_info.max_event_port_enqueue_depth,
        };

        ret = rte_event_dev_configure(opt->dev_id, &config);
        if (ret) {
                evt_err("failed to configure eventdev %d", opt->dev_id);
                return ret;
        }

        struct rte_event_queue_conf q_conf = {
                        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
                        .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
                        .nb_atomic_flows = opt->nb_flows,
                        .nb_atomic_order_sequences = opt->nb_flows,
        };
        /* queue configurations */
        for (queue = 0; queue < nb_queues; queue++) {
                ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
                if (ret) {
                        evt_err("failed to setup queue=%d", queue);
                        return ret;
                }
        }

        if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
                opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

        /* port configuration */
        const struct rte_event_port_conf p_conf = {
                        .dequeue_depth = opt->wkr_deq_dep,
                        /* cap enqueue depth at the device enqueue limit */
                        .enqueue_depth = dev_info.max_event_port_enqueue_depth,
                        .new_event_threshold = dev_info.max_num_events,
        };

        ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
                        &p_conf);
        if (ret)
                return ret;

        if (!evt_has_distributed_sched(opt->dev_id)) {
                uint32_t service_id;

                rte_event_dev_service_id_get(opt->dev_id, &service_id);
                ret = evt_service_setup(service_id);
                if (ret) {
                        evt_err("No service lcore found to run event dev.");
                        return ret;
                }
        }

        ret = rte_event_dev_start(opt->dev_id);
        if (ret) {
                evt_err("failed to start eventdev %d", opt->dev_id);
                return ret;
        }

        return 0;
}

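/* Dump and validate the options against the ATQ queue count. */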
static void
perf_atq_opt_dump(struct evt_options *opt)
{
        perf_opt_dump(opt, atq_nb_event_queues(opt));
}

static int
perf_atq_opt_check(struct evt_options *opt)
{
        return perf_opt_check(opt, atq_nb_event_queues(opt));
}

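/*
 * The test needs enough queues and ports for the requested lcores, plus a
 * device that supports all-types queues (RTE_EVENT_QUEUE_CFG_ALL_TYPES).
 */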
static bool
perf_atq_capability_check(struct evt_options *opt)
{
        struct rte_event_dev_info dev_info;

        rte_event_dev_info_get(opt->dev_id, &dev_info);
        if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
                        dev_info.max_event_ports < perf_nb_event_ports(opt)) {
                evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
                        atq_nb_event_queues(opt), dev_info.max_event_queues,
                        perf_nb_event_ports(opt), dev_info.max_event_ports);
                return false;
        }
        if (!evt_has_all_types_queue(opt->dev_id))
                return false;

        return true;
}

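/*
 * Register the "perf_atq" test with the test harness. An illustrative
 * command line (see the testeventdev guide linked at the top of this file
 * for the authoritative option list; the --vdev device is an example):
 *
 *   dpdk-test-eventdev --vdev=event_sw0 -- --test=perf_atq \
 *           --plcores=1 --wlcores=2,3 --stlist=a
 */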
static const struct evt_test_ops perf_atq = {
        .cap_check          = perf_atq_capability_check,
        .opt_check          = perf_atq_opt_check,
        .opt_dump           = perf_atq_opt_dump,
        .test_setup         = perf_test_setup,
        .ethdev_setup       = perf_ethdev_setup,
        .mempool_setup      = perf_mempool_setup,
        .eventdev_setup     = perf_atq_eventdev_setup,
        .launch_lcores      = perf_atq_launch_lcores,
        .eventdev_destroy   = perf_eventdev_destroy,
        .mempool_destroy    = perf_mempool_destroy,
        .ethdev_destroy     = perf_ethdev_destroy,
        .test_result        = perf_test_result,
        .test_destroy       = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_atq);