/*
 *   Copyright(c) 2016 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
33 #include <rte_common.h>
34 #include <rte_hexdump.h>
36 #include <rte_malloc.h>
37 #include <rte_memcpy.h>
38 #include <rte_eventdev.h>
40 #include <rte_bus_vdev.h>
49 RTE_BUILD_BUG_ON(sizeof(struct rte_event) != 16);
51 count = rte_event_dev_count();
53 printf("Failed to find a valid event device,"
54 " testing with event_skeleton device\n");
55 return rte_vdev_init("event_skeleton", NULL);
61 testsuite_teardown(void)
66 test_eventdev_count(void)
69 count = rte_event_dev_count();
70 TEST_ASSERT(count > 0, "Invalid eventdev count %" PRIu8, count);
75 test_eventdev_get_dev_id(void)
78 ret = rte_event_dev_get_dev_id("not_a_valid_eventdev_driver");
79 TEST_ASSERT_FAIL(ret, "Expected <0 for invalid dev name ret=%d", ret);
84 test_eventdev_socket_id(void)
87 socket_id = rte_event_dev_socket_id(TEST_DEV_ID);
88 TEST_ASSERT(socket_id != -EINVAL, "Failed to get socket_id %d",
90 socket_id = rte_event_dev_socket_id(RTE_EVENT_MAX_DEVS);
91 TEST_ASSERT(socket_id == -EINVAL, "Expected -EINVAL %d", socket_id);
97 test_eventdev_info_get(void)
100 struct rte_event_dev_info info;
101 ret = rte_event_dev_info_get(TEST_DEV_ID, NULL);
102 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
103 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
104 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
105 TEST_ASSERT(info.max_event_ports > 0,
106 "Not enough event ports %d", info.max_event_ports);
107 TEST_ASSERT(info.max_event_queues > 0,
108 "Not enough event queues %d", info.max_event_queues);
113 devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
114 struct rte_event_dev_info *info)
116 memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
117 dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
118 dev_conf->nb_event_ports = info->max_event_ports;
119 dev_conf->nb_event_queues = info->max_event_queues;
120 dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
121 dev_conf->nb_event_port_dequeue_depth =
122 info->max_event_port_dequeue_depth;
123 dev_conf->nb_event_port_enqueue_depth =
124 info->max_event_port_enqueue_depth;
125 dev_conf->nb_event_port_enqueue_depth =
126 info->max_event_port_enqueue_depth;
127 dev_conf->nb_events_limit =
128 info->max_num_events;
132 test_ethdev_config_run(struct rte_event_dev_config *dev_conf,
133 struct rte_event_dev_info *info,
134 void (*fn)(struct rte_event_dev_config *dev_conf,
135 struct rte_event_dev_info *info))
137 devconf_set_default_sane_values(dev_conf, info);
139 return rte_event_dev_configure(TEST_DEV_ID, dev_conf);
143 max_dequeue_limit(struct rte_event_dev_config *dev_conf,
144 struct rte_event_dev_info *info)
146 dev_conf->dequeue_timeout_ns = info->max_dequeue_timeout_ns + 1;
150 max_events_limit(struct rte_event_dev_config *dev_conf,
151 struct rte_event_dev_info *info)
153 dev_conf->nb_events_limit = info->max_num_events + 1;
157 max_event_ports(struct rte_event_dev_config *dev_conf,
158 struct rte_event_dev_info *info)
160 dev_conf->nb_event_ports = info->max_event_ports + 1;
164 max_event_queues(struct rte_event_dev_config *dev_conf,
165 struct rte_event_dev_info *info)
167 dev_conf->nb_event_queues = info->max_event_queues + 1;
171 max_event_queue_flows(struct rte_event_dev_config *dev_conf,
172 struct rte_event_dev_info *info)
174 dev_conf->nb_event_queue_flows = info->max_event_queue_flows + 1;
178 max_event_port_dequeue_depth(struct rte_event_dev_config *dev_conf,
179 struct rte_event_dev_info *info)
181 dev_conf->nb_event_port_dequeue_depth =
182 info->max_event_port_dequeue_depth + 1;
186 max_event_port_enqueue_depth(struct rte_event_dev_config *dev_conf,
187 struct rte_event_dev_info *info)
189 dev_conf->nb_event_port_enqueue_depth =
190 info->max_event_port_enqueue_depth + 1;
195 test_eventdev_configure(void)
198 struct rte_event_dev_config dev_conf;
199 struct rte_event_dev_info info;
200 ret = rte_event_dev_configure(TEST_DEV_ID, NULL);
201 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
203 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
204 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
207 TEST_ASSERT_EQUAL(-EINVAL,
208 test_ethdev_config_run(&dev_conf, &info, max_dequeue_limit),
209 "Config negative test failed");
210 TEST_ASSERT_EQUAL(-EINVAL,
211 test_ethdev_config_run(&dev_conf, &info, max_events_limit),
212 "Config negative test failed");
213 TEST_ASSERT_EQUAL(-EINVAL,
214 test_ethdev_config_run(&dev_conf, &info, max_event_ports),
215 "Config negative test failed");
216 TEST_ASSERT_EQUAL(-EINVAL,
217 test_ethdev_config_run(&dev_conf, &info, max_event_queues),
218 "Config negative test failed");
219 TEST_ASSERT_EQUAL(-EINVAL,
220 test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows),
221 "Config negative test failed");
223 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) {
224 TEST_ASSERT_EQUAL(-EINVAL,
225 test_ethdev_config_run(&dev_conf, &info,
226 max_event_port_dequeue_depth),
227 "Config negative test failed");
228 TEST_ASSERT_EQUAL(-EINVAL,
229 test_ethdev_config_run(&dev_conf, &info,
230 max_event_port_enqueue_depth),
231 "Config negative test failed");
235 devconf_set_default_sane_values(&dev_conf, &info);
236 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
237 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
240 devconf_set_default_sane_values(&dev_conf, &info);
241 dev_conf.nb_event_ports = RTE_MAX(info.max_event_ports/2, 1);
242 dev_conf.nb_event_queues = RTE_MAX(info.max_event_queues/2, 1);
243 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
244 TEST_ASSERT_SUCCESS(ret, "Failed to re configure eventdev");
246 /* re-configure back to max_event_queues and max_event_ports */
247 devconf_set_default_sane_values(&dev_conf, &info);
248 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
249 TEST_ASSERT_SUCCESS(ret, "Failed to re-configure eventdev");
256 eventdev_configure_setup(void)
259 struct rte_event_dev_config dev_conf;
260 struct rte_event_dev_info info;
262 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
263 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
264 devconf_set_default_sane_values(&dev_conf, &info);
265 ret = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
266 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
272 test_eventdev_queue_default_conf_get(void)
275 struct rte_event_queue_conf qconf;
277 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, NULL);
278 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
280 uint32_t queue_count;
281 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
282 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
283 "Queue count get failed");
285 for (i = 0; i < (int)queue_count; i++) {
286 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
288 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d info", i);
295 test_eventdev_queue_setup(void)
298 struct rte_event_dev_info info;
299 struct rte_event_queue_conf qconf;
301 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
302 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
305 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
306 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
307 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
308 qconf.nb_atomic_flows = info.max_event_queue_flows + 1;
309 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
310 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
312 qconf.nb_atomic_flows = info.max_event_queue_flows;
313 qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
314 qconf.nb_atomic_order_sequences = info.max_event_queue_flows + 1;
315 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
316 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
318 ret = rte_event_queue_setup(TEST_DEV_ID, info.max_event_queues,
320 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
323 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
324 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 info");
325 ret = rte_event_queue_setup(TEST_DEV_ID, 0, &qconf);
326 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue0");
328 uint32_t queue_count;
329 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
330 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
331 "Queue count get failed");
333 for (i = 0; i < (int)queue_count; i++) {
334 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
335 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
342 test_eventdev_queue_count(void)
345 struct rte_event_dev_info info;
347 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
348 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
350 uint32_t queue_count;
351 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
352 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
353 "Queue count get failed");
354 TEST_ASSERT_EQUAL(queue_count, info.max_event_queues,
355 "Wrong queue count");
361 test_eventdev_queue_attr_priority(void)
364 struct rte_event_dev_info info;
365 struct rte_event_queue_conf qconf;
368 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
369 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
371 uint32_t queue_count;
372 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
373 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
374 "Queue count get failed");
376 for (i = 0; i < (int)queue_count; i++) {
377 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, i,
379 TEST_ASSERT_SUCCESS(ret, "Failed to get queue%d def conf", i);
380 qconf.priority = i % RTE_EVENT_DEV_PRIORITY_LOWEST;
381 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
382 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
385 for (i = 0; i < (int)queue_count; i++) {
387 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
388 RTE_EVENT_QUEUE_ATTR_PRIORITY, &tmp),
389 "Queue priority get failed");
392 if (info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
393 TEST_ASSERT_EQUAL(priority,
394 i % RTE_EVENT_DEV_PRIORITY_LOWEST,
395 "Wrong priority value for queue%d", i);
397 TEST_ASSERT_EQUAL(priority,
398 RTE_EVENT_DEV_PRIORITY_NORMAL,
399 "Wrong priority value for queue%d", i);
406 test_eventdev_queue_attr_nb_atomic_flows(void)
409 struct rte_event_dev_info info;
410 struct rte_event_queue_conf qconf;
411 uint32_t nb_atomic_flows;
413 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
414 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
416 uint32_t queue_count;
417 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
418 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
419 "Queue count get failed");
421 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
422 TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
424 if (qconf.nb_atomic_flows == 0)
425 /* Assume PMD doesn't support atomic flows, return early */
428 qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
430 for (i = 0; i < (int)queue_count; i++) {
431 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
432 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
435 for (i = 0; i < (int)queue_count; i++) {
436 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
437 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS,
439 "Queue nb_atomic_flows get failed");
441 TEST_ASSERT_EQUAL(nb_atomic_flows, qconf.nb_atomic_flows,
442 "Wrong atomic flows value for queue%d", i);
449 test_eventdev_queue_attr_nb_atomic_order_sequences(void)
452 struct rte_event_dev_info info;
453 struct rte_event_queue_conf qconf;
454 uint32_t nb_atomic_order_sequences;
456 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
457 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
459 uint32_t queue_count;
460 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
461 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
462 "Queue count get failed");
464 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
465 TEST_ASSERT_SUCCESS(ret, "Failed to get queue 0's def conf");
467 if (qconf.nb_atomic_order_sequences == 0)
468 /* Assume PMD doesn't support reordering */
471 qconf.schedule_type = RTE_SCHED_TYPE_ORDERED;
473 for (i = 0; i < (int)queue_count; i++) {
474 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
475 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
478 for (i = 0; i < (int)queue_count; i++) {
479 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
480 RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES,
481 &nb_atomic_order_sequences),
482 "Queue nb_atomic_order_sequencess get failed");
484 TEST_ASSERT_EQUAL(nb_atomic_order_sequences,
485 qconf.nb_atomic_order_sequences,
486 "Wrong atomic order sequences value for queue%d",
494 test_eventdev_queue_attr_event_queue_cfg(void)
497 struct rte_event_dev_info info;
498 struct rte_event_queue_conf qconf;
499 uint32_t event_queue_cfg;
501 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
502 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
504 uint32_t queue_count;
505 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
506 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
507 "Queue count get failed");
509 ret = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qconf);
510 TEST_ASSERT_SUCCESS(ret, "Failed to get queue0 def conf");
512 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
514 for (i = 0; i < (int)queue_count; i++) {
515 ret = rte_event_queue_setup(TEST_DEV_ID, i, &qconf);
516 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
519 for (i = 0; i < (int)queue_count; i++) {
520 TEST_ASSERT_SUCCESS(rte_event_queue_attr_get(TEST_DEV_ID, i,
521 RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG,
523 "Queue event_queue_cfg get failed");
525 TEST_ASSERT_EQUAL(event_queue_cfg, qconf.event_queue_cfg,
526 "Wrong event_queue_cfg value for queue%d",
534 test_eventdev_port_default_conf_get(void)
537 struct rte_event_port_conf pconf;
539 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, NULL);
540 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
543 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
544 RTE_EVENT_DEV_ATTR_PORT_COUNT,
545 &port_count), "Port count get failed");
547 ret = rte_event_port_default_conf_get(TEST_DEV_ID,
548 port_count + 1, NULL);
549 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
551 for (i = 0; i < (int)port_count; i++) {
552 ret = rte_event_port_default_conf_get(TEST_DEV_ID, i,
554 TEST_ASSERT_SUCCESS(ret, "Failed to get port%d info", i);
561 test_eventdev_port_setup(void)
564 struct rte_event_dev_info info;
565 struct rte_event_port_conf pconf;
567 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
568 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
571 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
572 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
573 pconf.new_event_threshold = info.max_num_events + 1;
574 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
575 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
577 pconf.new_event_threshold = info.max_num_events;
578 pconf.dequeue_depth = info.max_event_port_dequeue_depth + 1;
579 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
580 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
582 pconf.dequeue_depth = info.max_event_port_dequeue_depth;
583 pconf.enqueue_depth = info.max_event_port_enqueue_depth + 1;
584 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
585 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
587 ret = rte_event_port_setup(TEST_DEV_ID, info.max_event_ports,
589 TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
592 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
593 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
594 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
595 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
598 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
599 RTE_EVENT_DEV_ATTR_PORT_COUNT,
600 &port_count), "Port count get failed");
602 for (i = 0; i < (int)port_count; i++) {
603 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
604 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
611 test_eventdev_port_attr_dequeue_depth(void)
614 struct rte_event_dev_info info;
615 struct rte_event_port_conf pconf;
617 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
618 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
620 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
621 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
622 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
623 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
626 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
627 RTE_EVENT_PORT_ATTR_DEQ_DEPTH, &value),
628 0, "Call to get port dequeue depth failed");
629 TEST_ASSERT_EQUAL(value, pconf.dequeue_depth,
630 "Wrong port dequeue depth");
636 test_eventdev_port_attr_enqueue_depth(void)
639 struct rte_event_dev_info info;
640 struct rte_event_port_conf pconf;
642 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
643 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
645 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
646 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
647 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
648 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
651 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
652 RTE_EVENT_PORT_ATTR_ENQ_DEPTH, &value),
653 0, "Call to get port enqueue depth failed");
654 TEST_ASSERT_EQUAL(value, pconf.enqueue_depth,
655 "Wrong port enqueue depth");
661 test_eventdev_port_attr_new_event_threshold(void)
664 struct rte_event_dev_info info;
665 struct rte_event_port_conf pconf;
667 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
668 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
670 ret = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pconf);
671 TEST_ASSERT_SUCCESS(ret, "Failed to get port0 info");
672 ret = rte_event_port_setup(TEST_DEV_ID, 0, &pconf);
673 TEST_ASSERT_SUCCESS(ret, "Failed to setup port0");
676 TEST_ASSERT_EQUAL(rte_event_port_attr_get(TEST_DEV_ID, 0,
677 RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD, &value),
678 0, "Call to get port new event threshold failed");
679 TEST_ASSERT_EQUAL((int32_t) value, pconf.new_event_threshold,
680 "Wrong port new event threshold");
686 test_eventdev_port_count(void)
689 struct rte_event_dev_info info;
691 ret = rte_event_dev_info_get(TEST_DEV_ID, &info);
692 TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
695 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
696 RTE_EVENT_DEV_ATTR_PORT_COUNT,
697 &port_count), "Port count get failed");
698 TEST_ASSERT_EQUAL(port_count, info.max_event_ports, "Wrong port count");
704 test_eventdev_timeout_ticks(void)
707 uint64_t timeout_ticks;
709 ret = rte_event_dequeue_timeout_ticks(TEST_DEV_ID, 100, &timeout_ticks);
711 TEST_ASSERT_SUCCESS(ret, "Fail to get timeout_ticks");
718 test_eventdev_start_stop(void)
722 ret = eventdev_configure_setup();
723 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
725 uint32_t queue_count;
726 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
727 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
728 "Queue count get failed");
729 for (i = 0; i < (int)queue_count; i++) {
730 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
731 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
735 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
736 RTE_EVENT_DEV_ATTR_PORT_COUNT,
737 &port_count), "Port count get failed");
739 for (i = 0; i < (int)port_count; i++) {
740 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
741 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
744 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
745 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
748 ret = rte_event_dev_start(TEST_DEV_ID);
749 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
751 rte_event_dev_stop(TEST_DEV_ID);
757 eventdev_setup_device(void)
761 ret = eventdev_configure_setup();
762 TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
764 uint32_t queue_count;
765 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
766 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
767 "Queue count get failed");
768 for (i = 0; i < (int)queue_count; i++) {
769 ret = rte_event_queue_setup(TEST_DEV_ID, i, NULL);
770 TEST_ASSERT_SUCCESS(ret, "Failed to setup queue%d", i);
774 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
775 RTE_EVENT_DEV_ATTR_PORT_COUNT,
776 &port_count), "Port count get failed");
778 for (i = 0; i < (int)port_count; i++) {
779 ret = rte_event_port_setup(TEST_DEV_ID, i, NULL);
780 TEST_ASSERT_SUCCESS(ret, "Failed to setup port%d", i);
783 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
784 TEST_ASSERT(ret == (int)queue_count, "Failed to link port, device %d",
787 ret = rte_event_dev_start(TEST_DEV_ID);
788 TEST_ASSERT_SUCCESS(ret, "Failed to start device%d", TEST_DEV_ID);
794 eventdev_stop_device(void)
796 rte_event_dev_stop(TEST_DEV_ID);
800 test_eventdev_link(void)
802 int ret, nb_queues, i;
803 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
804 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
806 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
807 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
810 uint32_t queue_count;
811 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
812 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
813 "Queue count get failed");
814 nb_queues = queue_count;
815 for (i = 0; i < nb_queues; i++) {
817 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
820 ret = rte_event_port_link(TEST_DEV_ID, 0, queues,
821 priorities, nb_queues);
822 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
828 test_eventdev_unlink(void)
830 int ret, nb_queues, i;
831 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
833 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
834 TEST_ASSERT(ret >= 0, "Failed to unlink with NULL device%d",
837 uint32_t queue_count;
838 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
839 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
840 "Queue count get failed");
841 nb_queues = queue_count;
842 for (i = 0; i < nb_queues; i++)
845 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
846 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
849 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
850 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
856 test_eventdev_link_get(void)
859 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
860 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
862 /* link all queues */
863 ret = rte_event_port_link(TEST_DEV_ID, 0, NULL, NULL, 0);
864 TEST_ASSERT(ret >= 0, "Failed to link with NULL device%d",
867 uint32_t queue_count;
868 TEST_ASSERT_SUCCESS(rte_event_dev_attr_get(TEST_DEV_ID,
869 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &queue_count),
870 "Queue count get failed");
871 const int nb_queues = queue_count;
872 for (i = 0; i < nb_queues; i++)
875 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, nb_queues);
876 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
879 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
880 TEST_ASSERT(ret == 0, "(%d)Wrong link get=%d", TEST_DEV_ID, ret);
882 /* link all queues and get the links */
883 for (i = 0; i < nb_queues; i++) {
885 priorities[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
887 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
889 TEST_ASSERT(ret == nb_queues, "Failed to link(device%d) ret=%d",
891 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
892 TEST_ASSERT(ret == nb_queues, "(%d)Wrong link get ret=%d expected=%d",
893 TEST_DEV_ID, ret, nb_queues);
895 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
896 TEST_ASSERT(ret == nb_queues, "Failed to unlink(device%d) ret=%d",
898 /* link just one queue */
900 priorities[0] = RTE_EVENT_DEV_PRIORITY_NORMAL;
902 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities, 1);
903 TEST_ASSERT(ret == 1, "Failed to link(device%d) ret=%d",
905 ret = rte_event_port_links_get(TEST_DEV_ID, 0, queues, priorities);
906 TEST_ASSERT(ret == 1, "(%d)Wrong link get ret=%d expected=%d",
907 TEST_DEV_ID, ret, 1);
908 /* unlink the queue */
909 ret = rte_event_port_unlink(TEST_DEV_ID, 0, NULL, 0);
910 TEST_ASSERT(ret == 1, "Failed to unlink(device%d) ret=%d",
913 /* 4links and 2 unlinks */
914 if (nb_queues >= 4) {
915 for (i = 0; i < 4; i++) {
917 priorities[i] = 0x40;
919 ret = rte_event_port_link(TEST_DEV_ID, 0, queues, priorities,
921 TEST_ASSERT(ret == 4, "Failed to link(device%d) ret=%d",
924 for (i = 0; i < 2; i++)
927 ret = rte_event_port_unlink(TEST_DEV_ID, 0, queues, 2);
928 TEST_ASSERT(ret == 2, "Failed to unlink(device%d) ret=%d",
930 ret = rte_event_port_links_get(TEST_DEV_ID, 0,
932 TEST_ASSERT(ret == 2, "(%d)Wrong link get ret=%d expected=%d",
933 TEST_DEV_ID, ret, 2);
934 TEST_ASSERT(queues[0] == 2, "ret=%d expected=%d", ret, 2);
935 TEST_ASSERT(priorities[0] == 0x40, "ret=%d expected=%d",
937 TEST_ASSERT(queues[1] == 3, "ret=%d expected=%d", ret, 3);
938 TEST_ASSERT(priorities[1] == 0x40, "ret=%d expected=%d",
946 test_eventdev_close(void)
948 rte_event_dev_stop(TEST_DEV_ID);
949 return rte_event_dev_close(TEST_DEV_ID);
952 static struct unit_test_suite eventdev_common_testsuite = {
953 .suite_name = "eventdev common code unit test suite",
954 .setup = testsuite_setup,
955 .teardown = testsuite_teardown,
957 TEST_CASE_ST(NULL, NULL,
958 test_eventdev_count),
959 TEST_CASE_ST(NULL, NULL,
960 test_eventdev_get_dev_id),
961 TEST_CASE_ST(NULL, NULL,
962 test_eventdev_socket_id),
963 TEST_CASE_ST(NULL, NULL,
964 test_eventdev_info_get),
965 TEST_CASE_ST(NULL, NULL,
966 test_eventdev_configure),
967 TEST_CASE_ST(eventdev_configure_setup, NULL,
968 test_eventdev_queue_default_conf_get),
969 TEST_CASE_ST(eventdev_configure_setup, NULL,
970 test_eventdev_queue_setup),
971 TEST_CASE_ST(eventdev_configure_setup, NULL,
972 test_eventdev_queue_count),
973 TEST_CASE_ST(eventdev_configure_setup, NULL,
974 test_eventdev_queue_attr_priority),
975 TEST_CASE_ST(eventdev_configure_setup, NULL,
976 test_eventdev_queue_attr_nb_atomic_flows),
977 TEST_CASE_ST(eventdev_configure_setup, NULL,
978 test_eventdev_queue_attr_nb_atomic_order_sequences),
979 TEST_CASE_ST(eventdev_configure_setup, NULL,
980 test_eventdev_queue_attr_event_queue_cfg),
981 TEST_CASE_ST(eventdev_configure_setup, NULL,
982 test_eventdev_port_default_conf_get),
983 TEST_CASE_ST(eventdev_configure_setup, NULL,
984 test_eventdev_port_setup),
985 TEST_CASE_ST(eventdev_configure_setup, NULL,
986 test_eventdev_port_attr_dequeue_depth),
987 TEST_CASE_ST(eventdev_configure_setup, NULL,
988 test_eventdev_port_attr_enqueue_depth),
989 TEST_CASE_ST(eventdev_configure_setup, NULL,
990 test_eventdev_port_attr_new_event_threshold),
991 TEST_CASE_ST(eventdev_configure_setup, NULL,
992 test_eventdev_port_count),
993 TEST_CASE_ST(eventdev_configure_setup, NULL,
994 test_eventdev_timeout_ticks),
995 TEST_CASE_ST(NULL, NULL,
996 test_eventdev_start_stop),
997 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
999 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
1000 test_eventdev_unlink),
1001 TEST_CASE_ST(eventdev_setup_device, eventdev_stop_device,
1002 test_eventdev_link_get),
1003 TEST_CASE_ST(eventdev_setup_device, NULL,
1004 test_eventdev_close),
1005 TEST_CASES_END() /**< NULL terminate unit test array */
1010 test_eventdev_common(void)
1012 return unit_test_suite_runner(&eventdev_common_testsuite);
1015 REGISTER_TEST_COMMAND(eventdev_common_autotest, test_eventdev_common);