deb_dpdk.git: lib/librte_ether/rte_ethdev.c (new upstream version 17.11.1)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_memory.h>
51 #include <rte_memcpy.h>
52 #include <rte_memzone.h>
53 #include <rte_launch.h>
54 #include <rte_eal.h>
55 #include <rte_per_lcore.h>
56 #include <rte_lcore.h>
57 #include <rte_atomic.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_common.h>
60 #include <rte_mempool.h>
61 #include <rte_malloc.h>
62 #include <rte_mbuf.h>
63 #include <rte_errno.h>
64 #include <rte_spinlock.h>
65 #include <rte_string_fns.h>
66
67 #include "rte_ether.h"
68 #include "rte_ethdev.h"
69 #include "ethdev_profile.h"
70
71 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
72 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
73 static struct rte_eth_dev_data *rte_eth_dev_data;
74 static uint8_t eth_dev_last_created_port;
75
76 /* spinlock for eth device callbacks */
77 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
78
79 /* spinlock for add/remove rx callbacks */
80 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
81
82 /* spinlock for add/remove tx callbacks */
83 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
84
85 /* store statistics names and their offsets in the stats structure */
86 struct rte_eth_xstats_name_off {
87         char name[RTE_ETH_XSTATS_NAME_SIZE];
88         unsigned offset;
89 };
90
91 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
92         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
93         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
94         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
95         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
96         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
97         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
98         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
99         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
100                 rx_nombuf)},
101 };
102
103 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
104
105 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
106         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
107         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
108         {"errors", offsetof(struct rte_eth_stats, q_errors)},
109 };
110
111 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
112                 sizeof(rte_rxq_stats_strings[0]))
113
114 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
115         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
116         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
117 };
118 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
119                 sizeof(rte_txq_stats_strings[0]))
120
121
122 /**
123  * The user application callback description.
124  *
125  * It contains the callback address registered by the user application,
126  * a pointer to the callback parameters, and the event type.
127  */
128 struct rte_eth_dev_callback {
129         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
130         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
131         void *cb_arg;                           /**< Parameter for callback */
132         void *ret_param;                        /**< Return parameter */
133         enum rte_eth_event_type event;          /**< Interrupt event type */
134         uint32_t active;                        /**< Callback is executing */
135 };
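/*
 * Illustrative sketch (not part of the upstream file): how an application
 * would register a link-state-change callback that ends up on this list,
 * via rte_eth_dev_callback_register(). The callback name lsc_event_cb and
 * its body are hypothetical.
 *
 *     static int
 *     lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *                  void *cb_arg, void *ret_param)
 *     {
 *             RTE_SET_USED(cb_arg);
 *             RTE_SET_USED(ret_param);
 *             printf("port %u: link state change event %d\n",
 *                    port_id, (int)event);
 *             return 0;
 *     }
 *
 *     rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                                   lsc_event_cb, NULL);
 */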
136
137 enum {
138         STAT_QMAP_TX = 0,
139         STAT_QMAP_RX
140 };
141
142 uint16_t
143 rte_eth_find_next(uint16_t port_id)
144 {
145         while (port_id < RTE_MAX_ETHPORTS &&
146                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
147                 port_id++;
148
149         if (port_id >= RTE_MAX_ETHPORTS)
150                 return RTE_MAX_ETHPORTS;
151
152         return port_id;
153 }
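/*
 * Illustrative sketch (not part of the upstream file): iterating over all
 * attached ports, either directly with rte_eth_find_next() or with the
 * RTE_ETH_FOREACH_DEV() macro built on top of it (the macro is also used
 * further down in this file). The variable pid is hypothetical.
 *
 *     uint16_t pid;
 *
 *     for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *          pid = rte_eth_find_next(pid + 1))
 *             printf("port %u is attached\n", pid);
 *
 *     RTE_ETH_FOREACH_DEV(pid)
 *             printf("port %u is attached\n", pid);
 */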
154
155 static void
156 rte_eth_dev_data_alloc(void)
157 {
158         const unsigned flags = 0;
159         const struct rte_memzone *mz;
160
161         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
162                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
163                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
164                                 rte_socket_id(), flags);
165         } else
166                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
167         if (mz == NULL)
168                 rte_panic("Cannot allocate memzone for ethernet port data\n");
169
170         rte_eth_dev_data = mz->addr;
171         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
172                 memset(rte_eth_dev_data, 0,
173                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
174 }
175
176 struct rte_eth_dev *
177 rte_eth_dev_allocated(const char *name)
178 {
179         unsigned i;
180
181         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
182                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
183                     strcmp(rte_eth_devices[i].data->name, name) == 0)
184                         return &rte_eth_devices[i];
185         }
186         return NULL;
187 }
188
189 static uint16_t
190 rte_eth_dev_find_free_port(void)
191 {
192         unsigned i;
193
194         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
195                 /* Using shared name field to find a free port. */
196                 if (rte_eth_dev_data[i].name[0] == '\0') {
197                         RTE_ASSERT(rte_eth_devices[i].state ==
198                                    RTE_ETH_DEV_UNUSED);
199                         return i;
200                 }
201         }
202         return RTE_MAX_ETHPORTS;
203 }
204
205 static struct rte_eth_dev *
206 eth_dev_get(uint16_t port_id)
207 {
208         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
209
210         eth_dev->data = &rte_eth_dev_data[port_id];
211         eth_dev->state = RTE_ETH_DEV_ATTACHED;
212         TAILQ_INIT(&(eth_dev->link_intr_cbs));
213
214         eth_dev_last_created_port = port_id;
215
216         return eth_dev;
217 }
218
219 struct rte_eth_dev *
220 rte_eth_dev_allocate(const char *name)
221 {
222         uint16_t port_id;
223         struct rte_eth_dev *eth_dev;
224
225         if (rte_eth_dev_data == NULL)
226                 rte_eth_dev_data_alloc();
227
228         port_id = rte_eth_dev_find_free_port();
229         if (port_id == RTE_MAX_ETHPORTS) {
230                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
231                 return NULL;
232         }
233
234         if (rte_eth_dev_allocated(name) != NULL) {
235                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
236                                 name);
237                 return NULL;
238         }
239
240         eth_dev = eth_dev_get(port_id);
241         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
242         eth_dev->data->port_id = port_id;
243         eth_dev->data->mtu = ETHER_MTU;
244
245         return eth_dev;
246 }
247
248 /*
249  * Attach to a port already registered by the primary process, which
250  * ensures that the same device gets the same port id in both the
251  * primary and the secondary process.
252  */
253 struct rte_eth_dev *
254 rte_eth_dev_attach_secondary(const char *name)
255 {
256         uint16_t i;
257         struct rte_eth_dev *eth_dev;
258
259         if (rte_eth_dev_data == NULL)
260                 rte_eth_dev_data_alloc();
261
262         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
263                 if (strcmp(rte_eth_dev_data[i].name, name) == 0)
264                         break;
265         }
266         if (i == RTE_MAX_ETHPORTS) {
267                 RTE_PMD_DEBUG_TRACE(
268                         "device %s is not driven by the primary process\n",
269                         name);
270                 return NULL;
271         }
272
273         eth_dev = eth_dev_get(i);
274         RTE_ASSERT(eth_dev->data->port_id == i);
275
276         return eth_dev;
277 }
278
279 int
280 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
281 {
282         if (eth_dev == NULL)
283                 return -EINVAL;
284
285         memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
286         eth_dev->state = RTE_ETH_DEV_UNUSED;
287         return 0;
288 }
289
290 int
291 rte_eth_dev_is_valid_port(uint16_t port_id)
292 {
293         if (port_id >= RTE_MAX_ETHPORTS ||
294             (rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
295              rte_eth_devices[port_id].state != RTE_ETH_DEV_DEFERRED))
296                 return 0;
297         else
298                 return 1;
299 }
300
301 int
302 rte_eth_dev_socket_id(uint16_t port_id)
303 {
304         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
305         return rte_eth_devices[port_id].data->numa_node;
306 }
307
308 void *
309 rte_eth_dev_get_sec_ctx(uint8_t port_id)
310 {
311         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
312         return rte_eth_devices[port_id].security_ctx;
313 }
314
315 uint16_t
316 rte_eth_dev_count(void)
317 {
318         uint16_t p;
319         uint16_t count;
320
321         count = 0;
322
323         RTE_ETH_FOREACH_DEV(p)
324                 count++;
325
326         return count;
327 }
328
329 int
330 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
331 {
332         char *tmp;
333
334         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
335
336         if (name == NULL) {
337                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
338                 return -EINVAL;
339         }
340
341         /* Don't check 'rte_eth_devices[i].data' here, because it might
342          * be overwritten by a VDEV PMD. */
343         tmp = rte_eth_dev_data[port_id].name;
344         strcpy(name, tmp);
345         return 0;
346 }
347
348 int
349 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
350 {
351         int i;
352
353         if (name == NULL) {
354                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
355                 return -EINVAL;
356         }
357
358         RTE_ETH_FOREACH_DEV(i) {
359                 if (!strncmp(name,
360                         rte_eth_dev_data[i].name, strlen(name))) {
361
362                         *port_id = i;
363
364                         return 0;
365                 }
366         }
367         return -ENODEV;
368 }
369
370 /* attach the new device, then store port_id of the device */
371 int
372 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
373 {
374         int ret = -1;
375         int current = rte_eth_dev_count();
376         char *name = NULL;
377         char *args = NULL;
378
379         if ((devargs == NULL) || (port_id == NULL)) {
380                 ret = -EINVAL;
381                 goto err;
382         }
383
384         /* parse devargs, then retrieve device name and args */
385         if (rte_eal_parse_devargs_str(devargs, &name, &args))
386                 goto err;
387
388         ret = rte_eal_dev_attach(name, args);
389         if (ret < 0)
390                 goto err;
391
392         /* no point looking at the port count if no port exists */
393         if (!rte_eth_dev_count()) {
394                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
395                 ret = -1;
396                 goto err;
397         }
398
399         /* If the port count did not change, there is a bug: the driver
400          * reported a successful attach but did not create a port.
401          */
402         if (current == rte_eth_dev_count()) {
403                 ret = -1;
404                 goto err;
405         }
406
407         *port_id = eth_dev_last_created_port;
408         ret = 0;
409
410 err:
411         free(name);
412         free(args);
413         return ret;
414 }
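/*
 * Illustrative sketch (not part of the upstream file): hot-plugging a
 * virtual device from a devargs string. "net_null0" is only an example
 * and assumes the null PMD is available in the build.
 *
 *     uint16_t port_id;
 *
 *     if (rte_eth_dev_attach("net_null0", &port_id) == 0)
 *             printf("attached as port %u\n", port_id);
 */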
415
416 /* detach the device, then store the name of the device */
417 int
418 rte_eth_dev_detach(uint16_t port_id, char *name)
419 {
420         uint32_t dev_flags;
421         int ret = -1;
422
423         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
424
425         if (name == NULL) {
426                 ret = -EINVAL;
427                 goto err;
428         }
429
430         dev_flags = rte_eth_devices[port_id].data->dev_flags;
431         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
432                 RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
433                         port_id);
434                 ret = -ENOTSUP;
435                 goto err;
436         }
437
438         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
439                  "%s", rte_eth_devices[port_id].data->name);
440
441         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
442         if (ret < 0)
443                 goto err;
444
445         rte_eth_devices[port_id].state = RTE_ETH_DEV_UNUSED;
446         return 0;
447
448 err:
449         return ret;
450 }
451
452 static int
453 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
454 {
455         uint16_t old_nb_queues = dev->data->nb_rx_queues;
456         void **rxq;
457         unsigned i;
458
459         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
460                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
461                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
462                                 RTE_CACHE_LINE_SIZE);
463                 if (dev->data->rx_queues == NULL) {
464                         dev->data->nb_rx_queues = 0;
465                         return -(ENOMEM);
466                 }
467         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
468                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
469
470                 rxq = dev->data->rx_queues;
471
472                 for (i = nb_queues; i < old_nb_queues; i++)
473                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
474                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
475                                 RTE_CACHE_LINE_SIZE);
476                 if (rxq == NULL)
477                         return -(ENOMEM);
478                 if (nb_queues > old_nb_queues) {
479                         uint16_t new_qs = nb_queues - old_nb_queues;
480
481                         memset(rxq + old_nb_queues, 0,
482                                 sizeof(rxq[0]) * new_qs);
483                 }
484
485                 dev->data->rx_queues = rxq;
486
487         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
488                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
489
490                 rxq = dev->data->rx_queues;
491
492                 for (i = nb_queues; i < old_nb_queues; i++)
493                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
494
495                 rte_free(dev->data->rx_queues);
496                 dev->data->rx_queues = NULL;
497         }
498         dev->data->nb_rx_queues = nb_queues;
499         return 0;
500 }
501
502 int
503 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
504 {
505         struct rte_eth_dev *dev;
506
507         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
508
509         dev = &rte_eth_devices[port_id];
510         if (rx_queue_id >= dev->data->nb_rx_queues) {
511                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
512                 return -EINVAL;
513         }
514
515         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
516
517         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
518                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
519                         " already started\n",
520                         rx_queue_id, port_id);
521                 return 0;
522         }
523
524         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
525
526 }
527
528 int
529 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
530 {
531         struct rte_eth_dev *dev;
532
533         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
534
535         dev = &rte_eth_devices[port_id];
536         if (rx_queue_id >= dev->data->nb_rx_queues) {
537                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
538                 return -EINVAL;
539         }
540
541         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
542
543         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
544                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
545                         " already stopped\n",
546                         rx_queue_id, port_id);
547                 return 0;
548         }
549
550         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
551
552 }
553
554 int
555 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
556 {
557         struct rte_eth_dev *dev;
558
559         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
560
561         dev = &rte_eth_devices[port_id];
562         if (tx_queue_id >= dev->data->nb_tx_queues) {
563                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
564                 return -EINVAL;
565         }
566
567         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
568
569         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
570                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
571                         " already started\n",
572                         tx_queue_id, port_id);
573                 return 0;
574         }
575
576         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
577
578 }
579
580 int
581 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
582 {
583         struct rte_eth_dev *dev;
584
585         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
586
587         dev = &rte_eth_devices[port_id];
588         if (tx_queue_id >= dev->data->nb_tx_queues) {
589                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
590                 return -EINVAL;
591         }
592
593         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
594
595         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
596                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
597                         " already stopped\n",
598                         tx_queue_id, port_id);
599                 return 0;
600         }
601
602         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
603
604 }
605
606 static int
607 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
608 {
609         uint16_t old_nb_queues = dev->data->nb_tx_queues;
610         void **txq;
611         unsigned i;
612
613         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
614                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
615                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
616                                                    RTE_CACHE_LINE_SIZE);
617                 if (dev->data->tx_queues == NULL) {
618                         dev->data->nb_tx_queues = 0;
619                         return -(ENOMEM);
620                 }
621         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
622                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
623
624                 txq = dev->data->tx_queues;
625
626                 for (i = nb_queues; i < old_nb_queues; i++)
627                         (*dev->dev_ops->tx_queue_release)(txq[i]);
628                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
629                                   RTE_CACHE_LINE_SIZE);
630                 if (txq == NULL)
631                         return -ENOMEM;
632                 if (nb_queues > old_nb_queues) {
633                         uint16_t new_qs = nb_queues - old_nb_queues;
634
635                         memset(txq + old_nb_queues, 0,
636                                sizeof(txq[0]) * new_qs);
637                 }
638
639                 dev->data->tx_queues = txq;
640
641         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
642                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
643
644                 txq = dev->data->tx_queues;
645
646                 for (i = nb_queues; i < old_nb_queues; i++)
647                         (*dev->dev_ops->tx_queue_release)(txq[i]);
648
649                 rte_free(dev->data->tx_queues);
650                 dev->data->tx_queues = NULL;
651         }
652         dev->data->nb_tx_queues = nb_queues;
653         return 0;
654 }
655
656 uint32_t
657 rte_eth_speed_bitflag(uint32_t speed, int duplex)
658 {
659         switch (speed) {
660         case ETH_SPEED_NUM_10M:
661                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
662         case ETH_SPEED_NUM_100M:
663                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
664         case ETH_SPEED_NUM_1G:
665                 return ETH_LINK_SPEED_1G;
666         case ETH_SPEED_NUM_2_5G:
667                 return ETH_LINK_SPEED_2_5G;
668         case ETH_SPEED_NUM_5G:
669                 return ETH_LINK_SPEED_5G;
670         case ETH_SPEED_NUM_10G:
671                 return ETH_LINK_SPEED_10G;
672         case ETH_SPEED_NUM_20G:
673                 return ETH_LINK_SPEED_20G;
674         case ETH_SPEED_NUM_25G:
675                 return ETH_LINK_SPEED_25G;
676         case ETH_SPEED_NUM_40G:
677                 return ETH_LINK_SPEED_40G;
678         case ETH_SPEED_NUM_50G:
679                 return ETH_LINK_SPEED_50G;
680         case ETH_SPEED_NUM_56G:
681                 return ETH_LINK_SPEED_56G;
682         case ETH_SPEED_NUM_100G:
683                 return ETH_LINK_SPEED_100G;
684         default:
685                 return 0;
686         }
687 }
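/*
 * Illustrative sketch (not part of the upstream file): building the
 * link_speeds bitmap of struct rte_eth_conf from a numeric speed, here
 * forcing a fixed 10G full-duplex link. The conf variable is hypothetical.
 *
 *     struct rte_eth_conf conf;
 *
 *     memset(&conf, 0, sizeof(conf));
 *     conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                        rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *                                              ETH_LINK_FULL_DUPLEX);
 */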
688
689 /**
690  * Convert the legacy rxmode bitfield configuration into Rx offload flags.
691  */
692 static void
693 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
694                                     uint64_t *rx_offloads)
695 {
696         uint64_t offloads = 0;
697
698         if (rxmode->header_split == 1)
699                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
700         if (rxmode->hw_ip_checksum == 1)
701                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
702         if (rxmode->hw_vlan_filter == 1)
703                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
704         if (rxmode->hw_vlan_strip == 1)
705                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
706         if (rxmode->hw_vlan_extend == 1)
707                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
708         if (rxmode->jumbo_frame == 1)
709                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
710         if (rxmode->hw_strip_crc == 1)
711                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
712         if (rxmode->enable_scatter == 1)
713                 offloads |= DEV_RX_OFFLOAD_SCATTER;
714         if (rxmode->enable_lro == 1)
715                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
716         if (rxmode->hw_timestamp == 1)
717                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
718         if (rxmode->security == 1)
719                 offloads |= DEV_RX_OFFLOAD_SECURITY;
720
721         *rx_offloads = offloads;
722 }
723
724 /**
725  * Convert Rx offload flags back into the legacy rxmode bitfield configuration.
726  */
727 static void
728 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
729                             struct rte_eth_rxmode *rxmode)
730 {
731
732         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
733                 rxmode->header_split = 1;
734         else
735                 rxmode->header_split = 0;
736         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
737                 rxmode->hw_ip_checksum = 1;
738         else
739                 rxmode->hw_ip_checksum = 0;
740         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
741                 rxmode->hw_vlan_filter = 1;
742         else
743                 rxmode->hw_vlan_filter = 0;
744         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
745                 rxmode->hw_vlan_strip = 1;
746         else
747                 rxmode->hw_vlan_strip = 0;
748         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
749                 rxmode->hw_vlan_extend = 1;
750         else
751                 rxmode->hw_vlan_extend = 0;
752         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
753                 rxmode->jumbo_frame = 1;
754         else
755                 rxmode->jumbo_frame = 0;
756         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
757                 rxmode->hw_strip_crc = 1;
758         else
759                 rxmode->hw_strip_crc = 0;
760         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
761                 rxmode->enable_scatter = 1;
762         else
763                 rxmode->enable_scatter = 0;
764         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
765                 rxmode->enable_lro = 1;
766         else
767                 rxmode->enable_lro = 0;
768         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
769                 rxmode->hw_timestamp = 1;
770         else
771                 rxmode->hw_timestamp = 0;
772         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
773                 rxmode->security = 1;
774         else
775                 rxmode->security = 0;
776 }
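/*
 * Illustrative sketch (not part of the upstream file): the same Rx
 * configuration expressed through the legacy bitfield API and through the
 * offloads API that the two helpers above translate between. The variable
 * names are hypothetical.
 *
 *     struct rte_eth_conf legacy_conf = {
 *             .rxmode = {
 *                     .hw_ip_checksum = 1,
 *                     .hw_strip_crc = 1,
 *             },
 *     };
 *
 *     struct rte_eth_conf offload_conf = {
 *             .rxmode = {
 *                     .ignore_offload_bitfield = 1,
 *                     .offloads = DEV_RX_OFFLOAD_CHECKSUM |
 *                                 DEV_RX_OFFLOAD_CRC_STRIP,
 *             },
 *     };
 */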
777
778 int
779 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
780                       const struct rte_eth_conf *dev_conf)
781 {
782         struct rte_eth_dev *dev;
783         struct rte_eth_dev_info dev_info;
784         struct rte_eth_conf local_conf = *dev_conf;
785         int diag;
786
787         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
788
789         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
790                 RTE_PMD_DEBUG_TRACE(
791                         "Number of RX queues requested (%u) is greater than max supported (%d)\n",
792                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
793                 return -EINVAL;
794         }
795
796         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
797                 RTE_PMD_DEBUG_TRACE(
798                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
799                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
800                 return -EINVAL;
801         }
802
803         dev = &rte_eth_devices[port_id];
804
805         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
806         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
807
808         if (dev->data->dev_started) {
809                 RTE_PMD_DEBUG_TRACE(
810                     "port %d must be stopped to allow configuration\n", port_id);
811                 return -EBUSY;
812         }
813
814         /*
815          * Convert between the two offloads APIs so that a PMD only has
816          * to support one of them.
817          */
818         if ((dev_conf->rxmode.ignore_offload_bitfield == 0)) {
819                 rte_eth_convert_rx_offload_bitfield(
820                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
821         } else {
822                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
823                                             &local_conf.rxmode);
824         }
825
826         /* Copy the dev_conf parameter into the dev structure */
827         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
828
829         /*
830          * Check that the numbers of RX and TX queues are not greater
831          * than the maximum number of RX and TX queues supported by the
832          * configured device.
833          */
834         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
835
836         if (nb_rx_q == 0 && nb_tx_q == 0) {
837                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d: nb_rx_q and nb_tx_q cannot both be 0\n", port_id);
838                 return -EINVAL;
839         }
840
841         if (nb_rx_q > dev_info.max_rx_queues) {
842                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
843                                 port_id, nb_rx_q, dev_info.max_rx_queues);
844                 return -EINVAL;
845         }
846
847         if (nb_tx_q > dev_info.max_tx_queues) {
848                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
849                                 port_id, nb_tx_q, dev_info.max_tx_queues);
850                 return -EINVAL;
851         }
852
853         /* Check that the device supports the requested interrupts */
854         if ((dev_conf->intr_conf.lsc == 1) &&
855                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
856                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
857                                         dev->device->driver->name);
858                         return -EINVAL;
859         }
860         if ((dev_conf->intr_conf.rmv == 1) &&
861             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
862                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
863                                     dev->device->driver->name);
864                 return -EINVAL;
865         }
866
867         /*
868          * If jumbo frames are enabled, check that the maximum RX packet
869          * length is supported by the configured device.
870          */
871         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
872                 if (dev_conf->rxmode.max_rx_pkt_len >
873                     dev_info.max_rx_pktlen) {
874                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
875                                 " > max valid value %u\n",
876                                 port_id,
877                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
878                                 (unsigned)dev_info.max_rx_pktlen);
879                         return -EINVAL;
880                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
881                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
882                                 " < min valid value %u\n",
883                                 port_id,
884                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
885                                 (unsigned)ETHER_MIN_LEN);
886                         return -EINVAL;
887                 }
888         } else {
889                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
890                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
891                         /* Use default value */
892                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
893                                                         ETHER_MAX_LEN;
894         }
895
896         /*
897          * Setup new number of RX/TX queues and reconfigure device.
898          */
899         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
900         if (diag != 0) {
901                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
902                                 port_id, diag);
903                 return diag;
904         }
905
906         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
907         if (diag != 0) {
908                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
909                                 port_id, diag);
910                 rte_eth_dev_rx_queue_config(dev, 0);
911                 return diag;
912         }
913
914         diag = (*dev->dev_ops->dev_configure)(dev);
915         if (diag != 0) {
916                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
917                                 port_id, diag);
918                 rte_eth_dev_rx_queue_config(dev, 0);
919                 rte_eth_dev_tx_queue_config(dev, 0);
920                 return diag;
921         }
922
923         /* Initialize Rx profiling if enabled at compilation time. */
924         diag = __rte_eth_profile_rx_init(port_id, dev);
925         if (diag != 0) {
926                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
927                                 port_id, diag);
928                 rte_eth_dev_rx_queue_config(dev, 0);
929                 rte_eth_dev_tx_queue_config(dev, 0);
930                 return diag;
931         }
932
933         return 0;
934 }
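/*
 * Illustrative sketch (not part of the upstream file): a minimal
 * configuration call with one Rx and one Tx queue and default settings.
 * The port_conf name and the use of rte_exit() for error handling are
 * example choices.
 *
 *     static const struct rte_eth_conf port_conf = {
 *             .rxmode = {
 *                     .max_rx_pkt_len = ETHER_MAX_LEN,
 *                     .ignore_offload_bitfield = 1,
 *             },
 *     };
 *
 *     if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *             rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */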
935
936 void
937 _rte_eth_dev_reset(struct rte_eth_dev *dev)
938 {
939         if (dev->data->dev_started) {
940                 RTE_PMD_DEBUG_TRACE(
941                         "port %d must be stopped to allow reset\n",
942                         dev->data->port_id);
943                 return;
944         }
945
946         rte_eth_dev_rx_queue_config(dev, 0);
947         rte_eth_dev_tx_queue_config(dev, 0);
948
949         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
950 }
951
952 static void
953 rte_eth_dev_config_restore(uint16_t port_id)
954 {
955         struct rte_eth_dev *dev;
956         struct rte_eth_dev_info dev_info;
957         struct ether_addr *addr;
958         uint16_t i;
959         uint32_t pool = 0;
960         uint64_t pool_mask;
961
962         dev = &rte_eth_devices[port_id];
963
964         rte_eth_dev_info_get(port_id, &dev_info);
965
966         /* replay MAC address configuration including default MAC */
967         addr = &dev->data->mac_addrs[0];
968         if (*dev->dev_ops->mac_addr_set != NULL)
969                 (*dev->dev_ops->mac_addr_set)(dev, addr);
970         else if (*dev->dev_ops->mac_addr_add != NULL)
971                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
972
973         if (*dev->dev_ops->mac_addr_add != NULL) {
974                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
975                         addr = &dev->data->mac_addrs[i];
976
977                         /* skip zero address */
978                         if (is_zero_ether_addr(addr))
979                                 continue;
980
981                         pool = 0;
982                         pool_mask = dev->data->mac_pool_sel[i];
983
984                         do {
985                                 if (pool_mask & 1ULL)
986                                         (*dev->dev_ops->mac_addr_add)(dev,
987                                                 addr, i, pool);
988                                 pool_mask >>= 1;
989                                 pool++;
990                         } while (pool_mask);
991                 }
992         }
993
994         /* replay promiscuous configuration */
995         if (rte_eth_promiscuous_get(port_id) == 1)
996                 rte_eth_promiscuous_enable(port_id);
997         else if (rte_eth_promiscuous_get(port_id) == 0)
998                 rte_eth_promiscuous_disable(port_id);
999
1000         /* replay all multicast configuration */
1001         if (rte_eth_allmulticast_get(port_id) == 1)
1002                 rte_eth_allmulticast_enable(port_id);
1003         else if (rte_eth_allmulticast_get(port_id) == 0)
1004                 rte_eth_allmulticast_disable(port_id);
1005 }
1006
1007 int
1008 rte_eth_dev_start(uint16_t port_id)
1009 {
1010         struct rte_eth_dev *dev;
1011         int diag;
1012
1013         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1014
1015         dev = &rte_eth_devices[port_id];
1016
1017         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1018
1019         if (dev->data->dev_started != 0) {
1020                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1021                         " already started\n",
1022                         port_id);
1023                 return 0;
1024         }
1025
1026         diag = (*dev->dev_ops->dev_start)(dev);
1027         if (diag == 0)
1028                 dev->data->dev_started = 1;
1029         else
1030                 return diag;
1031
1032         rte_eth_dev_config_restore(port_id);
1033
1034         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1035                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1036                 (*dev->dev_ops->link_update)(dev, 0);
1037         }
1038         return 0;
1039 }
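/*
 * Illustrative sketch (not part of the upstream file): the usual start
 * sequence after rte_eth_dev_configure() and the queue setups. Note that
 * starting the port also replays MAC, promiscuous and all-multicast
 * settings through rte_eth_dev_config_restore() above.
 *
 *     int ret = rte_eth_dev_start(port_id);
 *
 *     if (ret < 0)
 *             rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d\n", ret);
 *
 *     rte_eth_promiscuous_enable(port_id);
 */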
1040
1041 void
1042 rte_eth_dev_stop(uint16_t port_id)
1043 {
1044         struct rte_eth_dev *dev;
1045
1046         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1047         dev = &rte_eth_devices[port_id];
1048
1049         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1050
1051         if (dev->data->dev_started == 0) {
1052                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1053                         " already stopped\n",
1054                         port_id);
1055                 return;
1056         }
1057
1058         dev->data->dev_started = 0;
1059         (*dev->dev_ops->dev_stop)(dev);
1060 }
1061
1062 int
1063 rte_eth_dev_set_link_up(uint16_t port_id)
1064 {
1065         struct rte_eth_dev *dev;
1066
1067         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1068
1069         dev = &rte_eth_devices[port_id];
1070
1071         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1072         return (*dev->dev_ops->dev_set_link_up)(dev);
1073 }
1074
1075 int
1076 rte_eth_dev_set_link_down(uint16_t port_id)
1077 {
1078         struct rte_eth_dev *dev;
1079
1080         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1081
1082         dev = &rte_eth_devices[port_id];
1083
1084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1085         return (*dev->dev_ops->dev_set_link_down)(dev);
1086 }
1087
1088 void
1089 rte_eth_dev_close(uint16_t port_id)
1090 {
1091         struct rte_eth_dev *dev;
1092
1093         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1094         dev = &rte_eth_devices[port_id];
1095
1096         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1097         dev->data->dev_started = 0;
1098         (*dev->dev_ops->dev_close)(dev);
1099
1100         dev->data->nb_rx_queues = 0;
1101         rte_free(dev->data->rx_queues);
1102         dev->data->rx_queues = NULL;
1103         dev->data->nb_tx_queues = 0;
1104         rte_free(dev->data->tx_queues);
1105         dev->data->tx_queues = NULL;
1106 }
1107
1108 int
1109 rte_eth_dev_reset(uint16_t port_id)
1110 {
1111         struct rte_eth_dev *dev;
1112         int ret;
1113
1114         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1115         dev = &rte_eth_devices[port_id];
1116
1117         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1118
1119         rte_eth_dev_stop(port_id);
1120         ret = dev->dev_ops->dev_reset(dev);
1121
1122         return ret;
1123 }
1124
1125 int
1126 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1127                        uint16_t nb_rx_desc, unsigned int socket_id,
1128                        const struct rte_eth_rxconf *rx_conf,
1129                        struct rte_mempool *mp)
1130 {
1131         int ret;
1132         uint32_t mbp_buf_size;
1133         struct rte_eth_dev *dev;
1134         struct rte_eth_dev_info dev_info;
1135         struct rte_eth_rxconf local_conf;
1136         void **rxq;
1137
1138         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1139
1140         dev = &rte_eth_devices[port_id];
1141         if (rx_queue_id >= dev->data->nb_rx_queues) {
1142                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1143                 return -EINVAL;
1144         }
1145
1146         if (dev->data->dev_started) {
1147                 RTE_PMD_DEBUG_TRACE(
1148                     "port %d must be stopped to allow configuration\n", port_id);
1149                 return -EBUSY;
1150         }
1151
1152         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1153         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1154
1155         /*
1156          * Check the size of the mbuf data buffer.
1157          * This value must be provided in the private data of the memory pool.
1158          * First check that the memory pool has valid private data.
1159          */
1160         rte_eth_dev_info_get(port_id, &dev_info);
1161         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1162                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1163                                 mp->name, (int) mp->private_data_size,
1164                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1165                 return -ENOSPC;
1166         }
1167         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1168
1169         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1170                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1171                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1172                                 "=%d)\n",
1173                                 mp->name,
1174                                 (int)mbp_buf_size,
1175                                 (int)(RTE_PKTMBUF_HEADROOM +
1176                                       dev_info.min_rx_bufsize),
1177                                 (int)RTE_PKTMBUF_HEADROOM,
1178                                 (int)dev_info.min_rx_bufsize);
1179                 return -EINVAL;
1180         }
1181
1182         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1183                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1184                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1185
1186                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1187                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1188                         nb_rx_desc,
1189                         dev_info.rx_desc_lim.nb_max,
1190                         dev_info.rx_desc_lim.nb_min,
1191                         dev_info.rx_desc_lim.nb_align);
1192                 return -EINVAL;
1193         }
1194
1195         rxq = dev->data->rx_queues;
1196         if (rxq[rx_queue_id]) {
1197                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1198                                         -ENOTSUP);
1199                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1200                 rxq[rx_queue_id] = NULL;
1201         }
1202
1203         if (rx_conf == NULL)
1204                 rx_conf = &dev_info.default_rxconf;
1205
1206         local_conf = *rx_conf;
1207         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1208                 /*
1209                  * Propagate port-level offloads to queue-level offloads
1210                  * so that they are not discarded.
1211                  */
1212                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1213                                                     &local_conf.offloads);
1214         }
1215
1216         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1217                                               socket_id, &local_conf, mp);
1218         if (!ret) {
1219                 if (!dev->data->min_rx_buf_size ||
1220                     dev->data->min_rx_buf_size > mbp_buf_size)
1221                         dev->data->min_rx_buf_size = mbp_buf_size;
1222         }
1223
1224         return ret;
1225 }
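/*
 * Illustrative sketch (not part of the upstream file): setting up one Rx
 * queue backed by a freshly created mbuf pool; passing a NULL rx_conf
 * selects dev_info.default_rxconf as done above. The pool name "rx_pool",
 * the pool/ring sizes and the error handling are example choices.
 *
 *     struct rte_mempool *mp;
 *
 *     mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                                  RTE_MBUF_DEFAULT_BUF_SIZE,
 *                                  rte_socket_id());
 *     if (mp == NULL)
 *             rte_exit(EXIT_FAILURE, "cannot create mbuf pool\n");
 *
 *     if (rte_eth_rx_queue_setup(port_id, 0, 512,
 *                                rte_eth_dev_socket_id(port_id),
 *                                NULL, mp) < 0)
 *             rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
 */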
1226
1227 /**
1228  * Convert legacy txq_flags into Tx offload flags.
1229  */
1230 static void
1231 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1232 {
1233         uint64_t offloads = 0;
1234
1235         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1236                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1237         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1238                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1239         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1240                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1241         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1242                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1243         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1244                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1245         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1246             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1247                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1248
1249         *tx_offloads = offloads;
1250 }
1251
1252 /**
1253  * Convert Tx offload flags back into legacy txq_flags.
1254  */
1255 static void
1256 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1257 {
1258         uint32_t flags = 0;
1259
1260         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1261                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1262         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1263                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1264         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1265                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1266         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1267                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1268         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1269                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1270         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1271                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1272
1273         *txq_flags = flags;
1274 }
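/*
 * Illustrative sketch (not part of the upstream file): the same Tx queue
 * configuration expressed with legacy txq_flags and with the offloads API
 * that the helpers above translate between, here requesting only TCP/UDP
 * checksum offload on single-segment mbufs. The variable names are
 * hypothetical.
 *
 *     struct rte_eth_txconf legacy_txconf = {
 *             .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
 *                          ETH_TXQ_FLAGS_NOVLANOFFL |
 *                          ETH_TXQ_FLAGS_NOXSUMSCTP,
 *     };
 *
 *     struct rte_eth_txconf offload_txconf = {
 *             .txq_flags = ETH_TXQ_FLAGS_IGNORE,
 *             .offloads = DEV_TX_OFFLOAD_UDP_CKSUM |
 *                         DEV_TX_OFFLOAD_TCP_CKSUM,
 *     };
 */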
1275
1276 int
1277 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1278                        uint16_t nb_tx_desc, unsigned int socket_id,
1279                        const struct rte_eth_txconf *tx_conf)
1280 {
1281         struct rte_eth_dev *dev;
1282         struct rte_eth_dev_info dev_info;
1283         struct rte_eth_txconf local_conf;
1284         void **txq;
1285
1286         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1287
1288         dev = &rte_eth_devices[port_id];
1289         if (tx_queue_id >= dev->data->nb_tx_queues) {
1290                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1291                 return -EINVAL;
1292         }
1293
1294         if (dev->data->dev_started) {
1295                 RTE_PMD_DEBUG_TRACE(
1296                     "port %d must be stopped to allow configuration\n", port_id);
1297                 return -EBUSY;
1298         }
1299
1300         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1301         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1302
1303         rte_eth_dev_info_get(port_id, &dev_info);
1304
1305         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1306             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1307             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1308                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1309                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1310                                 nb_tx_desc,
1311                                 dev_info.tx_desc_lim.nb_max,
1312                                 dev_info.tx_desc_lim.nb_min,
1313                                 dev_info.tx_desc_lim.nb_align);
1314                 return -EINVAL;
1315         }
1316
1317         txq = dev->data->tx_queues;
1318         if (txq[tx_queue_id]) {
1319                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1320                                         -ENOTSUP);
1321                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1322                 txq[tx_queue_id] = NULL;
1323         }
1324
1325         if (tx_conf == NULL)
1326                 tx_conf = &dev_info.default_txconf;
1327
1328         /*
1329          * Convert between the two offloads APIs so that a PMD only has
1330          * to support one of them.
1331          */
1332         local_conf = *tx_conf;
1333         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1334                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1335                                              &local_conf.txq_flags);
1336                 /* Keep the ignore flag. */
1337                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1338         } else {
1339                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1340                                           &local_conf.offloads);
1341         }
1342
1343         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1344                                                socket_id, &local_conf);
1345 }
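/*
 * Illustrative sketch (not part of the upstream file): setting up one Tx
 * queue with the driver defaults (a NULL tx_conf selects
 * dev_info.default_txconf as done above). The descriptor count is an
 * example value.
 *
 *     if (rte_eth_tx_queue_setup(port_id, 0, 512,
 *                                rte_eth_dev_socket_id(port_id),
 *                                NULL) < 0)
 *             rte_exit(EXIT_FAILURE, "tx queue setup failed\n");
 */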
1346
1347 void
1348 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1349                 void *userdata __rte_unused)
1350 {
1351         unsigned i;
1352
1353         for (i = 0; i < unsent; i++)
1354                 rte_pktmbuf_free(pkts[i]);
1355 }
1356
1357 void
1358 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1359                 void *userdata)
1360 {
1361         uint64_t *count = userdata;
1362         unsigned i;
1363
1364         for (i = 0; i < unsent; i++)
1365                 rte_pktmbuf_free(pkts[i]);
1366
1367         *count += unsent;
1368 }
1369
1370 int
1371 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1372                 buffer_tx_error_fn cbfn, void *userdata)
1373 {
1374         buffer->error_callback = cbfn;
1375         buffer->error_userdata = userdata;
1376         return 0;
1377 }
1378
1379 int
1380 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1381 {
1382         int ret = 0;
1383
1384         if (buffer == NULL)
1385                 return -EINVAL;
1386
1387         buffer->size = size;
1388         if (buffer->error_callback == NULL) {
1389                 ret = rte_eth_tx_buffer_set_err_callback(
1390                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1391         }
1392
1393         return ret;
1394 }
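/*
 * Illustrative sketch (not part of the upstream file): allocating and using
 * a Tx buffer; unsent packets are dropped by the default callback installed
 * by rte_eth_tx_buffer_init(). The name "tx_buffer", the size of 32 and the
 * mbuf variable (assumed to point to an already built packet) are example
 * choices.
 *
 *     struct rte_eth_dev_tx_buffer *txb;
 *
 *     txb = rte_zmalloc_socket("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *                              rte_eth_dev_socket_id(port_id));
 *     if (txb == NULL || rte_eth_tx_buffer_init(txb, 32) != 0)
 *             rte_exit(EXIT_FAILURE, "cannot init tx buffer\n");
 *
 *     rte_eth_tx_buffer(port_id, 0, txb, mbuf);   /+ queue one mbuf +/
 *     rte_eth_tx_buffer_flush(port_id, 0, txb);   /+ send anything pending +/
 *
 * (The "/+ +/" markers above stand in for nested comments.)
 */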
1395
1396 int
1397 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1398 {
1399         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1400
1401         /* Validate Input Data. Bail if not valid or not supported. */
1402         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1403         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1404
1405         /* Call driver to free pending mbufs. */
1406         return (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1407                         free_cnt);
1408 }
1409
1410 void
1411 rte_eth_promiscuous_enable(uint16_t port_id)
1412 {
1413         struct rte_eth_dev *dev;
1414
1415         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1416         dev = &rte_eth_devices[port_id];
1417
1418         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1419         (*dev->dev_ops->promiscuous_enable)(dev);
1420         dev->data->promiscuous = 1;
1421 }
1422
1423 void
1424 rte_eth_promiscuous_disable(uint16_t port_id)
1425 {
1426         struct rte_eth_dev *dev;
1427
1428         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1429         dev = &rte_eth_devices[port_id];
1430
1431         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1432         dev->data->promiscuous = 0;
1433         (*dev->dev_ops->promiscuous_disable)(dev);
1434 }
1435
1436 int
1437 rte_eth_promiscuous_get(uint16_t port_id)
1438 {
1439         struct rte_eth_dev *dev;
1440
1441         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1442
1443         dev = &rte_eth_devices[port_id];
1444         return dev->data->promiscuous;
1445 }
1446
1447 void
1448 rte_eth_allmulticast_enable(uint16_t port_id)
1449 {
1450         struct rte_eth_dev *dev;
1451
1452         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1453         dev = &rte_eth_devices[port_id];
1454
1455         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1456         (*dev->dev_ops->allmulticast_enable)(dev);
1457         dev->data->all_multicast = 1;
1458 }
1459
1460 void
1461 rte_eth_allmulticast_disable(uint16_t port_id)
1462 {
1463         struct rte_eth_dev *dev;
1464
1465         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1466         dev = &rte_eth_devices[port_id];
1467
1468         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1469         dev->data->all_multicast = 0;
1470         (*dev->dev_ops->allmulticast_disable)(dev);
1471 }
1472
1473 int
1474 rte_eth_allmulticast_get(uint16_t port_id)
1475 {
1476         struct rte_eth_dev *dev;
1477
1478         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1479
1480         dev = &rte_eth_devices[port_id];
1481         return dev->data->all_multicast;
1482 }
1483
1484 static inline int
1485 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1486                                 struct rte_eth_link *link)
1487 {
1488         struct rte_eth_link *dst = link;
1489         struct rte_eth_link *src = &(dev->data->dev_link);
1490
1491         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1492                                         *(uint64_t *)src) == 0)
1493                 return -1;
1494
1495         return 0;
1496 }
1497
1498 void
1499 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1500 {
1501         struct rte_eth_dev *dev;
1502
1503         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1504         dev = &rte_eth_devices[port_id];
1505
1506         if (dev->data->dev_conf.intr_conf.lsc != 0)
1507                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1508         else {
1509                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1510                 (*dev->dev_ops->link_update)(dev, 1);
1511                 *eth_link = dev->data->dev_link;
1512         }
1513 }
1514
1515 void
1516 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1517 {
1518         struct rte_eth_dev *dev;
1519
1520         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1521         dev = &rte_eth_devices[port_id];
1522
1523         if (dev->data->dev_conf.intr_conf.lsc != 0)
1524                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1525         else {
1526                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1527                 (*dev->dev_ops->link_update)(dev, 0);
1528                 *eth_link = dev->data->dev_link;
1529         }
1530 }
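/*
 * Illustrative usage sketch (editor's note, not part of the library): polling
 * the link without waiting for the PMD's link update to complete. "port_id"
 * is an assumption of the sketch; ETH_LINK_UP is the link-status value
 * defined in rte_ethdev.h.
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("port %u: link up at %u Mbps\n", port_id,
 *		       link.link_speed);
 *	else
 *		printf("port %u: link down\n", port_id);
 */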
1531
1532 int
1533 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1534 {
1535         struct rte_eth_dev *dev;
1536
1537         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1538
1539         dev = &rte_eth_devices[port_id];
1540         memset(stats, 0, sizeof(*stats));
1541
1542         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1543         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1544         return (*dev->dev_ops->stats_get)(dev, stats);
1545 }
1546
1547 int
1548 rte_eth_stats_reset(uint16_t port_id)
1549 {
1550         struct rte_eth_dev *dev;
1551
1552         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1553         dev = &rte_eth_devices[port_id];
1554
1555         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1556         (*dev->dev_ops->stats_reset)(dev);
1557         dev->data->rx_mbuf_alloc_failed = 0;
1558
1559         return 0;
1560 }
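/*
 * Illustrative usage sketch (editor's note, not part of the library): reading
 * and then clearing the basic counters. Field names follow struct
 * rte_eth_stats; "port_id" is an assumption of the sketch.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx=%"PRIu64" tx=%"PRIu64" rx_nombuf=%"PRIu64"\n",
 *		       stats.ipackets, stats.opackets, stats.rx_nombuf);
 *	rte_eth_stats_reset(port_id);
 */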
1561
1562 static inline int
1563 get_xstats_basic_count(struct rte_eth_dev *dev)
1564 {
1565         uint16_t nb_rxqs, nb_txqs;
1566         int count;
1567
1568         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1569         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1570
1571         count = RTE_NB_STATS;
1572         count += nb_rxqs * RTE_NB_RXQ_STATS;
1573         count += nb_txqs * RTE_NB_TXQ_STATS;
1574
1575         return count;
1576 }
1577
1578 static int
1579 get_xstats_count(uint16_t port_id)
1580 {
1581         struct rte_eth_dev *dev;
1582         int count;
1583
1584         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1585         dev = &rte_eth_devices[port_id];
1586         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1587                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1588                                 NULL, 0);
1589                 if (count < 0)
1590                         return count;
1591         }
1592         if (dev->dev_ops->xstats_get_names != NULL) {
1593                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1594                 if (count < 0)
1595                         return count;
1596         } else
1597                 count = 0;
1598
1599
1600         count += get_xstats_basic_count(dev);
1601
1602         return count;
1603 }
1604
1605 int
1606 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1607                 uint64_t *id)
1608 {
1609         int cnt_xstats, idx_xstat;
1610
1611         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1612
1613         if (!id) {
1614                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1615                 return -ENOMEM;
1616         }
1617
1618         if (!xstat_name) {
1619                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1620                 return -ENOMEM;
1621         }
1622
1623         /* Get count */
1624         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1625         if (cnt_xstats < 0) {
1626                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1627                 return -ENODEV;
1628         }
1629
1630         /* Get id-name lookup table */
1631         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1632
1633         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1634                         port_id, xstats_names, cnt_xstats, NULL)) {
1635                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1636                 return -1;
1637         }
1638
1639         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1640                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1641                         *id = idx_xstat;
1642                         return 0;
1643                 }
1644         }
1645
1646         return -EINVAL;
1647 }
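/*
 * Illustrative usage sketch (editor's note, not part of the library):
 * resolving a single extended statistic by name and then fetching only that
 * value with rte_eth_xstats_get_by_id(), defined further below. The name
 * "rx_good_packets" is just an example; the set of available names depends
 * on the PMD and release.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *					  &id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets = %"PRIu64"\n", value);
 */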
1648
1649 /* retrieve ethdev extended statistics names */
1650 int
1651 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1652         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1653         uint64_t *ids)
1654 {
1655         struct rte_eth_xstat_name *xstats_names_copy;
1656         unsigned int no_basic_stat_requested = 1;
1657         unsigned int expected_entries;
1658         struct rte_eth_dev *dev;
1659         unsigned int i;
1660         int ret;
1661
1662         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1663         dev = &rte_eth_devices[port_id];
1664
1665         ret = get_xstats_count(port_id);
1666         if (ret < 0)
1667                 return ret;
1668         expected_entries = (unsigned int)ret;
1669
1670         /* Return max number of stats if no ids given */
1671         if (!ids) {
1672                 if (!xstats_names)
1673                         return expected_entries;
1674                 else if (xstats_names && size < expected_entries)
1675                         return expected_entries;
1676         }
1677
1678         if (ids && !xstats_names)
1679                 return -EINVAL;
1680
1681         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
1682                 unsigned int basic_count = get_xstats_basic_count(dev);
1683                 uint64_t ids_copy[size];
1684
1685                 for (i = 0; i < size; i++) {
1686                         if (ids[i] < basic_count) {
1687                                 no_basic_stat_requested = 0;
1688                                 break;
1689                         }
1690
1691                         /*
1692                          * Convert ids to xstats ids that PMD knows.
1693                          * ids known by user are basic + extended stats.
1694                          */
1695                         ids_copy[i] = ids[i] - basic_count;
1696                 }
1697
1698                 if (no_basic_stat_requested)
1699                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
1700                                         xstats_names, ids_copy, size);
1701         }
1702
1703         /* Retrieve all stats */
1704         if (!ids) {
1705                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
1706                                 expected_entries);
1707                 if (num_stats < 0 || num_stats > (int)expected_entries)
1708                         return num_stats;
1709                 else
1710                         return expected_entries;
1711         }
1712
1713         xstats_names_copy = calloc(expected_entries,
1714                 sizeof(struct rte_eth_xstat_name));
1715
1716         if (!xstats_names_copy) {
1717                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
1718                 return -ENOMEM;
1719         }
1720
1721         /* Fill xstats_names_copy structure */
1722         rte_eth_xstats_get_names(port_id, xstats_names_copy, expected_entries);
1723
1724         /* Filter stats */
1725         for (i = 0; i < size; i++) {
1726                 if (ids[i] >= expected_entries) {
1727                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1728                         free(xstats_names_copy);
1729                         return -1;
1730                 }
1731                 xstats_names[i] = xstats_names_copy[ids[i]];
1732         }
1733
1734         free(xstats_names_copy);
1735         return size;
1736 }
1737
1738 int
1739 rte_eth_xstats_get_names(uint16_t port_id,
1740         struct rte_eth_xstat_name *xstats_names,
1741         unsigned int size)
1742 {
1743         struct rte_eth_dev *dev;
1744         int cnt_used_entries;
1745         int cnt_expected_entries;
1746         int cnt_driver_entries;
1747         uint32_t idx, id_queue;
1748         uint16_t num_q;
1749
1750         cnt_expected_entries = get_xstats_count(port_id);
1751         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1752                         (int)size < cnt_expected_entries)
1753                 return cnt_expected_entries;
1754
1755         /* port_id checked in get_xstats_count() */
1756         dev = &rte_eth_devices[port_id];
1757         cnt_used_entries = 0;
1758
1759         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1760                 snprintf(xstats_names[cnt_used_entries].name,
1761                         sizeof(xstats_names[0].name),
1762                         "%s", rte_stats_strings[idx].name);
1763                 cnt_used_entries++;
1764         }
1765         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1766         for (id_queue = 0; id_queue < num_q; id_queue++) {
1767                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1768                         snprintf(xstats_names[cnt_used_entries].name,
1769                                 sizeof(xstats_names[0].name),
1770                                 "rx_q%u%s",
1771                                 id_queue, rte_rxq_stats_strings[idx].name);
1772                         cnt_used_entries++;
1773                 }
1774
1775         }
1776         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1777         for (id_queue = 0; id_queue < num_q; id_queue++) {
1778                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1779                         snprintf(xstats_names[cnt_used_entries].name,
1780                                 sizeof(xstats_names[0].name),
1781                                 "tx_q%u%s",
1782                                 id_queue, rte_txq_stats_strings[idx].name);
1783                         cnt_used_entries++;
1784                 }
1785         }
1786
1787         if (dev->dev_ops->xstats_get_names != NULL) {
1788                 /* If there are any driver-specific xstats, append them
1789                  * to the end of the list.
1790                  */
1791                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1792                         dev,
1793                         xstats_names + cnt_used_entries,
1794                         size - cnt_used_entries);
1795                 if (cnt_driver_entries < 0)
1796                         return cnt_driver_entries;
1797                 cnt_used_entries += cnt_driver_entries;
1798         }
1799
1800         return cnt_used_entries;
1801 }
1802
1803 /* retrieve ethdev extended statistics */
1804 int
1805 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
1806                          uint64_t *values, unsigned int size)
1807 {
1808         unsigned int no_basic_stat_requested = 1;
1809         unsigned int num_xstats_filled;
1810         uint16_t expected_entries;
1811         struct rte_eth_dev *dev;
1812         unsigned int i;
1813         int ret;
1814
1815         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1816         expected_entries = get_xstats_count(port_id);
1817         struct rte_eth_xstat xstats[expected_entries];
1818         dev = &rte_eth_devices[port_id];
1819
1820         /* Return max number of stats if no ids given */
1821         if (!ids) {
1822                 if (!values)
1823                         return expected_entries;
1824                 else if (values && size < expected_entries)
1825                         return expected_entries;
1826         }
1827
1828         if (ids && !values)
1829                 return -EINVAL;
1830
1831         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
1832                 unsigned int basic_count = get_xstats_basic_count(dev);
1833                 uint64_t ids_copy[size];
1834
1835                 for (i = 0; i < size; i++) {
1836                         if (ids[i] < basic_count) {
1837                                 no_basic_stat_requested = 0;
1838                                 break;
1839                         }
1840
1841                         /*
1842                          * Convert ids to xstats ids that PMD knows.
1843                          * ids known by user are basic + extended stats.
1844                          */
1845                         ids_copy[i] = ids[i] - basic_count;
1846                 }
1847
1848                 if (no_basic_stat_requested)
1849                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
1850                                         values, size);
1851         }
1852
1853         /* Fill the xstats structure */
1854         ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
1855         if (ret < 0)
1856                 return ret;
1857         num_xstats_filled = (unsigned int)ret;
1858
1859         /* Return all stats */
1860         if (!ids) {
1861                 for (i = 0; i < num_xstats_filled; i++)
1862                         values[i] = xstats[i].value;
1863                 return expected_entries;
1864         }
1865
1866         /* Filter stats */
1867         for (i = 0; i < size; i++) {
1868                 if (ids[i] >= expected_entries) {
1869                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
1870                         return -1;
1871                 }
1872                 values[i] = xstats[ids[i]].value;
1873         }
1874         return size;
1875 }
1876
1877 int
1878 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
1879         unsigned int n)
1880 {
1881         struct rte_eth_stats eth_stats;
1882         struct rte_eth_dev *dev;
1883         unsigned int count = 0, i, q;
1884         signed int xcount = 0;
1885         uint64_t val, *stats_ptr;
1886         uint16_t nb_rxqs, nb_txqs;
1887
1888         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1889
1890         dev = &rte_eth_devices[port_id];
1891
1892         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1893         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1894
1895         /* Return generic statistics */
1896         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
1897                 (nb_txqs * RTE_NB_TXQ_STATS);
1898
1899         /* implemented by the driver */
1900         if (dev->dev_ops->xstats_get != NULL) {
1901                 /* Retrieve the xstats from the driver and place them at
1902                  * the end of the xstats array.
1903                  */
1904                 xcount = (*dev->dev_ops->xstats_get)(dev,
1905                                      xstats ? xstats + count : NULL,
1906                                      (n > count) ? n - count : 0);
1907
1908                 if (xcount < 0)
1909                         return xcount;
1910         }
1911
1912         if (n < count + xcount || xstats == NULL)
1913                 return count + xcount;
1914
1915         /* now fill the xstats structure */
1916         count = 0;
1917         rte_eth_stats_get(port_id, &eth_stats);
1918
1919         /* global stats */
1920         for (i = 0; i < RTE_NB_STATS; i++) {
1921                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1922                                         rte_stats_strings[i].offset);
1923                 val = *stats_ptr;
1924                 xstats[count++].value = val;
1925         }
1926
1927         /* per-rxq stats */
1928         for (q = 0; q < nb_rxqs; q++) {
1929                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1930                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1931                                         rte_rxq_stats_strings[i].offset +
1932                                         q * sizeof(uint64_t));
1933                         val = *stats_ptr;
1934                         xstats[count++].value = val;
1935                 }
1936         }
1937
1938         /* per-txq stats */
1939         for (q = 0; q < nb_txqs; q++) {
1940                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1941                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1942                                         rte_txq_stats_strings[i].offset +
1943                                         q * sizeof(uint64_t));
1944                         val = *stats_ptr;
1945                         xstats[count++].value = val;
1946                 }
1947         }
1948
1949         for (i = 0; i < count; i++)
1950                 xstats[i].id = i;
1951         /* add an offset to driver-specific stats */
1952         for ( ; i < count + xcount; i++)
1953                 xstats[i].id += count;
1954
1955         return count + xcount;
1956 }
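/*
 * Illustrative usage sketch (editor's note, not part of the library): dumping
 * every extended statistic by pairing rte_eth_xstats_get_names() with
 * rte_eth_xstats_get(). The first call with a NULL array only reports the
 * required size. Error handling is deliberately minimal.
 *
 *	int i, n = rte_eth_xstats_get(port_id, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_eth_xstat *vals = malloc(n * sizeof(*vals));
 *		struct rte_eth_xstat_name *names = malloc(n * sizeof(*names));
 *
 *		if (vals && names &&
 *		    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *		    rte_eth_xstats_get(port_id, vals, n) == n)
 *			for (i = 0; i < n; i++)
 *				printf("%s = %"PRIu64"\n",
 *				       names[vals[i].id].name, vals[i].value);
 *		free(vals);
 *		free(names);
 *	}
 */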
1957
1958 /* reset ethdev extended statistics */
1959 void
1960 rte_eth_xstats_reset(uint16_t port_id)
1961 {
1962         struct rte_eth_dev *dev;
1963
1964         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1965         dev = &rte_eth_devices[port_id];
1966
1967         /* implemented by the driver */
1968         if (dev->dev_ops->xstats_reset != NULL) {
1969                 (*dev->dev_ops->xstats_reset)(dev);
1970                 return;
1971         }
1972
1973         /* fallback to default */
1974         rte_eth_stats_reset(port_id);
1975 }
1976
1977 static int
1978 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
1979                 uint8_t is_rx)
1980 {
1981         struct rte_eth_dev *dev;
1982
1983         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1984
1985         dev = &rte_eth_devices[port_id];
1986
1987         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1988         return (*dev->dev_ops->queue_stats_mapping_set)
1989                         (dev, queue_id, stat_idx, is_rx);
1990 }
1991
1992
1993 int
1994 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
1995                 uint8_t stat_idx)
1996 {
1997         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1998                         STAT_QMAP_TX);
1999 }
2000
2001
2002 int
2003 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2004                 uint8_t stat_idx)
2005 {
2006         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
2007                         STAT_QMAP_RX);
2008 }
2009
2010 int
2011 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2012 {
2013         struct rte_eth_dev *dev;
2014
2015         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2016         dev = &rte_eth_devices[port_id];
2017
2018         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2019         return (*dev->dev_ops->fw_version_get)(dev, fw_version, fw_size);
2020 }
2021
2022 void
2023 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2024 {
2025         struct rte_eth_dev *dev;
2026         const struct rte_eth_desc_lim lim = {
2027                 .nb_max = UINT16_MAX,
2028                 .nb_min = 0,
2029                 .nb_align = 1,
2030         };
2031
2032         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2033         dev = &rte_eth_devices[port_id];
2034
2035         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2036         dev_info->rx_desc_lim = lim;
2037         dev_info->tx_desc_lim = lim;
2038
2039         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2040         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2041         dev_info->driver_name = dev->device->driver->name;
2042         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2043         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2044 }
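/*
 * Illustrative usage sketch (editor's note, not part of the library): using
 * the capability query above to size the queue configuration. The
 * "wanted_rxq" count and the clamping policy are assumptions of the sketch,
 * not requirements of the API.
 *
 *	struct rte_eth_dev_info info;
 *	uint16_t wanted_rxq = 4;
 *	uint16_t nb_rxq;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	nb_rxq = wanted_rxq < info.max_rx_queues ? wanted_rxq :
 *		 info.max_rx_queues;
 *	printf("%s: using %u of %u rx queues\n", info.driver_name,
 *	       nb_rxq, info.max_rx_queues);
 */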
2045
2046 int
2047 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2048                                  uint32_t *ptypes, int num)
2049 {
2050         int i, j;
2051         struct rte_eth_dev *dev;
2052         const uint32_t *all_ptypes;
2053
2054         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2055         dev = &rte_eth_devices[port_id];
2056         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2057         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2058
2059         if (!all_ptypes)
2060                 return 0;
2061
2062         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2063                 if (all_ptypes[i] & ptype_mask) {
2064                         if (j < num)
2065                                 ptypes[j] = all_ptypes[i];
2066                         j++;
2067                 }
2068
2069         return j;
2070 }
2071
2072 void
2073 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2074 {
2075         struct rte_eth_dev *dev;
2076
2077         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2078         dev = &rte_eth_devices[port_id];
2079         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2080 }
2081
2082
2083 int
2084 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2085 {
2086         struct rte_eth_dev *dev;
2087
2088         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2089
2090         dev = &rte_eth_devices[port_id];
2091         *mtu = dev->data->mtu;
2092         return 0;
2093 }
2094
2095 int
2096 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2097 {
2098         int ret;
2099         struct rte_eth_dev *dev;
2100
2101         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2102         dev = &rte_eth_devices[port_id];
2103         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2104
2105         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2106         if (!ret)
2107                 dev->data->mtu = mtu;
2108
2109         return ret;
2110 }
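/*
 * Illustrative usage sketch (editor's note, not part of the library): raising
 * the MTU and reporting the value that stays in effect if the PMD rejects the
 * change. The 1600-byte target is an arbitrary example.
 *
 *	uint16_t mtu;
 *
 *	if (rte_eth_dev_get_mtu(port_id, &mtu) == 0 &&
 *	    rte_eth_dev_set_mtu(port_id, 1600) != 0)
 *		printf("port %u: keeping MTU %u\n", port_id, mtu);
 */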
2111
2112 int
2113 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2114 {
2115         struct rte_eth_dev *dev;
2116         int ret;
2117
2118         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2119         dev = &rte_eth_devices[port_id];
2120         if (!(dev->data->dev_conf.rxmode.offloads &
2121               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2122                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2123                 return -ENOSYS;
2124         }
2125
2126         if (vlan_id > 4095) {
2127                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2128                                 port_id, (unsigned) vlan_id);
2129                 return -EINVAL;
2130         }
2131         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2132
2133         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2134         if (ret == 0) {
2135                 struct rte_vlan_filter_conf *vfc;
2136                 int vidx;
2137                 int vbit;
2138
2139                 vfc = &dev->data->vlan_filter_conf;
2140                 vidx = vlan_id / 64;
2141                 vbit = vlan_id % 64;
2142
2143                 if (on)
2144                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2145                 else
2146                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2147         }
2148
2149         return ret;
2150 }
2151
2152 int
2153 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2154                                     int on)
2155 {
2156         struct rte_eth_dev *dev;
2157
2158         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2159         dev = &rte_eth_devices[port_id];
2160         if (rx_queue_id >= dev->data->nb_rx_queues) {
2161                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2162                 return -EINVAL;
2163         }
2164
2165         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2166         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2167
2168         return 0;
2169 }
2170
2171 int
2172 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2173                                 enum rte_vlan_type vlan_type,
2174                                 uint16_t tpid)
2175 {
2176         struct rte_eth_dev *dev;
2177
2178         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2179         dev = &rte_eth_devices[port_id];
2180         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2181
2182         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
2183 }
2184
2185 int
2186 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2187 {
2188         struct rte_eth_dev *dev;
2189         int ret = 0;
2190         int mask = 0;
2191         int cur, org = 0;
2192         uint64_t orig_offloads;
2193
2194         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2195         dev = &rte_eth_devices[port_id];
2196
2197         /* save original values in case of failure */
2198         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2199
2200         /* check which options were changed by the application */
2201         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2202         org = !!(dev->data->dev_conf.rxmode.offloads &
2203                  DEV_RX_OFFLOAD_VLAN_STRIP);
2204         if (cur != org) {
2205                 if (cur)
2206                         dev->data->dev_conf.rxmode.offloads |=
2207                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2208                 else
2209                         dev->data->dev_conf.rxmode.offloads &=
2210                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2211                 mask |= ETH_VLAN_STRIP_MASK;
2212         }
2213
2214         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2215         org = !!(dev->data->dev_conf.rxmode.offloads &
2216                  DEV_RX_OFFLOAD_VLAN_FILTER);
2217         if (cur != org) {
2218                 if (cur)
2219                         dev->data->dev_conf.rxmode.offloads |=
2220                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2221                 else
2222                         dev->data->dev_conf.rxmode.offloads &=
2223                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2224                 mask |= ETH_VLAN_FILTER_MASK;
2225         }
2226
2227         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2228         org = !!(dev->data->dev_conf.rxmode.offloads &
2229                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2230         if (cur != org) {
2231                 if (cur)
2232                         dev->data->dev_conf.rxmode.offloads |=
2233                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2234                 else
2235                         dev->data->dev_conf.rxmode.offloads &=
2236                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2237                 mask |= ETH_VLAN_EXTEND_MASK;
2238         }
2239
2240         /* no change */
2241         if (mask == 0)
2242                 return ret;
2243
2244         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2245
2246         /*
2247          * Convert to the offload bitfield API in case the underlying PMD
2248          * still relies on it.
2249          */
2250         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2251                                     &dev->data->dev_conf.rxmode);
2252         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2253         if (ret) {
2254                 /* hit an error, restore the original values */
2255                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2256                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2257                                             &dev->data->dev_conf.rxmode);
2258         }
2259
2260         return ret;
2261 }
2262
2263 int
2264 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2265 {
2266         struct rte_eth_dev *dev;
2267         int ret = 0;
2268
2269         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2270         dev = &rte_eth_devices[port_id];
2271
2272         if (dev->data->dev_conf.rxmode.offloads &
2273             DEV_RX_OFFLOAD_VLAN_STRIP)
2274                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2275
2276         if (dev->data->dev_conf.rxmode.offloads &
2277             DEV_RX_OFFLOAD_VLAN_FILTER)
2278                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2279
2280         if (dev->data->dev_conf.rxmode.offloads &
2281             DEV_RX_OFFLOAD_VLAN_EXTEND)
2282                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2283
2284         return ret;
2285 }
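/*
 * Illustrative usage sketch (editor's note, not part of the library):
 * enabling VLAN stripping with a read-modify-write of the offload mask so the
 * other VLAN offload bits keep their current setting. "port_id" is an
 * assumption of the sketch.
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0 &&
 *	    rte_eth_dev_set_vlan_offload(port_id,
 *					 mask | ETH_VLAN_STRIP_OFFLOAD) != 0)
 *		printf("port %u: cannot enable VLAN stripping\n", port_id);
 */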
2286
2287 int
2288 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2289 {
2290         struct rte_eth_dev *dev;
2291
2292         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2293         dev = &rte_eth_devices[port_id];
2294         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2295         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
2296
2297         return 0;
2298 }
2299
2300 int
2301 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2302 {
2303         struct rte_eth_dev *dev;
2304
2305         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2306         dev = &rte_eth_devices[port_id];
2307         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2308         memset(fc_conf, 0, sizeof(*fc_conf));
2309         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
2310 }
2311
2312 int
2313 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2314 {
2315         struct rte_eth_dev *dev;
2316
2317         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2318         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2319                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2320                 return -EINVAL;
2321         }
2322
2323         dev = &rte_eth_devices[port_id];
2324         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2325         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
2326 }
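/*
 * Illustrative usage sketch (editor's note, not part of the library): reading
 * the current flow-control settings, switching to full (rx+tx) pause-frame
 * handling and writing the configuration back. RTE_FC_FULL comes from enum
 * rte_eth_fc_mode in rte_ethdev.h.
 *
 *	struct rte_eth_fc_conf fc;
 *
 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc) == 0) {
 *		fc.mode = RTE_FC_FULL;
 *		if (rte_eth_dev_flow_ctrl_set(port_id, &fc) != 0)
 *			printf("port %u: flow control update failed\n",
 *			       port_id);
 *	}
 */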
2327
2328 int
2329 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2330                                    struct rte_eth_pfc_conf *pfc_conf)
2331 {
2332         struct rte_eth_dev *dev;
2333
2334         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2335         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2336                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2337                 return -EINVAL;
2338         }
2339
2340         dev = &rte_eth_devices[port_id];
2341         /* High water / low water validation is device-specific */
2342         if  (*dev->dev_ops->priority_flow_ctrl_set)
2343                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
2344         return -ENOTSUP;
2345 }
2346
2347 static int
2348 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2349                         uint16_t reta_size)
2350 {
2351         uint16_t i, num;
2352
2353         if (!reta_conf)
2354                 return -EINVAL;
2355
2356         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2357         for (i = 0; i < num; i++) {
2358                 if (reta_conf[i].mask)
2359                         return 0;
2360         }
2361
2362         return -EINVAL;
2363 }
2364
2365 static int
2366 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2367                          uint16_t reta_size,
2368                          uint16_t max_rxq)
2369 {
2370         uint16_t i, idx, shift;
2371
2372         if (!reta_conf)
2373                 return -EINVAL;
2374
2375         if (max_rxq == 0) {
2376                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2377                 return -EINVAL;
2378         }
2379
2380         for (i = 0; i < reta_size; i++) {
2381                 idx = i / RTE_RETA_GROUP_SIZE;
2382                 shift = i % RTE_RETA_GROUP_SIZE;
2383                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2384                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2385                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2386                                 "the maximum rxq index: %u\n", idx, shift,
2387                                 reta_conf[idx].reta[shift], max_rxq);
2388                         return -EINVAL;
2389                 }
2390         }
2391
2392         return 0;
2393 }
2394
2395 int
2396 rte_eth_dev_rss_reta_update(uint16_t port_id,
2397                             struct rte_eth_rss_reta_entry64 *reta_conf,
2398                             uint16_t reta_size)
2399 {
2400         struct rte_eth_dev *dev;
2401         int ret;
2402
2403         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2404         /* Check mask bits */
2405         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2406         if (ret < 0)
2407                 return ret;
2408
2409         dev = &rte_eth_devices[port_id];
2410
2411         /* Check entry value */
2412         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2413                                 dev->data->nb_rx_queues);
2414         if (ret < 0)
2415                 return ret;
2416
2417         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2418         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
2419 }
2420
2421 int
2422 rte_eth_dev_rss_reta_query(uint16_t port_id,
2423                            struct rte_eth_rss_reta_entry64 *reta_conf,
2424                            uint16_t reta_size)
2425 {
2426         struct rte_eth_dev *dev;
2427         int ret;
2428
2429         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2430
2431         /* Check mask bits */
2432         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2433         if (ret < 0)
2434                 return ret;
2435
2436         dev = &rte_eth_devices[port_id];
2437         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2438         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
2439 }
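/*
 * Illustrative usage sketch (editor's note, not part of the library):
 * spreading a redirection table of "reta_size" entries evenly over "nb_rxq"
 * receive queues. Both values are assumptions of the sketch and would
 * normally come from rte_eth_dev_info_get() and the application's queue
 * setup; reta_size is assumed to be a multiple of RTE_RETA_GROUP_SIZE.
 *
 *	struct rte_eth_rss_reta_entry64 reta[reta_size / RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta, 0, sizeof(reta));
 *	for (i = 0; i < reta_size; i++) {
 *		reta[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_rxq;
 *	}
 *	if (rte_eth_dev_rss_reta_update(port_id, reta, reta_size) != 0)
 *		printf("port %u: RETA update failed\n", port_id);
 */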
2440
2441 int
2442 rte_eth_dev_rss_hash_update(uint16_t port_id,
2443                             struct rte_eth_rss_conf *rss_conf)
2444 {
2445         struct rte_eth_dev *dev;
2446
2447         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2448         dev = &rte_eth_devices[port_id];
2449         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2450         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
2451 }
2452
2453 int
2454 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2455                               struct rte_eth_rss_conf *rss_conf)
2456 {
2457         struct rte_eth_dev *dev;
2458
2459         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2460         dev = &rte_eth_devices[port_id];
2461         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2462         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2463 }
2464
2465 int
2466 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2467                                 struct rte_eth_udp_tunnel *udp_tunnel)
2468 {
2469         struct rte_eth_dev *dev;
2470
2471         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2472         if (udp_tunnel == NULL) {
2473                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2474                 return -EINVAL;
2475         }
2476
2477         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2478                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2479                 return -EINVAL;
2480         }
2481
2482         dev = &rte_eth_devices[port_id];
2483         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2484         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
2485 }
2486
2487 int
2488 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2489                                    struct rte_eth_udp_tunnel *udp_tunnel)
2490 {
2491         struct rte_eth_dev *dev;
2492
2493         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2494         dev = &rte_eth_devices[port_id];
2495
2496         if (udp_tunnel == NULL) {
2497                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2498                 return -EINVAL;
2499         }
2500
2501         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2502                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2503                 return -EINVAL;
2504         }
2505
2506         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2507         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2508 }
2509
2510 int
2511 rte_eth_led_on(uint16_t port_id)
2512 {
2513         struct rte_eth_dev *dev;
2514
2515         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2516         dev = &rte_eth_devices[port_id];
2517         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2518         return (*dev->dev_ops->dev_led_on)(dev);
2519 }
2520
2521 int
2522 rte_eth_led_off(uint16_t port_id)
2523 {
2524         struct rte_eth_dev *dev;
2525
2526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2527         dev = &rte_eth_devices[port_id];
2528         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2529         return (*dev->dev_ops->dev_led_off)(dev);
2530 }
2531
2532 /*
2533  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2534  * an empty spot.
2535  */
2536 static int
2537 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2538 {
2539         struct rte_eth_dev_info dev_info;
2540         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2541         unsigned i;
2542
2543         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2544         rte_eth_dev_info_get(port_id, &dev_info);
2545
2546         for (i = 0; i < dev_info.max_mac_addrs; i++)
2547                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2548                         return i;
2549
2550         return -1;
2551 }
2552
2553 static const struct ether_addr null_mac_addr;
2554
2555 int
2556 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2557                         uint32_t pool)
2558 {
2559         struct rte_eth_dev *dev;
2560         int index;
2561         uint64_t pool_mask;
2562         int ret;
2563
2564         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2565         dev = &rte_eth_devices[port_id];
2566         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2567
2568         if (is_zero_ether_addr(addr)) {
2569                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2570                         port_id);
2571                 return -EINVAL;
2572         }
2573         if (pool >= ETH_64_POOLS) {
2574                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2575                 return -EINVAL;
2576         }
2577
2578         index = get_mac_addr_index(port_id, addr);
2579         if (index < 0) {
2580                 index = get_mac_addr_index(port_id, &null_mac_addr);
2581                 if (index < 0) {
2582                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2583                                 port_id);
2584                         return -ENOSPC;
2585                 }
2586         } else {
2587                 pool_mask = dev->data->mac_pool_sel[index];
2588
2589                 /* If both the MAC address and the pool are already present, do nothing */
2590                 if (pool_mask & (1ULL << pool))
2591                         return 0;
2592         }
2593
2594         /* Update NIC */
2595         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2596
2597         if (ret == 0) {
2598                 /* Update address in NIC data structure */
2599                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2600
2601                 /* Update pool bitmap in NIC data structure */
2602                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2603         }
2604
2605         return ret;
2606 }
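/*
 * Illustrative usage sketch (editor's note, not part of the library): adding
 * a secondary, locally administered unicast address to pool 0. The address
 * bytes are placeholders chosen for the example.
 *
 *	struct ether_addr extra = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *
 *	if (rte_eth_dev_mac_addr_add(port_id, &extra, 0) != 0)
 *		printf("port %u: could not add extra MAC address\n", port_id);
 */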
2607
2608 int
2609 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2610 {
2611         struct rte_eth_dev *dev;
2612         int index;
2613
2614         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2615         dev = &rte_eth_devices[port_id];
2616         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2617
2618         index = get_mac_addr_index(port_id, addr);
2619         if (index == 0) {
2620                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2621                 return -EADDRINUSE;
2622         } else if (index < 0)
2623                 return 0;  /* Do nothing if address wasn't found */
2624
2625         /* Update NIC */
2626         (*dev->dev_ops->mac_addr_remove)(dev, index);
2627
2628         /* Update address in NIC data structure */
2629         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2630
2631         /* reset pool bitmap */
2632         dev->data->mac_pool_sel[index] = 0;
2633
2634         return 0;
2635 }
2636
2637 int
2638 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
2639 {
2640         struct rte_eth_dev *dev;
2641
2642         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2643
2644         if (!is_valid_assigned_ether_addr(addr))
2645                 return -EINVAL;
2646
2647         dev = &rte_eth_devices[port_id];
2648         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2649
2650         /* Update default address in NIC data structure */
2651         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2652
2653         (*dev->dev_ops->mac_addr_set)(dev, addr);
2654
2655         return 0;
2656 }
2657
2658
2659 /*
2660  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2661  * an empty spot.
2662  */
2663 static int
2664 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2665 {
2666         struct rte_eth_dev_info dev_info;
2667         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2668         unsigned i;
2669
2670         rte_eth_dev_info_get(port_id, &dev_info);
2671         if (!dev->data->hash_mac_addrs)
2672                 return -1;
2673
2674         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2675                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2676                         ETHER_ADDR_LEN) == 0)
2677                         return i;
2678
2679         return -1;
2680 }
2681
2682 int
2683 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
2684                                 uint8_t on)
2685 {
2686         int index;
2687         int ret;
2688         struct rte_eth_dev *dev;
2689
2690         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2691
2692         dev = &rte_eth_devices[port_id];
2693         if (is_zero_ether_addr(addr)) {
2694                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2695                         port_id);
2696                 return -EINVAL;
2697         }
2698
2699         index = get_hash_mac_addr_index(port_id, addr);
2700         /* Check if it's already there, and do nothing */
2701         if ((index >= 0) && (on))
2702                 return 0;
2703
2704         if (index < 0) {
2705                 if (!on) {
2706                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2707                                 "set in UTA\n", port_id);
2708                         return -EINVAL;
2709                 }
2710
2711                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2712                 if (index < 0) {
2713                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2714                                         port_id);
2715                         return -ENOSPC;
2716                 }
2717         }
2718
2719         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2720         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2721         if (ret == 0) {
2722                 /* Update address in NIC data structure */
2723                 if (on)
2724                         ether_addr_copy(addr,
2725                                         &dev->data->hash_mac_addrs[index]);
2726                 else
2727                         ether_addr_copy(&null_mac_addr,
2728                                         &dev->data->hash_mac_addrs[index]);
2729         }
2730
2731         return ret;
2732 }
2733
2734 int
2735 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
2736 {
2737         struct rte_eth_dev *dev;
2738
2739         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2740
2741         dev = &rte_eth_devices[port_id];
2742
2743         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2744         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2745 }
2746
2747 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
2748                                         uint16_t tx_rate)
2749 {
2750         struct rte_eth_dev *dev;
2751         struct rte_eth_dev_info dev_info;
2752         struct rte_eth_link link;
2753
2754         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2755
2756         dev = &rte_eth_devices[port_id];
2757         rte_eth_dev_info_get(port_id, &dev_info);
2758         link = dev->data->dev_link;
2759
2760         if (queue_idx > dev_info.max_tx_queues) {
2761                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
2762                                 "invalid queue id=%d\n", port_id, queue_idx);
2763                 return -EINVAL;
2764         }
2765
2766         if (tx_rate > link.link_speed) {
2767                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
2768                                 "bigger than link speed=%d\n",
2769                         tx_rate, link.link_speed);
2770                 return -EINVAL;
2771         }
2772
2773         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2774         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2775 }
2776
2777 int
2778 rte_eth_mirror_rule_set(uint16_t port_id,
2779                         struct rte_eth_mirror_conf *mirror_conf,
2780                         uint8_t rule_id, uint8_t on)
2781 {
2782         struct rte_eth_dev *dev;
2783
2784         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2785         if (mirror_conf->rule_type == 0) {
2786                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2787                 return -EINVAL;
2788         }
2789
2790         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2791                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2792                                 ETH_64_POOLS - 1);
2793                 return -EINVAL;
2794         }
2795
2796         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2797              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2798             (mirror_conf->pool_mask == 0)) {
2799                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2800                 return -EINVAL;
2801         }
2802
2803         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2804             mirror_conf->vlan.vlan_mask == 0) {
2805                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2806                 return -EINVAL;
2807         }
2808
2809         dev = &rte_eth_devices[port_id];
2810         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2811
2812         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2813 }
2814
2815 int
2816 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
2817 {
2818         struct rte_eth_dev *dev;
2819
2820         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2821
2822         dev = &rte_eth_devices[port_id];
2823         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2824
2825         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2826 }
2827
2828 int
2829 rte_eth_dev_callback_register(uint16_t port_id,
2830                         enum rte_eth_event_type event,
2831                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2832 {
2833         struct rte_eth_dev *dev;
2834         struct rte_eth_dev_callback *user_cb;
2835
2836         if (!cb_fn)
2837                 return -EINVAL;
2838
2839         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2840
2841         dev = &rte_eth_devices[port_id];
2842         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2843
2844         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2845                 if (user_cb->cb_fn == cb_fn &&
2846                         user_cb->cb_arg == cb_arg &&
2847                         user_cb->event == event) {
2848                         break;
2849                 }
2850         }
2851
2852         /* create a new callback. */
2853         if (user_cb == NULL) {
2854                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2855                                         sizeof(struct rte_eth_dev_callback), 0);
2856                 if (user_cb != NULL) {
2857                         user_cb->cb_fn = cb_fn;
2858                         user_cb->cb_arg = cb_arg;
2859                         user_cb->event = event;
2860                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2861                 }
2862         }
2863
2864         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2865         return (user_cb == NULL) ? -ENOMEM : 0;
2866 }
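/*
 * Illustrative usage sketch (editor's note, not part of the library):
 * registering a link-state-change callback. The callback prototype matches
 * rte_eth_dev_cb_fn as it is invoked by _rte_eth_dev_callback_process()
 * below; "on_lsc" is a hypothetical application function.
 *
 *	static int
 *	on_lsc(uint16_t port_id, enum rte_eth_event_type event, void *cb_arg,
 *	       void *ret_param)
 *	{
 *		(void)event; (void)cb_arg; (void)ret_param;
 *		printf("port %u: link state changed\n", port_id);
 *		return 0;
 *	}
 *
 * and, at initialization time:
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      on_lsc, NULL);
 */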
2867
2868 int
2869 rte_eth_dev_callback_unregister(uint16_t port_id,
2870                         enum rte_eth_event_type event,
2871                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2872 {
2873         int ret;
2874         struct rte_eth_dev *dev;
2875         struct rte_eth_dev_callback *cb, *next;
2876
2877         if (!cb_fn)
2878                 return -EINVAL;
2879
2880         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2881
2882         dev = &rte_eth_devices[port_id];
2883         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2884
2885         ret = 0;
2886         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2887
2888                 next = TAILQ_NEXT(cb, next);
2889
2890                 if (cb->cb_fn != cb_fn || cb->event != event ||
2891                                 (cb->cb_arg != (void *)-1 &&
2892                                 cb->cb_arg != cb_arg))
2893                         continue;
2894
2895                 /*
2896                  * if this callback is not executing right now,
2897                  * then remove it.
2898                  */
2899                 if (cb->active == 0) {
2900                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2901                         rte_free(cb);
2902                 } else {
2903                         ret = -EAGAIN;
2904                 }
2905         }
2906
2907         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2908         return ret;
2909 }
2910
2911 int
2912 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2913         enum rte_eth_event_type event, void *cb_arg, void *ret_param)
2914 {
2915         struct rte_eth_dev_callback *cb_lst;
2916         struct rte_eth_dev_callback dev_cb;
2917         int rc = 0;
2918
2919         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2920         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2921                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2922                         continue;
2923                 dev_cb = *cb_lst;
2924                 cb_lst->active = 1;
2925                 if (cb_arg != NULL)
2926                         dev_cb.cb_arg = cb_arg;
2927                 if (ret_param != NULL)
2928                         dev_cb.ret_param = ret_param;
2929
2930                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2931                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2932                                 dev_cb.cb_arg, dev_cb.ret_param);
2933                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2934                 cb_lst->active = 0;
2935         }
2936         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2937         return rc;
2938 }
2939
2940 int
2941 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
2942 {
2943         uint32_t vec;
2944         struct rte_eth_dev *dev;
2945         struct rte_intr_handle *intr_handle;
2946         uint16_t qid;
2947         int rc;
2948
2949         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2950
2951         dev = &rte_eth_devices[port_id];
2952
2953         if (!dev->intr_handle) {
2954                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
2955                 return -ENOTSUP;
2956         }
2957
2958         intr_handle = dev->intr_handle;
2959         if (!intr_handle->intr_vec) {
2960                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2961                 return -EPERM;
2962         }
2963
2964         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2965                 vec = intr_handle->intr_vec[qid];
2966                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2967                 if (rc && rc != -EEXIST) {
2968                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2969                                         " op %d epfd %d vec %u\n",
2970                                         port_id, qid, op, epfd, vec);
2971                 }
2972         }
2973
2974         return 0;
2975 }
2976
2977 const struct rte_memzone *
2978 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2979                          uint16_t queue_id, size_t size, unsigned align,
2980                          int socket_id)
2981 {
2982         char z_name[RTE_MEMZONE_NAMESIZE];
2983         const struct rte_memzone *mz;
2984
2985         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2986                  dev->device->driver->name, ring_name,
2987                  dev->data->port_id, queue_id);
2988
2989         mz = rte_memzone_lookup(z_name);
2990         if (mz)
2991                 return mz;
2992
2993         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
2994 }
2995
2996 int
2997 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
2998                           int epfd, int op, void *data)
2999 {
3000         uint32_t vec;
3001         struct rte_eth_dev *dev;
3002         struct rte_intr_handle *intr_handle;
3003         int rc;
3004
3005         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3006
3007         dev = &rte_eth_devices[port_id];
3008         if (queue_id >= dev->data->nb_rx_queues) {
3009                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3010                 return -EINVAL;
3011         }
3012
3013         if (!dev->intr_handle) {
3014                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3015                 return -ENOTSUP;
3016         }
3017
3018         intr_handle = dev->intr_handle;
3019         if (!intr_handle->intr_vec) {
3020                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3021                 return -EPERM;
3022         }
3023
3024         vec = intr_handle->intr_vec[queue_id];
3025         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3026         if (rc && rc != -EEXIST) {
3027                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3028                                 " op %d epfd %d vec %u\n",
3029                                 port_id, queue_id, op, epfd, vec);
3030                 return rc;
3031         }
3032
3033         return 0;
3034 }
3035
3036 int
3037 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3038                            uint16_t queue_id)
3039 {
3040         struct rte_eth_dev *dev;
3041
3042         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3043
3044         dev = &rte_eth_devices[port_id];
3045
3046         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3047         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
3048 }
3049
3050 int
3051 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3052                             uint16_t queue_id)
3053 {
3054         struct rte_eth_dev *dev;
3055
3056         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3057
3058         dev = &rte_eth_devices[port_id];
3059
3060         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3061         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
3062 }
3063
3064
3065 int
3066 rte_eth_dev_filter_supported(uint16_t port_id,
3067                              enum rte_filter_type filter_type)
3068 {
3069         struct rte_eth_dev *dev;
3070
3071         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3072
3073         dev = &rte_eth_devices[port_id];
3074         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3075         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3076                                 RTE_ETH_FILTER_NOP, NULL);
3077 }
3078
3079 int
3080 rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
3081                        enum rte_filter_op filter_op, void *arg)
3082 {
3083         struct rte_eth_dev *dev;
3084
3085         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3086
3087         dev = &rte_eth_devices[port_id];
3088         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3089         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
3090 }
3091
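/*
 * Register a callback to run on packets returned by rte_eth_rx_burst()
 * for the given queue. Callbacks are appended in FIFO order while
 * holding rte_eth_rx_cb_lock; the RX burst path walks the list without
 * taking the lock. When the build does not enable
 * RTE_ETHDEV_RXTX_CALLBACKS, the call returns NULL with rte_errno set
 * to ENOTSUP.
 *
 * Illustrative application-side sketch (the callback and variable names
 * are examples only, not part of this library):
 *
 *	static uint16_t
 *	count_cb(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *	{
 *		*(uint64_t *)arg += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_total;
 *	void *cb = rte_eth_add_rx_callback(port_id, 0, count_cb, &rx_total);
 *	...
 *	rte_eth_remove_rx_callback(port_id, 0, cb);
 */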
3092 void *
3093 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3094                 rte_rx_callback_fn fn, void *user_param)
3095 {
3096 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3097         rte_errno = ENOTSUP;
3098         return NULL;
3099 #endif
3100         /* check input parameters */
3101         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3102                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3103                 rte_errno = EINVAL;
3104                 return NULL;
3105         }
3106         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3107
3108         if (cb == NULL) {
3109                 rte_errno = ENOMEM;
3110                 return NULL;
3111         }
3112
3113         cb->fn.rx = fn;
3114         cb->param = user_param;
3115
3116         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3117         /* Add the callbacks in fifo order. */
3118         struct rte_eth_rxtx_callback *tail =
3119                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3120 
	/* Ensure the stores to cb->fn and cb->param are visible before the
	 * callback is published to the lock-free RX burst path.
	 */
	rte_smp_wmb();
3121         if (!tail) {
3122                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3123
3124         } else {
3125                 while (tail->next)
3126                         tail = tail->next;
3127                 tail->next = cb;
3128         }
3129         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3130
3131         return cb;
3132 }
3133
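/*
 * Same as rte_eth_add_rx_callback() except that the new callback is
 * inserted at the head of the queue's list, so it runs before any
 * previously registered callback. The rte_smp_wmb() below orders the
 * stores to the callback structure before the head-pointer update that
 * publishes it to the lock-free RX burst path.
 */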
3134 void *
3135 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3136                 rte_rx_callback_fn fn, void *user_param)
3137 {
3138 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3139         rte_errno = ENOTSUP;
3140         return NULL;
3141 #endif
3142         /* check input parameters */
3143         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3144                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3145                 rte_errno = EINVAL;
3146                 return NULL;
3147         }
3148
3149         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3150
3151         if (cb == NULL) {
3152                 rte_errno = ENOMEM;
3153                 return NULL;
3154         }
3155
3156         cb->fn.rx = fn;
3157         cb->param = user_param;
3158
3159         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3160         /* Add the callback at the first position. */
3161         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3162         rte_smp_wmb();
3163         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3164         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3165
3166         return cb;
3167 }
3168
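/*
 * TX counterpart of rte_eth_add_rx_callback(): the callback is appended
 * in FIFO order and runs on the packet burst passed to
 * rte_eth_tx_burst() before it is handed to the driver.
 */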
3169 void *
3170 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3171                 rte_tx_callback_fn fn, void *user_param)
3172 {
3173 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3174         rte_errno = ENOTSUP;
3175         return NULL;
3176 #endif
3177         /* check input parameters */
3178         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3179                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3180                 rte_errno = EINVAL;
3181                 return NULL;
3182         }
3183
3184         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3185
3186         if (cb == NULL) {
3187                 rte_errno = ENOMEM;
3188                 return NULL;
3189         }
3190
3191         cb->fn.tx = fn;
3192         cb->param = user_param;
3193
3194         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3195         /* Add the callbacks in fifo order. */
3196         struct rte_eth_rxtx_callback *tail =
3197                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3198 
	/* Ensure the stores to cb->fn and cb->param are visible before the
	 * callback is published to the lock-free TX burst path.
	 */
	rte_smp_wmb();
3199         if (!tail) {
3200                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3201
3202         } else {
3203                 while (tail->next)
3204                         tail = tail->next;
3205                 tail->next = cb;
3206         }
3207         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3208
3209         return cb;
3210 }
3211
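/*
 * Unlink a previously added RX callback from the queue's list. Only the
 * list linkage is changed: the callback structure itself is not freed
 * here, and a data-path thread may still be executing it when this
 * function returns. As documented in rte_ethdev.h, the caller must make
 * sure no RX burst is still using the callback before releasing it.
 */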
3212 int
3213 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3214                 struct rte_eth_rxtx_callback *user_cb)
3215 {
3216 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3217         return -ENOTSUP;
3218 #endif
3219         /* Check input parameters. */
3220         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3221         if (user_cb == NULL ||
3222                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3223                 return -EINVAL;
3224
3225         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3226         struct rte_eth_rxtx_callback *cb;
3227         struct rte_eth_rxtx_callback **prev_cb;
3228         int ret = -EINVAL;
3229
3230         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3231         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3232         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3233                 cb = *prev_cb;
3234                 if (cb == user_cb) {
3235                         /* Remove the user cb from the callback list. */
3236                         *prev_cb = cb->next;
3237                         ret = 0;
3238                         break;
3239                 }
3240         }
3241         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3242
3243         return ret;
3244 }
3245
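/*
 * TX counterpart of rte_eth_remove_rx_callback(); the same ownership
 * rules apply: the callback is unlinked but not freed.
 */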
3246 int
3247 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3248                 struct rte_eth_rxtx_callback *user_cb)
3249 {
3250 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3251         return -ENOTSUP;
3252 #endif
3253         /* Check input parameters. */
3254         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3255         if (user_cb == NULL ||
3256                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3257                 return -EINVAL;
3258
3259         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3260         int ret = -EINVAL;
3261         struct rte_eth_rxtx_callback *cb;
3262         struct rte_eth_rxtx_callback **prev_cb;
3263
3264         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3265         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3266         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3267                 cb = *prev_cb;
3268                 if (cb == user_cb) {
3269                         /* Remove the user cb from the callback list. */
3270                         *prev_cb = cb->next;
3271                         ret = 0;
3272                         break;
3273                 }
3274         }
3275         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3276
3277         return ret;
3278 }
3279
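/*
 * Queue introspection helpers. Each validates the port, queue id and
 * output pointer, zeroes the caller's structure and has the PMD fill it
 * in via its rxq_info_get/txq_info_get op (-ENOTSUP if the driver does
 * not implement it).
 */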
3280 int
3281 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3282         struct rte_eth_rxq_info *qinfo)
3283 {
3284         struct rte_eth_dev *dev;
3285
3286         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3287
3288         if (qinfo == NULL)
3289                 return -EINVAL;
3290
3291         dev = &rte_eth_devices[port_id];
3292         if (queue_id >= dev->data->nb_rx_queues) {
3293                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3294                 return -EINVAL;
3295         }
3296
3297         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3298
3299         memset(qinfo, 0, sizeof(*qinfo));
3300         (*dev->dev_ops->rxq_info_get)(dev, queue_id, qinfo);
3301         return 0;
3302 }
3303
3304 int
3305 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3306         struct rte_eth_txq_info *qinfo)
3307 {
3308         struct rte_eth_dev *dev;
3309
3310         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3311
3312         if (qinfo == NULL)
3313                 return -EINVAL;
3314
3315         dev = &rte_eth_devices[port_id];
3316         if (queue_id >= dev->data->nb_tx_queues) {
3317                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%u\n", queue_id);
3318                 return -EINVAL;
3319         }
3320
3321         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3322
3323         memset(qinfo, 0, sizeof(*qinfo));
3324         (*dev->dev_ops->txq_info_get)(dev, queue_id, qinfo);
3325         return 0;
3326 }
3327
3328 int
3329 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3330                              struct ether_addr *mc_addr_set,
3331                              uint32_t nb_mc_addr)
3332 {
3333         struct rte_eth_dev *dev;
3334
3335         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3336
3337         dev = &rte_eth_devices[port_id];
3338         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3339         return (*dev->dev_ops->set_mc_addr_list)(dev, mc_addr_set, nb_mc_addr);
3340 }
3341
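/*
 * IEEE 1588/802.1AS timesync wrappers. Each call validates the port and
 * forwards to the matching PMD op, returning -ENOTSUP when the driver
 * does not implement it.
 */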
3342 int
3343 rte_eth_timesync_enable(uint16_t port_id)
3344 {
3345         struct rte_eth_dev *dev;
3346
3347         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3348         dev = &rte_eth_devices[port_id];
3349
3350         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3351         return (*dev->dev_ops->timesync_enable)(dev);
3352 }
3353
3354 int
3355 rte_eth_timesync_disable(uint16_t port_id)
3356 {
3357         struct rte_eth_dev *dev;
3358
3359         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3360         dev = &rte_eth_devices[port_id];
3361
3362         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3363         return (*dev->dev_ops->timesync_disable)(dev);
3364 }
3365
3366 int
3367 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3368                                    uint32_t flags)
3369 {
3370         struct rte_eth_dev *dev;
3371
3372         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3373         dev = &rte_eth_devices[port_id];
3374
3375         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3376         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3377 }
3378
3379 int
3380 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3381                                    struct timespec *timestamp)
3382 {
3383         struct rte_eth_dev *dev;
3384
3385         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3386         dev = &rte_eth_devices[port_id];
3387
3388         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3389         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3390 }
3391
3392 int
3393 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3394 {
3395         struct rte_eth_dev *dev;
3396
3397         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3398         dev = &rte_eth_devices[port_id];
3399
3400         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3401         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3402 }
3403
3404 int
3405 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3406 {
3407         struct rte_eth_dev *dev;
3408
3409         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3410         dev = &rte_eth_devices[port_id];
3411
3412         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3413         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3414 }
3415
3416 int
3417 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3418 {
3419         struct rte_eth_dev *dev;
3420
3421         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3422         dev = &rte_eth_devices[port_id];
3423
3424         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3425         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3426 }
3427
3428 int
3429 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
3430 {
3431         struct rte_eth_dev *dev;
3432
3433         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3434
3435         dev = &rte_eth_devices[port_id];
3436         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3437         return (*dev->dev_ops->get_reg)(dev, info);
3438 }
3439
3440 int
3441 rte_eth_dev_get_eeprom_length(uint16_t port_id)
3442 {
3443         struct rte_eth_dev *dev;
3444
3445         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3446
3447         dev = &rte_eth_devices[port_id];
3448         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3449         return (*dev->dev_ops->get_eeprom_length)(dev);
3450 }
3451
3452 int
3453 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3454 {
3455         struct rte_eth_dev *dev;
3456
3457         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3458
3459         dev = &rte_eth_devices[port_id];
3460         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3461         return (*dev->dev_ops->get_eeprom)(dev, info);
3462 }
3463
3464 int
3465 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
3466 {
3467         struct rte_eth_dev *dev;
3468
3469         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3470
3471         dev = &rte_eth_devices[port_id];
3472         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3473         return (*dev->dev_ops->set_eeprom)(dev, info);
3474 }
3475
3476 int
3477 rte_eth_dev_get_dcb_info(uint16_t port_id,
3478                              struct rte_eth_dcb_info *dcb_info)
3479 {
3480         struct rte_eth_dev *dev;
3481
3482         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3483
3484         dev = &rte_eth_devices[port_id];
3485         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3486
3487         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3488         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3489 }
3490
3491 int
3492 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
3493                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3494 {
3495         struct rte_eth_dev *dev;
3496
3497         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3498         if (l2_tunnel == NULL) {
3499                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3500                 return -EINVAL;
3501         }
3502
3503         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3504                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3505                 return -EINVAL;
3506         }
3507
3508         dev = &rte_eth_devices[port_id];
3509         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3510                                 -ENOTSUP);
3511         return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
3512 }
3513
3514 int
3515 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
3516                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3517                                   uint32_t mask,
3518                                   uint8_t en)
3519 {
3520         struct rte_eth_dev *dev;
3521
3522         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3523
3524         if (l2_tunnel == NULL) {
3525                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3526                 return -EINVAL;
3527         }
3528
3529         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3530                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3531                 return -EINVAL;
3532         }
3533
3534         if (mask == 0) {
3535                 RTE_PMD_DEBUG_TRACE("Mask must be non-zero.\n");
3536                 return -EINVAL;
3537         }
3538
3539         dev = &rte_eth_devices[port_id];
3540         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3541                                 -ENOTSUP);
3542         return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
3543 }
3544
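/*
 * Clamp a requested descriptor count to the limits advertised by the
 * driver: round it up to the descriptor alignment, cap it at nb_max,
 * then raise it to at least nb_min. For example (illustrative numbers
 * only), a request of 1000 with nb_align = 128, nb_max = 4096 and
 * nb_min = 64 becomes 1024, while a request of 10000 is capped at 4096.
 */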
3545 static void
3546 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
3547                            const struct rte_eth_desc_lim *desc_lim)
3548 {
3549         if (desc_lim->nb_align != 0)
3550                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
3551
3552         if (desc_lim->nb_max != 0)
3553                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
3554
3555         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
3556 }
3557
3558 int
3559 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
3560                                  uint16_t *nb_rx_desc,
3561                                  uint16_t *nb_tx_desc)
3562 {
3563         struct rte_eth_dev *dev;
3564         struct rte_eth_dev_info dev_info;
3565
3566         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3567
3568         dev = &rte_eth_devices[port_id];
3569         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
3570
3571         rte_eth_dev_info_get(port_id, &dev_info);
3572
3573         if (nb_rx_desc != NULL)
3574                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
3575
3576         if (nb_tx_desc != NULL)
3577                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
3578
3579         return 0;
3580 }
3581
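/*
 * Ask the driver whether it can work with mbufs from a mempool created
 * with the given mempool ops name (for example "ring_mp_mc"). Drivers
 * that do not implement pool_ops_supported are assumed to work with
 * every mempool handler, hence the early return of 1.
 */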
3582 int
3583 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
3584 {
3585         struct rte_eth_dev *dev;
3586
3587         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3588
3589         if (pool == NULL)
3590                 return -EINVAL;
3591
3592         dev = &rte_eth_devices[port_id];
3593
3594         if (*dev->dev_ops->pool_ops_supported == NULL)
3595                 return 1; /* all pools are supported */
3596
3597         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
3598 }