/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
        {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
        {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
        {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
        {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
        {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
        {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
        {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
                rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
        {"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
                sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
        {"packets", offsetof(struct rte_eth_stats, q_opackets)},
        {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
                sizeof(rte_txq_stats_strings[0]))
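
/*
 * Each entry in the tables above pairs a counter name with the byte offset
 * of a uint64_t field inside struct rte_eth_stats, so the generic xstats
 * code below can walk the tables instead of hard-coding one branch per
 * field. A rough sketch of the lookup, mirroring rte_eth_xstats_get():
 *
 *        struct rte_eth_stats stats;
 *        rte_eth_stats_get(port_id, &stats);
 *        uint64_t *p = RTE_PTR_ADD(&stats, rte_stats_strings[i].offset);
 *        printf("%s = %" PRIu64 "\n", rte_stats_strings[i].name, *p);
 */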

/**
 * The user application callback description.
 *
 * It contains the callback address registered by the user application,
 * a pointer to the callback's parameter, and the event type.
 */
struct rte_eth_dev_callback {
        TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
        rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
        void *cb_arg;                           /**< Parameter for callback */
        enum rte_eth_event_type event;          /**< Interrupt event type */
        uint32_t active;                        /**< Callback is executing */
};

enum {
        STAT_QMAP_TX = 0,
        STAT_QMAP_RX
};

enum {
        DEV_DETACHED = 0,
        DEV_ATTACHED
};

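/*
 * The per-port rte_eth_dev_data array lives in a shared memzone: the
 * primary process reserves it, secondary processes only look it up, so
 * every DPDK process sees the same device data.
 */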
static void
rte_eth_dev_data_alloc(void)
{
        const unsigned flags = 0;
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
                                rte_socket_id(), flags);
        } else
                mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
        if (mz == NULL)
                rte_panic("Cannot allocate memzone for ethernet port data\n");

        rte_eth_dev_data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(rte_eth_dev_data, 0,
                                RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
                    strcmp(rte_eth_devices[i].data->name, name) == 0)
                        return &rte_eth_devices[i];
        }
        return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
        unsigned i;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (rte_eth_devices[i].attached == DEV_DETACHED)
                        return i;
        }
        return RTE_MAX_ETHPORTS;
}

static struct rte_eth_dev *
eth_dev_get(uint8_t port_id)
{
        struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];

        eth_dev->data = &rte_eth_dev_data[port_id];
        eth_dev->attached = DEV_ATTACHED;

        eth_dev_last_created_port = port_id;
        nb_ports++;

        return eth_dev;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
        uint8_t port_id;
        struct rte_eth_dev *eth_dev;

        port_id = rte_eth_dev_find_free_port();
        if (port_id == RTE_MAX_ETHPORTS) {
                RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
                return NULL;
        }

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        if (rte_eth_dev_allocated(name) != NULL) {
                RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
                                name);
                return NULL;
        }

        eth_dev = eth_dev_get(port_id);
        snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
        eth_dev->data->port_id = port_id;

        return eth_dev;
}
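
/*
 * Usage sketch (illustrative only, not part of the library): a PMD
 * typically calls rte_eth_dev_allocate() from its init path and then
 * fills in its ops and private data. The names "my_pmd", my_pmd_ops and
 * priv below are placeholders.
 *
 *        struct rte_eth_dev *dev = rte_eth_dev_allocate("my_pmd");
 *        if (dev == NULL)
 *                return -ENOMEM;
 *        dev->dev_ops = &my_pmd_ops;
 *        dev->data->dev_private = priv;
 */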

/*
 * Attach to a port already registered by the primary process, so that
 * the same device gets the same port id in both the primary and
 * secondary processes.
 */
static struct rte_eth_dev *
eth_dev_attach_secondary(const char *name)
{
        uint8_t i;
        struct rte_eth_dev *eth_dev;

        if (rte_eth_dev_data == NULL)
                rte_eth_dev_data_alloc();

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (strcmp(rte_eth_dev_data[i].name, name) == 0)
                        break;
        }
        if (i == RTE_MAX_ETHPORTS) {
                RTE_PMD_DEBUG_TRACE(
                        "device %s is not driven by the primary process\n",
                        name);
                return NULL;
        }

        eth_dev = eth_dev_get(i);
        RTE_ASSERT(eth_dev->data->port_id == i);

        return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
        if (eth_dev == NULL)
                return -EINVAL;

        eth_dev->attached = DEV_DETACHED;
        nb_ports--;
        return 0;
}

int
rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
                      struct rte_pci_device *pci_dev)
{
        struct eth_driver    *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];

        int diag;

        eth_drv = (struct eth_driver *)pci_drv;

        rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
                        sizeof(ethdev_name));

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                eth_dev = rte_eth_dev_allocate(ethdev_name);
                if (eth_dev == NULL)
                        return -ENOMEM;

                eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
                                  eth_drv->dev_private_size,
                                  RTE_CACHE_LINE_SIZE);
                if (eth_dev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private port data\n");
        } else {
                eth_dev = eth_dev_attach_secondary(ethdev_name);
                if (eth_dev == NULL) {
                        /*
                         * If we failed to attach the device, it means the
                         * device was skipped in the primary process due to
                         * some error. Return a positive value so that EAL
                         * skips it for the secondary process as well.
                         */
                        return 1;
                }
        }
        eth_dev->pci_dev = pci_dev;
        eth_dev->driver = eth_drv;
        eth_dev->data->rx_mbuf_alloc_failed = 0;

        /* init user callbacks */
        TAILQ_INIT(&(eth_dev->link_intr_cbs));

        /*
         * Set the default MTU.
         */
        eth_dev->data->mtu = ETHER_MTU;

        /* Invoke PMD device initialization function */
        diag = (*eth_drv->eth_dev_init)(eth_dev);
        if (diag == 0)
                return 0;

        RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
                        pci_drv->driver.name,
                        (unsigned) pci_dev->id.vendor_id,
                        (unsigned) pci_dev->id.device_id);
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
        rte_eth_dev_release_port(eth_dev);
        return diag;
}

int
rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
        const struct eth_driver *eth_drv;
        struct rte_eth_dev *eth_dev;
        char ethdev_name[RTE_ETH_NAME_MAX_LEN];
        int ret;

        if (pci_dev == NULL)
                return -EINVAL;

        rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
                        sizeof(ethdev_name));

        eth_dev = rte_eth_dev_allocated(ethdev_name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_drv = (const struct eth_driver *)pci_dev->driver;

        /* Invoke PMD device uninit function */
        if (*eth_drv->eth_dev_uninit) {
                ret = (*eth_drv->eth_dev_uninit)(eth_dev);
                if (ret)
                        return ret;
        }

        /* free ether device */
        rte_eth_dev_release_port(eth_dev);

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);

        eth_dev->pci_dev = NULL;
        eth_dev->driver = NULL;
        eth_dev->data = NULL;

        return 0;
}

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
        if (port_id >= RTE_MAX_ETHPORTS ||
            rte_eth_devices[port_id].attached != DEV_ATTACHED)
                return 0;
        else
                return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
        return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
        return nb_ports;
}

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
        char *tmp;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        /* shouldn't check 'rte_eth_devices[port_id].data' here,
         * because it might be overwritten by a VDEV PMD */
        tmp = rte_eth_dev_data[port_id].name;
        strcpy(name, tmp);
        return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
        int i;

        if (name == NULL) {
                RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
                return -EINVAL;
        }

        if (!nb_ports)
                return -ENODEV;

        *port_id = RTE_MAX_ETHPORTS;

        for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
                if (!strncmp(name,
                        rte_eth_dev_data[i].name, strlen(name))) {
                        *port_id = i;
                        return 0;
                }
        }
        return -ENODEV;
}
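
/*
 * Usage sketch (illustrative only): the two lookups above are inverses of
 * each other; note that rte_eth_dev_get_port_by_name() matches on the
 * prefix of the stored name. The PCI address string is a placeholder.
 *
 *        uint8_t pid;
 *        char name[RTE_ETH_NAME_MAX_LEN];
 *        if (rte_eth_dev_get_port_by_name("0000:01:00.0", &pid) == 0)
 *                rte_eth_dev_get_name_by_port(pid, name);
 */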

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
        uint32_t dev_flags;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        switch (rte_eth_devices[port_id].data->kdrv) {
        case RTE_KDRV_IGB_UIO:
        case RTE_KDRV_UIO_GENERIC:
        case RTE_KDRV_NIC_UIO:
        case RTE_KDRV_NONE:
                break;
        case RTE_KDRV_VFIO:
        default:
                return -ENOTSUP;
        }
        dev_flags = rte_eth_devices[port_id].data->dev_flags;
        if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
                (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
                return 0;
        else
                return 1;
}

/* attach the new device and store its port_id */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
        int ret = -1;
        int current = rte_eth_dev_count();
        char *name = NULL;
        char *args = NULL;

        if ((devargs == NULL) || (port_id == NULL)) {
                ret = -EINVAL;
                goto err;
        }

        /* parse devargs, then retrieve the device name and args */
        if (rte_eal_parse_devargs_str(devargs, &name, &args))
                goto err;

        ret = rte_eal_dev_attach(name, args);
        if (ret < 0)
                goto err;

        /* no point looking at the port count if no port exists */
        if (!rte_eth_dev_count()) {
                RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
                ret = -1;
                goto err;
        }

        /* if the port count did not change, there is a bug here, since the
         * driver told us it attached a device but did not create a port.
         */
        if (current == rte_eth_dev_count()) {
                ret = -1;
                goto err;
        }

        *port_id = eth_dev_last_created_port;
        ret = 0;

err:
        free(name);
        free(args);
        return ret;
}
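
/*
 * Usage sketch (illustrative only): hot-plugging a device at runtime and
 * detaching it again. The devargs string "net_null0" is a placeholder for
 * whatever driver the application has available.
 *
 *        uint8_t port_id;
 *        char name[RTE_ETH_NAME_MAX_LEN];
 *        if (rte_eth_dev_attach("net_null0", &port_id) == 0) {
 *                ... configure, start, use and stop the port ...
 *                rte_eth_dev_detach(port_id, name);
 *        }
 */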

/* detach the device and store its name */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
        int ret = -1;

        if (name == NULL) {
                ret = -EINVAL;
                goto err;
        }

        /* FIXME: move this to eal, once device flags are relocated there */
        if (rte_eth_dev_is_detachable(port_id))
                goto err;

        snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
                 "%s", rte_eth_devices[port_id].data->name);
        ret = rte_eal_dev_detach(name);
        if (ret < 0)
                goto err;

        return 0;

err:
        return ret;
}

static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_rx_queues;
        void **rxq;
        unsigned i;

        if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (dev->data->rx_queues == NULL) {
                        dev->data->nb_rx_queues = 0;
                        return -ENOMEM;
                }
        } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
                rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (rxq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(rxq + old_nb_queues, 0,
                                sizeof(rxq[0]) * new_qs);
                }

                dev->data->rx_queues = rxq;

        } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

                rxq = dev->data->rx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->rx_queue_release)(rxq[i]);
        }
        dev->data->nb_rx_queues = nb_queues;
        return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu8
                        " already started\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu8
                        " already stopped\n",
                        rx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu8
                        " already started\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu8
                        " already stopped\n",
                        tx_queue_id, port_id);
                return 0;
        }

        return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
        uint16_t old_nb_queues = dev->data->nb_tx_queues;
        void **txq;
        unsigned i;

        if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                                   sizeof(dev->data->tx_queues[0]) * nb_queues,
                                                   RTE_CACHE_LINE_SIZE);
                if (dev->data->tx_queues == NULL) {
                        dev->data->nb_tx_queues = 0;
                        return -ENOMEM;
                }
        } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
                txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
                                  RTE_CACHE_LINE_SIZE);
                if (txq == NULL)
                        return -ENOMEM;
                if (nb_queues > old_nb_queues) {
                        uint16_t new_qs = nb_queues - old_nb_queues;

                        memset(txq + old_nb_queues, 0,
                               sizeof(txq[0]) * new_qs);
                }

                dev->data->tx_queues = txq;

        } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

                txq = dev->data->tx_queues;

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->tx_queue_release)(txq[i]);
        }
        dev->data->nb_tx_queues = nb_queues;
        return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
        switch (speed) {
        case ETH_SPEED_NUM_10M:
                return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
        case ETH_SPEED_NUM_100M:
                return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
        case ETH_SPEED_NUM_1G:
                return ETH_LINK_SPEED_1G;
        case ETH_SPEED_NUM_2_5G:
                return ETH_LINK_SPEED_2_5G;
        case ETH_SPEED_NUM_5G:
                return ETH_LINK_SPEED_5G;
        case ETH_SPEED_NUM_10G:
                return ETH_LINK_SPEED_10G;
        case ETH_SPEED_NUM_20G:
                return ETH_LINK_SPEED_20G;
        case ETH_SPEED_NUM_25G:
                return ETH_LINK_SPEED_25G;
        case ETH_SPEED_NUM_40G:
                return ETH_LINK_SPEED_40G;
        case ETH_SPEED_NUM_50G:
                return ETH_LINK_SPEED_50G;
        case ETH_SPEED_NUM_56G:
                return ETH_LINK_SPEED_56G;
        case ETH_SPEED_NUM_100G:
                return ETH_LINK_SPEED_100G;
        default:
                return 0;
        }
}
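
/*
 * Usage sketch (illustrative only): translating a numeric speed into the
 * bitflag form used by rte_eth_conf.link_speeds, e.g. to force a fixed
 * 10G full-duplex link:
 *
 *        conf.link_speeds = ETH_LINK_SPEED_FIXED |
 *                rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */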

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of RX queues requested (%u) is greater than max supported (%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_PMD_DEBUG_TRACE(
                        "Number of TX queues requested (%u) is greater than max supported (%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
                return -EINVAL;
        }

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
         * configured device.
         */
        (*dev->dev_ops->dev_infos_get)(dev, &dev_info);

        if (nb_rx_q == 0 && nb_tx_q == 0) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d number of RX and TX queues cannot both be 0\n", port_id);
                return -EINVAL;
        }

        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
                                port_id, nb_rx_q, dev_info.max_rx_queues);
                return -EINVAL;
        }

        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
                                port_id, nb_tx_q, dev_info.max_tx_queues);
                return -EINVAL;
        }

        /*
         * If link state interrupt is enabled, check that the
         * device supports it.
         */
        if ((dev_conf->intr_conf.lsc == 1) &&
                (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
                                dev->data->drv_name);
                return -EINVAL;
        }

        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
        if (dev_conf->rxmode.jumbo_frame == 1) {
                if (dev_conf->rxmode.max_rx_pkt_len >
                    dev_info.max_rx_pktlen) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " > max valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)dev_info.max_rx_pktlen);
                        return -EINVAL;
                } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
                        RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
                                " < min valid value %u\n",
                                port_id,
                                (unsigned)dev_conf->rxmode.max_rx_pkt_len,
                                (unsigned)ETHER_MIN_LEN);
                        return -EINVAL;
                }
        } else {
                if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
                        dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
                                                        ETHER_MAX_LEN;
        }

        /*
         * Setup new number of RX/TX queues and reconfigure device.
         */
        diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
                                port_id, diag);
                return diag;
        }

        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                return diag;
        }

        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
                                port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
                return diag;
        }

        return 0;
}
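
/*
 * Usage sketch (illustrative only): a minimal single-queue configuration;
 * the field values are placeholders.
 *
 *        struct rte_eth_conf conf = {
 *                .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN, },
 *        };
 *        if (rte_eth_dev_configure(port_id, 1, 1, &conf) < 0)
 *                rte_exit(EXIT_FAILURE, "cannot configure port\n");
 */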

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct ether_addr addr;
        uint16_t i;
        uint32_t pool = 0;

        dev = &rte_eth_devices[port_id];

        rte_eth_dev_info_get(port_id, &dev_info);

        if (RTE_ETH_DEV_SRIOV(dev).active)
                pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

        /* replay MAC address configuration */
        for (i = 0; i < dev_info.max_mac_addrs; i++) {
                addr = dev->data->mac_addrs[i];

                /* skip zero address */
                if (is_zero_ether_addr(&addr))
                        continue;

                /* add address to the hardware */
                if (*dev->dev_ops->mac_addr_add &&
                        (dev->data->mac_pool_sel[i] & (1ULL << pool)))
                        (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
                else {
                        RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
                                        port_id);
                        /* exit the loop but do not return an error */
                        break;
                }
        }

        /* replay promiscuous configuration */
        if (rte_eth_promiscuous_get(port_id) == 1)
                rte_eth_promiscuous_enable(port_id);
        else if (rte_eth_promiscuous_get(port_id) == 0)
                rte_eth_promiscuous_disable(port_id);

        /* replay all multicast configuration */
        if (rte_eth_allmulticast_get(port_id) == 1)
                rte_eth_allmulticast_enable(port_id);
        else if (rte_eth_allmulticast_get(port_id) == 0)
                rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int diag;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already started\n",
                        port_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        rte_eth_dev_config_restore(port_id);

        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
        }
        return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
                        " already stopped\n",
                        port_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
        return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_close)(dev);

        dev->data->nb_rx_queues = 0;
        rte_free(dev->data->rx_queues);
        dev->data->rx_queues = NULL;
        dev->data->nb_tx_queues = 0;
        rte_free(dev->data->tx_queues);
        dev->data->tx_queues = NULL;
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                       uint16_t nb_rx_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;
        uint32_t mbp_buf_size;
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (rx_queue_id >= dev->data->nb_rx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

        /*
         * Check the size of the mbuf data buffer.
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has valid private data.
         */
        rte_eth_dev_info_get(port_id, &dev_info);
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
                                mp->name, (int) mp->private_data_size,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return -ENOSPC;
        }
        mbp_buf_size = rte_pktmbuf_data_room_size(mp);

        if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
                RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
                                "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
                                "=%d)\n",
                                mp->name,
                                (int)mbp_buf_size,
                                (int)(RTE_PKTMBUF_HEADROOM +
                                      dev_info.min_rx_bufsize),
                                (int)RTE_PKTMBUF_HEADROOM,
                                (int)dev_info.min_rx_bufsize);
                return -EINVAL;
        }

        if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
                        nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
                        "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_rx_desc,
                        dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (rx_conf == NULL)
                rx_conf = &dev_info.default_rxconf;

        ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                                              socket_id, rx_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
                        dev->data->min_rx_buf_size = mbp_buf_size;
        }

        return ret;
}

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        if (tx_queue_id >= dev->data->nb_tx_queues) {
                RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_PMD_DEBUG_TRACE(
                    "port %d must be stopped to allow configuration\n", port_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

        rte_eth_dev_info_get(port_id, &dev_info);

        if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
            nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
            nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
                RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
                                "should be: <= %hu, >= %hu, and a multiple of %hu\n",
                                nb_tx_desc,
                                dev_info.tx_desc_lim.nb_max,
                                dev_info.tx_desc_lim.nb_min,
                                dev_info.tx_desc_lim.nb_align);
                return -EINVAL;
        }

        if (tx_conf == NULL)
                tx_conf = &dev_info.default_txconf;

        return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
                                               socket_id, tx_conf);
}
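
/*
 * Usage sketch (illustrative only): the usual bring-up order for a port,
 * tying the calls above together, with conf set up as in the sketch after
 * rte_eth_dev_configure(). The pool sizing values are placeholders; note
 * that rte_eth_rx_queue_setup() requires the pool's data room to cover
 * RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize.
 *
 *        struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192,
 *                256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *        rte_eth_dev_configure(port_id, 1, 1, &conf);
 *        rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mp);
 *        rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *        rte_eth_dev_start(port_id);
 */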

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata __rte_unused)
{
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata)
{
        uint64_t *count = userdata;
        unsigned i;

        for (i = 0; i < unsent; i++)
                rte_pktmbuf_free(pkts[i]);

        *count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
                buffer_tx_error_fn cbfn, void *userdata)
{
        buffer->error_callback = cbfn;
        buffer->error_userdata = userdata;
        return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
        int ret = 0;

        if (buffer == NULL)
                return -EINVAL;

        buffer->size = size;
        if (buffer->error_callback == NULL) {
                ret = rte_eth_tx_buffer_set_err_callback(
                        buffer, rte_eth_tx_buffer_drop_callback, NULL);
        }

        return ret;
}
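
/*
 * Usage sketch (illustrative only): buffering packets for TX and counting
 * drops through the callback above; "dropped" and "pkt" are placeholders
 * owned by the application.
 *
 *        static uint64_t dropped;
 *        struct rte_eth_dev_tx_buffer *buf = rte_zmalloc("tx_buf",
 *                RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *        rte_eth_tx_buffer_init(buf, 32);
 *        rte_eth_tx_buffer_set_err_callback(buf,
 *                rte_eth_tx_buffer_count_callback, &dropped);
 *        rte_eth_tx_buffer(port_id, 0, buf, pkt);
 *        rte_eth_tx_buffer_flush(port_id, 0, buf);
 */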

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
        (*dev->dev_ops->promiscuous_enable)(dev);
        dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
        dev->data->promiscuous = 0;
        (*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
        (*dev->dev_ops->allmulticast_enable)(dev);
        dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
        dev->data->all_multicast = 0;
        (*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        return dev->data->all_multicast;
}

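/*
 * Copy the 64-bit link status in a single shot: rte_atomic64_cmpset() on
 * the destination, with the destination's current value as the "expected"
 * argument, stores the source word atomically. struct rte_eth_link is
 * sized to fit in 64 bits precisely so this works.
 */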
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        if (dev->data->dev_conf.intr_conf.lsc != 0)
                rte_eth_dev_atomic_read_link_status(dev, eth_link);
        else {
                RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
                (*dev->dev_ops->link_update)(dev, 0);
                *eth_link = dev->data->dev_link;
        }
}
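
/*
 * Usage sketch (illustrative only): polling link state without blocking
 * in the PMD on every iteration.
 *
 *        struct rte_eth_link link;
 *        rte_eth_link_get_nowait(port_id, &link);
 *        if (link.link_status == ETH_LINK_UP)
 *                printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */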

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

        dev = &rte_eth_devices[port_id];
        memset(stats, 0, sizeof(*stats));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
        stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
        (*dev->dev_ops->stats_get)(dev, stats);
        return 0;
}
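
/*
 * Usage sketch (illustrative only): dumping the basic counters.
 *
 *        struct rte_eth_stats st;
 *        if (rte_eth_stats_get(port_id, &st) == 0)
 *                printf("rx %" PRIu64 " tx %" PRIu64 " missed %" PRIu64 "\n",
 *                        st.ipackets, st.opackets, st.imissed);
 */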

void
rte_eth_stats_reset(uint8_t port_id)
{
        struct rte_eth_dev *dev;

        RTE_ETH_VALID_PORTID_OR_RET(port_id);
        dev = &rte_eth_devices[port_id];

        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
        (*dev->dev_ops->stats_reset)(dev);
        dev->data->rx_mbuf_alloc_failed = 0;
}

static int
get_xstats_count(uint8_t port_id)
{
        struct rte_eth_dev *dev;
        int count;

        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
        dev = &rte_eth_devices[port_id];
        if (dev->dev_ops->xstats_get_names != NULL) {
                count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
                if (count < 0)
                        return count;
        } else
                count = 0;
        count += RTE_NB_STATS;
        count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
                 RTE_NB_RXQ_STATS;
        count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
                 RTE_NB_TXQ_STATS;
        return count;
}

int
rte_eth_xstats_get_names(uint8_t port_id,
        struct rte_eth_xstat_name *xstats_names,
        unsigned size)
{
        struct rte_eth_dev *dev;
        int cnt_used_entries;
        int cnt_expected_entries;
        int cnt_driver_entries;
        uint32_t idx, id_queue;
        uint16_t num_q;

        cnt_expected_entries = get_xstats_count(port_id);
        if (xstats_names == NULL || cnt_expected_entries < 0 ||
                        (int)size < cnt_expected_entries)
                return cnt_expected_entries;

        /* port_id checked in get_xstats_count() */
        dev = &rte_eth_devices[port_id];
        cnt_used_entries = 0;

        for (idx = 0; idx < RTE_NB_STATS; idx++) {
                snprintf(xstats_names[cnt_used_entries].name,
                        sizeof(xstats_names[0].name),
                        "%s", rte_stats_strings[idx].name);
                cnt_used_entries++;
        }
        num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
                                "rx_q%u%s",
                                id_queue, rte_rxq_stats_strings[idx].name);
                        cnt_used_entries++;
                }
        }
        num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
        for (id_queue = 0; id_queue < num_q; id_queue++) {
                for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
                        snprintf(xstats_names[cnt_used_entries].name,
                                sizeof(xstats_names[0].name),
                                "tx_q%u%s",
                                id_queue, rte_txq_stats_strings[idx].name);
                        cnt_used_entries++;
                }
        }

        if (dev->dev_ops->xstats_get_names != NULL) {
                /* If there are any driver-specific xstats, append them
                 * to the end of the list.
                 */
                cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
                        dev,
                        xstats_names + cnt_used_entries,
                        size - cnt_used_entries);
                if (cnt_driver_entries < 0)
                        return cnt_driver_entries;
                cnt_used_entries += cnt_driver_entries;
        }

        return cnt_used_entries;
}
1476
1477 /* retrieve ethdev extended statistics */
1478 int
1479 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
1480         unsigned n)
1481 {
1482         struct rte_eth_stats eth_stats;
1483         struct rte_eth_dev *dev;
1484         unsigned count = 0, i, q;
1485         signed xcount = 0;
1486         uint64_t val, *stats_ptr;
1487         uint16_t nb_rxqs, nb_txqs;
1488
1489         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1490
1491         dev = &rte_eth_devices[port_id];
1492
1493         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1494         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1495
1496         /* Return generic statistics */
1497         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
1498                 (nb_txqs * RTE_NB_TXQ_STATS);
1499
1500         /* implemented by the driver */
1501         if (dev->dev_ops->xstats_get != NULL) {
1502                 /* Retrieve the xstats from the driver at the end of the
1503                  * xstats struct.
1504                  */
1505                 xcount = (*dev->dev_ops->xstats_get)(dev,
1506                                      xstats ? xstats + count : NULL,
1507                                      (n > count) ? n - count : 0);
1508
1509                 if (xcount < 0)
1510                         return xcount;
1511         }
1512
1513         if (n < count + xcount || xstats == NULL)
1514                 return count + xcount;
1515
1516         /* now fill the xstats structure */
1517         count = 0;
1518         rte_eth_stats_get(port_id, &eth_stats);
1519
1520         /* global stats */
1521         for (i = 0; i < RTE_NB_STATS; i++) {
1522                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1523                                         rte_stats_strings[i].offset);
1524                 val = *stats_ptr;
1525                 xstats[count++].value = val;
1526         }
1527
1528         /* per-rxq stats */
1529         for (q = 0; q < nb_rxqs; q++) {
1530                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1531                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1532                                         rte_rxq_stats_strings[i].offset +
1533                                         q * sizeof(uint64_t));
1534                         val = *stats_ptr;
1535                         xstats[count++].value = val;
1536                 }
1537         }
1538
1539         /* per-txq stats */
1540         for (q = 0; q < nb_txqs; q++) {
1541                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1542                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1543                                         rte_txq_stats_strings[i].offset +
1544                                         q * sizeof(uint64_t));
1545                         val = *stats_ptr;
1546                         xstats[count++].value = val;
1547                 }
1548         }
1549
1550         for (i = 0; i < count; i++)
1551                 xstats[i].id = i;
1552         /* add an offset to driver-specific stats */
1553         for ( ; i < count + xcount; i++)
1554                 xstats[i].id += count;
1555
1556         return count + xcount;
1557 }
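
/*
 * Usage sketch (illustrative only, not part of this file): fetch all
 * extended statistics with the usual two-call pattern -- probe with a
 * NULL buffer to learn the required size, then fetch for real.  The
 * example_* name and the RTE_ETHDEV_USAGE_EXAMPLES guard are
 * hypothetical documentation aids, not DPDK symbols.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_dump_xstats(uint8_t port_id)
{
	struct rte_eth_xstat *xstats;
	int len, ret, i;

	/* A NULL buffer (or n smaller than needed) returns the count. */
	len = rte_eth_xstats_get(port_id, NULL, 0);
	if (len < 0)
		return len;

	xstats = rte_malloc(NULL, sizeof(*xstats) * len, 0);
	if (xstats == NULL)
		return -ENOMEM;

	ret = rte_eth_xstats_get(port_id, xstats, len);
	if (ret < 0 || ret > len) {
		rte_free(xstats);
		return ret < 0 ? ret : -E2BIG;
	}

	for (i = 0; i < ret; i++)
		printf("xstat[%" PRIu64 "] = %" PRIu64 "\n",
		       xstats[i].id, xstats[i].value);
	rte_free(xstats);
	return 0;
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */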
1558
1559 /* reset ethdev extended statistics */
1560 void
1561 rte_eth_xstats_reset(uint8_t port_id)
1562 {
1563         struct rte_eth_dev *dev;
1564
1565         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1566         dev = &rte_eth_devices[port_id];
1567
1568         /* implemented by the driver */
1569         if (dev->dev_ops->xstats_reset != NULL) {
1570                 (*dev->dev_ops->xstats_reset)(dev);
1571                 return;
1572         }
1573
1574         /* fallback to default */
1575         rte_eth_stats_reset(port_id);
1576 }
1577
1578 static int
1579 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1580                 uint8_t is_rx)
1581 {
1582         struct rte_eth_dev *dev;
1583
1584         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1585
1586         dev = &rte_eth_devices[port_id];
1587
1588         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1589         return (*dev->dev_ops->queue_stats_mapping_set)
1590                         (dev, queue_id, stat_idx, is_rx);
1591 }
1592
1593
1594 int
1595 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1596                 uint8_t stat_idx)
1597 {
1598         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1599                         STAT_QMAP_TX);
1600 }
1601
1602
1603 int
1604 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1605                 uint8_t stat_idx)
1606 {
1607         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1608                         STAT_QMAP_RX);
1609 }
1610
1611 void
1612 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1613 {
1614         struct rte_eth_dev *dev;
1615         const struct rte_eth_desc_lim lim = {
1616                 .nb_max = UINT16_MAX,
1617                 .nb_min = 0,
1618                 .nb_align = 1,
1619         };
1620
1621         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1622         dev = &rte_eth_devices[port_id];
1623
1624         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1625         dev_info->rx_desc_lim = lim;
1626         dev_info->tx_desc_lim = lim;
1627
1628         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1629         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1630         dev_info->pci_dev = dev->pci_dev;
1631         dev_info->driver_name = dev->data->drv_name;
1632         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1633         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1634 }
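
/*
 * Usage sketch (illustrative only): clamp an application's requested
 * RX queue count against the device limits reported above.  The
 * example_* name and the RTE_ETHDEV_USAGE_EXAMPLES guard are
 * hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static uint16_t
example_clamp_rx_queues(uint8_t port_id, uint16_t wanted)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* Never ask rte_eth_dev_configure() for more than the device has. */
	return RTE_MIN(wanted, dev_info.max_rx_queues);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */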
1635
1636 int
1637 rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
1638                                  uint32_t *ptypes, int num)
1639 {
1640         int i, j;
1641         struct rte_eth_dev *dev;
1642         const uint32_t *all_ptypes;
1643
1644         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1645         dev = &rte_eth_devices[port_id];
1646         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
1647         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
1648
1649         if (!all_ptypes)
1650                 return 0;
1651
1652         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
1653                 if (all_ptypes[i] & ptype_mask) {
1654                         if (j < num)
1655                                 ptypes[j] = all_ptypes[i];
1656                         j++;
1657                 }
1658
1659         return j;
1660 }
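
/*
 * Usage sketch (illustrative only): probe whether the PMD can classify
 * IPv4 at layer 3.  The RTE_PTYPE_* constants come from rte_mbuf.h;
 * the example_* name and the guard macro are hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_supports_ipv4(uint8_t port_id)
{
	uint32_t ptypes[16];
	int num, i;

	/* The return value may exceed the buffer; bound the loop by both. */
	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
					       ptypes, RTE_DIM(ptypes));
	for (i = 0; i < num && i < (int)RTE_DIM(ptypes); i++)
		if ((ptypes[i] & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4)
			return 1;
	return 0;
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */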
1661
1662 void
1663 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1664 {
1665         struct rte_eth_dev *dev;
1666
1667         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1668         dev = &rte_eth_devices[port_id];
1669         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1670 }
1671
1672
1673 int
1674 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1675 {
1676         struct rte_eth_dev *dev;
1677
1678         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1679
1680         dev = &rte_eth_devices[port_id];
1681         *mtu = dev->data->mtu;
1682         return 0;
1683 }
1684
1685 int
1686 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1687 {
1688         int ret;
1689         struct rte_eth_dev *dev;
1690
1691         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1692         dev = &rte_eth_devices[port_id];
1693         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1694
1695         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1696         if (!ret)
1697                 dev->data->mtu = mtu;
1698
1699         return ret;
1700 }
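
/*
 * Usage sketch (illustrative only): raise the port MTU for jumbo
 * frames, reporting a fallback if the PMD rejects the value.  9000 is
 * just a common jumbo size; the example_* name and guard macro are
 * hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_enable_jumbo(uint8_t port_id)
{
	uint16_t old_mtu;
	int ret;

	ret = rte_eth_dev_get_mtu(port_id, &old_mtu);
	if (ret < 0)
		return ret;

	ret = rte_eth_dev_set_mtu(port_id, 9000);
	if (ret < 0)
		printf("port %u: PMD refused jumbo MTU, keeping %u\n",
		       port_id, old_mtu);
	return ret;
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */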
1701
1702 int
1703 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1704 {
1705         struct rte_eth_dev *dev;
1706
1707         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1708         dev = &rte_eth_devices[port_id];
1709         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1710                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1711                 return -ENOSYS;
1712         }
1713
1714         if (vlan_id > 4095) {
1715                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1716                                 port_id, (unsigned) vlan_id);
1717                 return -EINVAL;
1718         }
1719         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1720
1721         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1722 }
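
/*
 * Usage sketch (illustrative only): admit a set of VLANs through the
 * hardware filter.  The VLAN IDs are arbitrary, and the port must have
 * been configured with rxmode.hw_vlan_filter set, otherwise -ENOSYS is
 * returned (see above).  example_* and the guard macro are hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_allow_vlans(uint8_t port_id)
{
	static const uint16_t vlans[] = { 100, 200 };
	unsigned i;
	int ret;

	for (i = 0; i < RTE_DIM(vlans); i++) {
		ret = rte_eth_dev_vlan_filter(port_id, vlans[i], 1);
		if (ret < 0)
			return ret;
	}
	return 0;
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */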
1723
1724 int
1725 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1726 {
1727         struct rte_eth_dev *dev;
1728
1729         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1730         dev = &rte_eth_devices[port_id];
1731         if (rx_queue_id >= dev->data->nb_rx_queues) {
1732                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1733                 return -EINVAL;
1734         }
1735
1736         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1737         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1738
1739         return 0;
1740 }
1741
1742 int
1743 rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
1744                                 enum rte_vlan_type vlan_type,
1745                                 uint16_t tpid)
1746 {
1747         struct rte_eth_dev *dev;
1748
1749         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1750         dev = &rte_eth_devices[port_id];
1751         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1752
1753         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
1754 }
1755
1756 int
1757 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1758 {
1759         struct rte_eth_dev *dev;
1760         int ret = 0;
1761         int mask = 0;
1762         int cur, org = 0;
1763
1764         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1765         dev = &rte_eth_devices[port_id];
1766
1767         /*check which option changed by application*/
1768         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1769         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1770         if (cur != org) {
1771                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1772                 mask |= ETH_VLAN_STRIP_MASK;
1773         }
1774
1775         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1776         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1777         if (cur != org) {
1778                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1779                 mask |= ETH_VLAN_FILTER_MASK;
1780         }
1781
1782         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1783         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1784         if (cur != org) {
1785                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1786                 mask |= ETH_VLAN_EXTEND_MASK;
1787         }
1788
1789         /*no change*/
1790         if (mask == 0)
1791                 return ret;
1792
1793         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1794         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1795
1796         return ret;
1797 }
1798
1799 int
1800 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1801 {
1802         struct rte_eth_dev *dev;
1803         int ret = 0;
1804
1805         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1806         dev = &rte_eth_devices[port_id];
1807
1808         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1809                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1810
1811         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1812                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1813
1814         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1815                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1816
1817         return ret;
1818 }
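
/*
 * Usage sketch (illustrative only): read-modify-write the VLAN offload
 * flags.  rte_eth_dev_set_vlan_offload() above only forwards the bits
 * that actually changed to the PMD.  example_* and the guard macro are
 * hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_enable_vlan_strip(uint8_t port_id)
{
	int mask;

	mask = rte_eth_dev_get_vlan_offload(port_id);
	if (mask < 0)
		return mask;
	mask |= ETH_VLAN_STRIP_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, mask);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */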
1819
1820 int
1821 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1822 {
1823         struct rte_eth_dev *dev;
1824
1825         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1826         dev = &rte_eth_devices[port_id];
1827         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1828         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1829
1830         return 0;
1831 }
1832
1833 int
1834 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1835 {
1836         struct rte_eth_dev *dev;
1837
1838         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1839         dev = &rte_eth_devices[port_id];
1840         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1841         memset(fc_conf, 0, sizeof(*fc_conf));
1842         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1843 }
1844
1845 int
1846 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1847 {
1848         struct rte_eth_dev *dev;
1849
1850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1851         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1852                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1853                 return -EINVAL;
1854         }
1855
1856         dev = &rte_eth_devices[port_id];
1857         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1858         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1859 }
1860
1861 int
1862 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1863 {
1864         struct rte_eth_dev *dev;
1865
1866         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1867         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1868                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1869                 return -EINVAL;
1870         }
1871
1872         dev = &rte_eth_devices[port_id];
1873         /* High water, low water validations are device specific */
1874         if (*dev->dev_ops->priority_flow_ctrl_set)
1875                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1876         return -ENOTSUP;
1877 }
1878
1879 static int
1880 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1881                         uint16_t reta_size)
1882 {
1883         uint16_t i, num;
1884
1885         if (!reta_conf)
1886                 return -EINVAL;
1887
1888         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1889                 RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
1890                                                         RTE_RETA_GROUP_SIZE);
1891                 return -EINVAL;
1892         }
1893
1894         num = reta_size / RTE_RETA_GROUP_SIZE;
1895         for (i = 0; i < num; i++) {
1896                 if (reta_conf[i].mask)
1897                         return 0;
1898         }
1899
1900         return -EINVAL;
1901 }
1902
1903 static int
1904 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1905                          uint16_t reta_size,
1906                          uint16_t max_rxq)
1907 {
1908         uint16_t i, idx, shift;
1909
1910         if (!reta_conf)
1911                 return -EINVAL;
1912
1913         if (max_rxq == 0) {
1914                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
1915                 return -EINVAL;
1916         }
1917
1918         for (i = 0; i < reta_size; i++) {
1919                 idx = i / RTE_RETA_GROUP_SIZE;
1920                 shift = i % RTE_RETA_GROUP_SIZE;
1921                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1922                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1923                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1924                                 "the maximum rxq index: %u\n", idx, shift,
1925                                 reta_conf[idx].reta[shift], max_rxq);
1926                         return -EINVAL;
1927                 }
1928         }
1929
1930         return 0;
1931 }
1932
1933 int
1934 rte_eth_dev_rss_reta_update(uint8_t port_id,
1935                             struct rte_eth_rss_reta_entry64 *reta_conf,
1936                             uint16_t reta_size)
1937 {
1938         struct rte_eth_dev *dev;
1939         int ret;
1940
1941         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1942         /* Check mask bits */
1943         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1944         if (ret < 0)
1945                 return ret;
1946
1947         dev = &rte_eth_devices[port_id];
1948
1949         /* Check entry value */
1950         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1951                                 dev->data->nb_rx_queues);
1952         if (ret < 0)
1953                 return ret;
1954
1955         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1956         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1957 }
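
/*
 * Usage sketch (illustrative only): build a redirection table that
 * spreads traffic round-robin over nb_rxq queues, using the same
 * idx/shift arithmetic as the checkers above.  In real code take
 * reta_size from rte_eth_dev_info_get(); example_* and the guard
 * macro are hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_spread_rss(uint8_t port_id, uint16_t reta_size, uint16_t nb_rxq)
{
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	uint16_t i;

	if (nb_rxq == 0 ||
	    reta_size > RTE_DIM(reta_conf) * RTE_RETA_GROUP_SIZE)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; i++) {
		uint16_t idx = i / RTE_RETA_GROUP_SIZE;
		uint16_t shift = i % RTE_RETA_GROUP_SIZE;

		reta_conf[idx].mask |= 1ULL << shift;    /* entry valid */
		reta_conf[idx].reta[shift] = i % nb_rxq; /* target queue */
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */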
1958
1959 int
1960 rte_eth_dev_rss_reta_query(uint8_t port_id,
1961                            struct rte_eth_rss_reta_entry64 *reta_conf,
1962                            uint16_t reta_size)
1963 {
1964         struct rte_eth_dev *dev;
1965         int ret;
1966
1967         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1968
1969         /* Check mask bits */
1970         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1971         if (ret < 0)
1972                 return ret;
1973
1974         dev = &rte_eth_devices[port_id];
1975         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1976         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1977 }
1978
1979 int
1980 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1981 {
1982         struct rte_eth_dev *dev;
1983         uint16_t rss_hash_protos;
1984
1985         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1986         rss_hash_protos = rss_conf->rss_hf;
1987         if ((rss_hash_protos != 0) &&
1988             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1989                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1990                                 rss_hash_protos);
1991                 return -EINVAL;
1992         }
1993         dev = &rte_eth_devices[port_id];
1994         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1995         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1996 }
1997
1998 int
1999 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
2000                               struct rte_eth_rss_conf *rss_conf)
2001 {
2002         struct rte_eth_dev *dev;
2003
2004         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2005         dev = &rte_eth_devices[port_id];
2006         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2007         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2008 }
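
/*
 * Usage sketch (illustrative only): restrict the RSS hash to IP-based
 * protocols while leaving the hash key untouched.  example_* and the
 * guard macro are hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_hash_on_ip(uint8_t port_id)
{
	struct rte_eth_rss_conf rss_conf;
	int ret;

	memset(&rss_conf, 0, sizeof(rss_conf));
	rss_conf.rss_key = NULL;	/* don't read the key back */
	ret = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (ret < 0)
		return ret;

	rss_conf.rss_hf = ETH_RSS_IP;
	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */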
2009
2010 int
2011 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
2012                                 struct rte_eth_udp_tunnel *udp_tunnel)
2013 {
2014         struct rte_eth_dev *dev;
2015
2016         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2017         if (udp_tunnel == NULL) {
2018                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2019                 return -EINVAL;
2020         }
2021
2022         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2023                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2024                 return -EINVAL;
2025         }
2026
2027         dev = &rte_eth_devices[port_id];
2028         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2029         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
2030 }
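
/*
 * Usage sketch (illustrative only): teach the NIC the IANA-assigned
 * VXLAN UDP port so it can recognize encapsulated traffic.  example_*
 * and the guard macro are hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_add_vxlan_port(uint8_t port_id)
{
	struct rte_eth_udp_tunnel tunnel = {
		.udp_port = 4789,	/* IANA port for VXLAN */
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */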
2031
2032 int
2033 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
2034                                    struct rte_eth_udp_tunnel *udp_tunnel)
2035 {
2036         struct rte_eth_dev *dev;
2037
2038         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2039         dev = &rte_eth_devices[port_id];
2040
2041         if (udp_tunnel == NULL) {
2042                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2043                 return -EINVAL;
2044         }
2045
2046         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2047                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2048                 return -EINVAL;
2049         }
2050
2051         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2052         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2053 }
2054
2055 int
2056 rte_eth_led_on(uint8_t port_id)
2057 {
2058         struct rte_eth_dev *dev;
2059
2060         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2061         dev = &rte_eth_devices[port_id];
2062         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2063         return (*dev->dev_ops->dev_led_on)(dev);
2064 }
2065
2066 int
2067 rte_eth_led_off(uint8_t port_id)
2068 {
2069         struct rte_eth_dev *dev;
2070
2071         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2072         dev = &rte_eth_devices[port_id];
2073         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2074         return (*dev->dev_ops->dev_led_off)(dev);
2075 }
2076
2077 /*
2078  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2079  * an empty spot.
2080  */
2081 static int
2082 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2083 {
2084         struct rte_eth_dev_info dev_info;
2085         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2086         unsigned i;
2087
2088         rte_eth_dev_info_get(port_id, &dev_info);
2089
2090         for (i = 0; i < dev_info.max_mac_addrs; i++)
2091                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2092                         return i;
2093
2094         return -1;
2095 }
2096
2097 static const struct ether_addr null_mac_addr;
2098
2099 int
2100 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2101                         uint32_t pool)
2102 {
2103         struct rte_eth_dev *dev;
2104         int index;
2105         uint64_t pool_mask;
2106
2107         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2108         dev = &rte_eth_devices[port_id];
2109         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2110
2111         if (is_zero_ether_addr(addr)) {
2112                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2113                         port_id);
2114                 return -EINVAL;
2115         }
2116         if (pool >= ETH_64_POOLS) {
2117                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2118                 return -EINVAL;
2119         }
2120
2121         index = get_mac_addr_index(port_id, addr);
2122         if (index < 0) {
2123                 index = get_mac_addr_index(port_id, &null_mac_addr);
2124                 if (index < 0) {
2125                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2126                                 port_id);
2127                         return -ENOSPC;
2128                 }
2129         } else {
2130                 pool_mask = dev->data->mac_pool_sel[index];
2131
2132                 /* Check if both MAC address and pool are already there, and do nothing */
2133                 if (pool_mask & (1ULL << pool))
2134                         return 0;
2135         }
2136
2137         /* Update NIC */
2138         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2139
2140         /* Update address in NIC data structure */
2141         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2142
2143         /* Update pool bitmap in NIC data structure */
2144         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2145
2146         return 0;
2147 }
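
/*
 * Usage sketch (illustrative only): add a secondary unicast address to
 * pool 0.  The 02:xx address is locally administered and chosen purely
 * for the example; example_* and the guard macro are hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static int
example_add_secondary_mac(uint8_t port_id)
{
	struct ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};

	return rte_eth_dev_mac_addr_add(port_id, &addr, 0 /* pool */);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */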
2148
2149 int
2150 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2151 {
2152         struct rte_eth_dev *dev;
2153         int index;
2154
2155         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2156         dev = &rte_eth_devices[port_id];
2157         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2158
2159         index = get_mac_addr_index(port_id, addr);
2160         if (index == 0) {
2161                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2162                 return -EADDRINUSE;
2163         } else if (index < 0)
2164                 return 0;  /* Do nothing if address wasn't found */
2165
2166         /* Update NIC */
2167         (*dev->dev_ops->mac_addr_remove)(dev, index);
2168
2169         /* Update address in NIC data structure */
2170         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2171
2172         /* reset pool bitmap */
2173         dev->data->mac_pool_sel[index] = 0;
2174
2175         return 0;
2176 }
2177
2178 int
2179 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2180 {
2181         struct rte_eth_dev *dev;
2182
2183         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2184
2185         if (!is_valid_assigned_ether_addr(addr))
2186                 return -EINVAL;
2187
2188         dev = &rte_eth_devices[port_id];
2189         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2190
2191         /* Update default address in NIC data structure */
2192         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2193
2194         (*dev->dev_ops->mac_addr_set)(dev, addr);
2195
2196         return 0;
2197 }
2198
2199 int
2200 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2201                                 uint16_t rx_mode, uint8_t on)
2202 {
2203         uint16_t num_vfs;
2204         struct rte_eth_dev *dev;
2205         struct rte_eth_dev_info dev_info;
2206
2207         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2208
2209         dev = &rte_eth_devices[port_id];
2210         rte_eth_dev_info_get(port_id, &dev_info);
2211
2212         num_vfs = dev_info.max_vfs;
2213         if (vf >= num_vfs) {
2214                 RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2215                 return -EINVAL;
2216         }
2217
2218         if (rx_mode == 0) {
2219                 RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask can not be zero\n");
2220                 return -EINVAL;
2221         }
2222         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2223         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2224 }
2225
2226 /*
2227  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2228  * an empty spot.
2229  */
2230 static int
2231 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2232 {
2233         struct rte_eth_dev_info dev_info;
2234         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2235         unsigned i;
2236
2237         rte_eth_dev_info_get(port_id, &dev_info);
2238         if (!dev->data->hash_mac_addrs)
2239                 return -1;
2240
2241         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2242                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2243                         ETHER_ADDR_LEN) == 0)
2244                         return i;
2245
2246         return -1;
2247 }
2248
2249 int
2250 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2251                                 uint8_t on)
2252 {
2253         int index;
2254         int ret;
2255         struct rte_eth_dev *dev;
2256
2257         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2258
2259         dev = &rte_eth_devices[port_id];
2260         if (is_zero_ether_addr(addr)) {
2261                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2262                         port_id);
2263                 return -EINVAL;
2264         }
2265
2266         index = get_hash_mac_addr_index(port_id, addr);
2267         /* Check if it's already there, and do nothing */
2268         if ((index >= 0) && (on))
2269                 return 0;
2270
2271         if (index < 0) {
2272                 if (!on) {
2273                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2274                                 "set in UTA\n", port_id);
2275                         return -EINVAL;
2276                 }
2277
2278                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2279                 if (index < 0) {
2280                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2281                                         port_id);
2282                         return -ENOSPC;
2283                 }
2284         }
2285
2286         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2287         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2288         if (ret == 0) {
2289                 /* Update address in NIC data structure */
2290                 if (on)
2291                         ether_addr_copy(addr,
2292                                         &dev->data->hash_mac_addrs[index]);
2293                 else
2294                         ether_addr_copy(&null_mac_addr,
2295                                         &dev->data->hash_mac_addrs[index]);
2296         }
2297
2298         return ret;
2299 }
2300
2301 int
2302 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2303 {
2304         struct rte_eth_dev *dev;
2305
2306         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2307
2308         dev = &rte_eth_devices[port_id];
2309
2310         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2311         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2312 }
2313
2314 int
2315 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2316 {
2317         uint16_t num_vfs;
2318         struct rte_eth_dev *dev;
2319         struct rte_eth_dev_info dev_info;
2320
2321         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2322
2323         dev = &rte_eth_devices[port_id];
2324         rte_eth_dev_info_get(port_id, &dev_info);
2325
2326         num_vfs = dev_info.max_vfs;
2327         if (vf >= num_vfs) {
2328                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2329                 return -EINVAL;
2330         }
2331
2332         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2333         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2334 }
2335
2336 int
2337 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2338 {
2339         uint16_t num_vfs;
2340         struct rte_eth_dev *dev;
2341         struct rte_eth_dev_info dev_info;
2342
2343         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2344
2345         dev = &rte_eth_devices[port_id];
2346         rte_eth_dev_info_get(port_id, &dev_info);
2347
2348         num_vfs = dev_info.max_vfs;
2349         if (vf >= num_vfs) {
2350                 RTE_PMD_DEBUG_TRACE("set VF TX:invalid VF id=%d\n", vf);
2351                 return -EINVAL;
2352         }
2353
2354         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2355         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2356 }
2357
2358 int
2359 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2360                                uint64_t vf_mask, uint8_t vlan_on)
2361 {
2362         struct rte_eth_dev *dev;
2363
2364         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2365
2366         dev = &rte_eth_devices[port_id];
2367
2368         if (vlan_id > ETHER_MAX_VLAN_ID) {
2369                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2370                         vlan_id);
2371                 return -EINVAL;
2372         }
2373
2374         if (vf_mask == 0) {
2375                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2376                 return -EINVAL;
2377         }
2378
2379         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2380         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2381                                                    vf_mask, vlan_on);
2382 }
2383
2384 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2385                                         uint16_t tx_rate)
2386 {
2387         struct rte_eth_dev *dev;
2388         struct rte_eth_dev_info dev_info;
2389         struct rte_eth_link link;
2390
2391         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2392
2393         dev = &rte_eth_devices[port_id];
2394         rte_eth_dev_info_get(port_id, &dev_info);
2395         link = dev->data->dev_link;
2396
2397         if (queue_idx >= dev_info.max_tx_queues) {
2398                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2399                                 "invalid queue id=%d\n", port_id, queue_idx);
2400                 return -EINVAL;
2401         }
2402
2403         if (tx_rate > link.link_speed) {
2404                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2405                                 "bigger than link speed= %d\n",
2406                         tx_rate, link.link_speed);
2407                 return -EINVAL;
2408         }
2409
2410         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2411         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2412 }
2413
2414 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2415                                 uint64_t q_msk)
2416 {
2417         struct rte_eth_dev *dev;
2418         struct rte_eth_dev_info dev_info;
2419         struct rte_eth_link link;
2420
2421         if (q_msk == 0)
2422                 return 0;
2423
2424         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2425
2426         dev = &rte_eth_devices[port_id];
2427         rte_eth_dev_info_get(port_id, &dev_info);
2428         link = dev->data->dev_link;
2429
2430         if (vf >= dev_info.max_vfs) {
2431                 RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2432                                 "invalid vf id=%d\n", port_id, vf);
2433                 return -EINVAL;
2434         }
2435
2436         if (tx_rate > link.link_speed) {
2437                 RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2438                                 "bigger than link speed= %d\n",
2439                                 tx_rate, link.link_speed);
2440                 return -EINVAL;
2441         }
2442
2443         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2444         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2445 }
2446
2447 int
2448 rte_eth_mirror_rule_set(uint8_t port_id,
2449                         struct rte_eth_mirror_conf *mirror_conf,
2450                         uint8_t rule_id, uint8_t on)
2451 {
2452         struct rte_eth_dev *dev;
2453
2454         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2455         if (mirror_conf->rule_type == 0) {
2456                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2457                 return -EINVAL;
2458         }
2459
2460         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2461                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2462                                 ETH_64_POOLS - 1);
2463                 return -EINVAL;
2464         }
2465
2466         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2467              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2468             (mirror_conf->pool_mask == 0)) {
2469                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2470                 return -EINVAL;
2471         }
2472
2473         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2474             mirror_conf->vlan.vlan_mask == 0) {
2475                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2476                 return -EINVAL;
2477         }
2478
2479         dev = &rte_eth_devices[port_id];
2480         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2481
2482         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2483 }
2484
2485 int
2486 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2487 {
2488         struct rte_eth_dev *dev;
2489
2490         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2491
2492         dev = &rte_eth_devices[port_id];
2493         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2494
2495         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2496 }
2497
2498 int
2499 rte_eth_dev_callback_register(uint8_t port_id,
2500                         enum rte_eth_event_type event,
2501                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2502 {
2503         struct rte_eth_dev *dev;
2504         struct rte_eth_dev_callback *user_cb;
2505
2506         if (!cb_fn)
2507                 return -EINVAL;
2508
2509         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2510
2511         dev = &rte_eth_devices[port_id];
2512         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2513
2514         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2515                 if (user_cb->cb_fn == cb_fn &&
2516                         user_cb->cb_arg == cb_arg &&
2517                         user_cb->event == event) {
2518                         break;
2519                 }
2520         }
2521
2522         /* create a new callback. */
2523         if (user_cb == NULL) {
2524                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2525                                         sizeof(struct rte_eth_dev_callback), 0);
2526                 if (user_cb != NULL) {
2527                         user_cb->cb_fn = cb_fn;
2528                         user_cb->cb_arg = cb_arg;
2529                         user_cb->event = event;
2530                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2531                 }
2532         }
2533
2534         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2535         return (user_cb == NULL) ? -ENOMEM : 0;
2536 }
2537
2538 int
2539 rte_eth_dev_callback_unregister(uint8_t port_id,
2540                         enum rte_eth_event_type event,
2541                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2542 {
2543         int ret;
2544         struct rte_eth_dev *dev;
2545         struct rte_eth_dev_callback *cb, *next;
2546
2547         if (!cb_fn)
2548                 return -EINVAL;
2549
2550         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2551
2552         dev = &rte_eth_devices[port_id];
2553         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2554
2555         ret = 0;
2556         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2557
2558                 next = TAILQ_NEXT(cb, next);
2559
2560                 if (cb->cb_fn != cb_fn || cb->event != event ||
2561                                 (cb->cb_arg != (void *)-1 &&
2562                                 cb->cb_arg != cb_arg))
2563                         continue;
2564
2565                 /*
2566                  * if this callback is not executing right now,
2567                  * then remove it.
2568                  */
2569                 if (cb->active == 0) {
2570                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2571                         rte_free(cb);
2572                 } else {
2573                         ret = -EAGAIN;
2574                 }
2575         }
2576
2577         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2578         return ret;
2579 }
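
/*
 * Usage sketch (illustrative only): watch link-state-change interrupts.
 * The callback typically runs from the EAL interrupt thread, so it
 * should stay short and avoid data-path work.  example_* and the guard
 * macro are hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static void
example_lsc_cb(uint8_t port_id, enum rte_eth_event_type event, void *arg)
{
	struct rte_eth_link link;

	RTE_SET_USED(event);
	RTE_SET_USED(arg);
	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u link is %s\n", port_id,
	       link.link_status ? "up" : "down");
}

static int
example_watch_link(uint8_t port_id)
{
	return rte_eth_dev_callback_register(port_id,
			RTE_ETH_EVENT_INTR_LSC, example_lsc_cb, NULL);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */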
2580
2581 void
2582 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2583         enum rte_eth_event_type event, void *cb_arg)
2584 {
2585         struct rte_eth_dev_callback *cb_lst;
2586         struct rte_eth_dev_callback dev_cb;
2587
2588         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2589         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2590                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2591                         continue;
2592                 dev_cb = *cb_lst;
2593                 cb_lst->active = 1;
2594                 if (cb_arg != NULL)
2595                         dev_cb.cb_arg = (void *) cb_arg;
2596
2597                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2598                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2599                                                 dev_cb.cb_arg);
2600                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2601                 cb_lst->active = 0;
2602         }
2603         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2604 }
2605
2606 int
2607 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2608 {
2609         uint32_t vec;
2610         struct rte_eth_dev *dev;
2611         struct rte_intr_handle *intr_handle;
2612         uint16_t qid;
2613         int rc;
2614
2615         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2616
2617         dev = &rte_eth_devices[port_id];
2618         intr_handle = &dev->pci_dev->intr_handle;
2619         if (!intr_handle->intr_vec) {
2620                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2621                 return -EPERM;
2622         }
2623
2624         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2625                 vec = intr_handle->intr_vec[qid];
2626                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2627                 if (rc && rc != -EEXIST) {
2628                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2629                                         " op %d epfd %d vec %u\n",
2630                                         port_id, qid, op, epfd, vec);
2631                 }
2632         }
2633
2634         return 0;
2635 }
2636
2637 const struct rte_memzone *
2638 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2639                          uint16_t queue_id, size_t size, unsigned align,
2640                          int socket_id)
2641 {
2642         char z_name[RTE_MEMZONE_NAMESIZE];
2643         const struct rte_memzone *mz;
2644
2645         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2646                  dev->driver->pci_drv.driver.name, ring_name,
2647                  dev->data->port_id, queue_id);
2648
2649         mz = rte_memzone_lookup(z_name);
2650         if (mz)
2651                 return mz;
2652
2653         if (rte_xen_dom0_supported())
2654                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2655                                                    0, align, RTE_PGSIZE_2M);
2656         else
2657                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2658                                                    0, align);
2659 }
2660
2661 int
2662 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2663                           int epfd, int op, void *data)
2664 {
2665         uint32_t vec;
2666         struct rte_eth_dev *dev;
2667         struct rte_intr_handle *intr_handle;
2668         int rc;
2669
2670         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2671
2672         dev = &rte_eth_devices[port_id];
2673         if (queue_id >= dev->data->nb_rx_queues) {
2674                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2675                 return -EINVAL;
2676         }
2677
2678         intr_handle = &dev->pci_dev->intr_handle;
2679         if (!intr_handle->intr_vec) {
2680                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2681                 return -EPERM;
2682         }
2683
2684         vec = intr_handle->intr_vec[queue_id];
2685         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2686         if (rc && rc != -EEXIST) {
2687                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2688                                 " op %d epfd %d vec %u\n",
2689                                 port_id, queue_id, op, epfd, vec);
2690                 return rc;
2691         }
2692
2693         return 0;
2694 }
2695
2696 int
2697 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2698                            uint16_t queue_id)
2699 {
2700         struct rte_eth_dev *dev;
2701
2702         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2703
2704         dev = &rte_eth_devices[port_id];
2705
2706         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2707         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2708 }
2709
2710 int
2711 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2712                             uint16_t queue_id)
2713 {
2714         struct rte_eth_dev *dev;
2715
2716         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2717
2718         dev = &rte_eth_devices[port_id];
2719
2720         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2721         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2722 }
2723
2724 #ifdef RTE_NIC_BYPASS
2725 int rte_eth_dev_bypass_init(uint8_t port_id)
2726 {
2727         struct rte_eth_dev *dev;
2728
2729         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2730
2731         dev = &rte_eth_devices[port_id];
2732         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2733         (*dev->dev_ops->bypass_init)(dev);
2734         return 0;
2735 }
2736
2737 int
2738 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2739 {
2740         struct rte_eth_dev *dev;
2741
2742         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2743
2744         dev = &rte_eth_devices[port_id];
2745         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2746         (*dev->dev_ops->bypass_state_show)(dev, state);
2747         return 0;
2748 }
2749
2750 int
2751 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2752 {
2753         struct rte_eth_dev *dev;
2754
2755         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2756
2757         dev = &rte_eth_devices[port_id];
2758         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2759         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2760         return 0;
2761 }
2762
2763 int
2764 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2765 {
2766         struct rte_eth_dev *dev;
2767
2768         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2769
2770         dev = &rte_eth_devices[port_id];
2771         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2772         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2773         return 0;
2774 }
2775
2776 int
2777 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2778 {
2779         struct rte_eth_dev *dev;
2780
2781         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2782
2783         dev = &rte_eth_devices[port_id];
2784
2785         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2786         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2787         return 0;
2788 }
2789
2790 int
2791 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2792 {
2793         struct rte_eth_dev *dev;
2794
2795         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2796
2797         dev = &rte_eth_devices[port_id];
2798
2799         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2800         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2801         return 0;
2802 }
2803
2804 int
2805 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2806 {
2807         struct rte_eth_dev *dev;
2808
2809         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2810
2811         dev = &rte_eth_devices[port_id];
2812
2813         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2814         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2815         return 0;
2816 }
2817
2818 int
2819 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2820 {
2821         struct rte_eth_dev *dev;
2822
2823         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2824
2825         dev = &rte_eth_devices[port_id];
2826
2827         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2828         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2829         return 0;
2830 }
2831
2832 int
2833 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2834 {
2835         struct rte_eth_dev *dev;
2836
2837         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2838
2839         dev = &rte_eth_devices[port_id];
2840
2841         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2842         (*dev->dev_ops->bypass_wd_reset)(dev);
2843         return 0;
2844 }
2845 #endif
2846
2847 int
2848 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2849 {
2850         struct rte_eth_dev *dev;
2851
2852         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2853
2854         dev = &rte_eth_devices[port_id];
2855         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2856         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2857                                 RTE_ETH_FILTER_NOP, NULL);
2858 }
2859
2860 int
2861 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2862                        enum rte_filter_op filter_op, void *arg)
2863 {
2864         struct rte_eth_dev *dev;
2865
2866         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2867
2868         dev = &rte_eth_devices[port_id];
2869         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2870         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2871 }
2872
2873 void *
2874 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2875                 rte_rx_callback_fn fn, void *user_param)
2876 {
2877 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2878         rte_errno = ENOTSUP;
2879         return NULL;
2880 #endif
2881         /* check input parameters */
2882         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2883                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2884                 rte_errno = EINVAL;
2885                 return NULL;
2886         }
2887         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2888
2889         if (cb == NULL) {
2890                 rte_errno = ENOMEM;
2891                 return NULL;
2892         }
2893
2894         cb->fn.rx = fn;
2895         cb->param = user_param;
2896
2897         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2898         /* Add the callbacks in fifo order. */
2899         struct rte_eth_rxtx_callback *tail =
2900                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2901
2902         if (!tail) {
2903                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2904
2905         } else {
2906                 while (tail->next)
2907                         tail = tail->next;
2908                 tail->next = cb;
2909         }
2910         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2911
2912         return cb;
2913 }
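
/*
 * Usage sketch (illustrative only): a pass-through RX callback that
 * counts received packets on one queue.  Only useful when the library
 * is built with RTE_ETHDEV_RXTX_CALLBACKS; example_* and the
 * RTE_ETHDEV_USAGE_EXAMPLES guard are hypothetical.
 */
#ifdef RTE_ETHDEV_USAGE_EXAMPLES
static uint16_t
example_count_cb(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf *pkts[], uint16_t nb_pkts,
		 uint16_t max_pkts, void *user_param)
{
	uint64_t *counter = user_param;

	RTE_SET_USED(port_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(pkts);
	RTE_SET_USED(max_pkts);
	*counter += nb_pkts;
	return nb_pkts;		/* pass the burst through unmodified */
}

static void *
example_install_counter(uint8_t port_id, uint16_t queue_id,
			uint64_t *counter)
{
	/* A NULL return means rte_errno holds the failure reason. */
	return rte_eth_add_rx_callback(port_id, queue_id,
				       example_count_cb, counter);
}
#endif /* RTE_ETHDEV_USAGE_EXAMPLES */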
2914
2915 void *
2916 rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
2917                 rte_rx_callback_fn fn, void *user_param)
2918 {
2919 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2920         rte_errno = ENOTSUP;
2921         return NULL;
2922 #endif
2923         /* check input parameters */
2924         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2925                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2926                 rte_errno = EINVAL;
2927                 return NULL;
2928         }
2929
2930         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2931
2932         if (cb == NULL) {
2933                 rte_errno = ENOMEM;
2934                 return NULL;
2935         }
2936
2937         cb->fn.rx = fn;
2938         cb->param = user_param;
2939
2940         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2941         /* Add the callback at the first position. */
2942         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2943         rte_smp_wmb();
2944         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2945         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2946
2947         return cb;
2948 }
2949
2950 void *
2951 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2952                 rte_tx_callback_fn fn, void *user_param)
2953 {
2954 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2955         rte_errno = ENOTSUP;
2956         return NULL;
2957 #endif
2958         /* check input parameters */
2959         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2960                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2961                 rte_errno = EINVAL;
2962                 return NULL;
2963         }
2964
2965         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2966
2967         if (cb == NULL) {
2968                 rte_errno = ENOMEM;
2969                 return NULL;
2970         }
2971
2972         cb->fn.tx = fn;
2973         cb->param = user_param;
2974
2975         rte_spinlock_lock(&rte_eth_tx_cb_lock);
2976         /* Add the callbacks in fifo order. */
2977         struct rte_eth_rxtx_callback *tail =
2978                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2979
2980         if (!tail) {
2981                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2982
2983         } else {
2984                 while (tail->next)
2985                         tail = tail->next;
2986                 tail->next = cb;
2987         }
2988         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2989
2990         return cb;
2991 }
2992
2993 int
2994 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2995                 struct rte_eth_rxtx_callback *user_cb)
2996 {
2997 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2998         return -ENOTSUP;
2999 #endif
3000         /* Check input parameters. */
3001         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3002         if (user_cb == NULL ||
3003                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3004                 return -EINVAL;
3005
3006         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3007         struct rte_eth_rxtx_callback *cb;
3008         struct rte_eth_rxtx_callback **prev_cb;
3009         int ret = -EINVAL;
3010
3011         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3012         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3013         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3014                 cb = *prev_cb;
3015                 if (cb == user_cb) {
3016                         /* Remove the user cb from the callback list. */
3017                         *prev_cb = cb->next;
3018                         ret = 0;
3019                         break;
3020                 }
3021         }
3022         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3023
3024         return ret;
3025 }
3026
3027 int
3028 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3029                 struct rte_eth_rxtx_callback *user_cb)
3030 {
3031 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3032         return -ENOTSUP;
3033 #endif
3034         /* Check input parameters. */
3035         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3036         if (user_cb == NULL ||
3037                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3038                 return -EINVAL;
3039
3040         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3041         int ret = -EINVAL;
3042         struct rte_eth_rxtx_callback *cb;
3043         struct rte_eth_rxtx_callback **prev_cb;
3044
3045         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3046         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3047         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3048                 cb = *prev_cb;
3049                 if (cb == user_cb) {
3050                         /* Remove the user cb from the callback list. */
3051                         *prev_cb = cb->next;
3052                         ret = 0;
3053                         break;
3054                 }
3055         }
3056         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3057
3058         return ret;
3059 }
3060
3061 int
3062 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3063         struct rte_eth_rxq_info *qinfo)
3064 {
3065         struct rte_eth_dev *dev;
3066
3067         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3068
3069         if (qinfo == NULL)
3070                 return -EINVAL;
3071
3072         dev = &rte_eth_devices[port_id];
3073         if (queue_id >= dev->data->nb_rx_queues) {
3074                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3075                 return -EINVAL;
3076         }
3077
3078         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3079
3080         memset(qinfo, 0, sizeof(*qinfo));
3081         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3082         return 0;
3083 }
3084
3085 int
3086 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3087         struct rte_eth_txq_info *qinfo)
3088 {
3089         struct rte_eth_dev *dev;
3090
3091         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3092
3093         if (qinfo == NULL)
3094                 return -EINVAL;
3095
3096         dev = &rte_eth_devices[port_id];
3097         if (queue_id >= dev->data->nb_tx_queues) {
3098                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3099                 return -EINVAL;
3100         }
3101
3102         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3103
3104         memset(qinfo, 0, sizeof(*qinfo));
3105         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3106         return 0;
3107 }
3108
3109 int
3110 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3111                              struct ether_addr *mc_addr_set,
3112                              uint32_t nb_mc_addr)
3113 {
3114         struct rte_eth_dev *dev;
3115
3116         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3117
3118         dev = &rte_eth_devices[port_id];
3119         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3120         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3121 }

int
rte_eth_timesync_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
	return (*dev->dev_ops->timesync_enable)(dev);
}

int
rte_eth_timesync_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
	return (*dev->dev_ops->timesync_disable)(dev);
}

int
rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
				   uint32_t flags)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
}

int
rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
}

int
rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
}

int
rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
}

int
rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
	return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
}
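
/*
 * Usage sketch (illustrative only): a minimal IEEE 1588 sequence on a
 * hypothetical port 0, assuming the PMD implements the timesync ops.
 *
 *	struct timespec ts;
 *
 *	rte_eth_timesync_enable(0);
 *	if (rte_eth_timesync_read_rx_timestamp(0, &ts, 0) == 0)
 *		printf("PTP frame at %ld.%09ld\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 *	rte_eth_timesync_adjust_time(0, -1000);
 *	rte_eth_timesync_disable(0);
 *
 * adjust_time() shifts the device clock, here back by 1000 ns.  The
 * flags argument of the RX read is PMD specific (typically a timestamp
 * register index); 0 is used above as a placeholder.
 */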

/*
 * Retrieve the device's register set.  By convention, when info->data is
 * NULL the PMD fills in only info->length and info->width so the caller
 * can size a buffer for a second call.
 */
int
rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
	return (*dev->dev_ops->get_reg)(dev, info);
}
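
/*
 * Usage sketch (illustrative only): the two-call pattern for dumping the
 * registers of a hypothetical port 0, relying on the data == NULL sizing
 * convention described above.
 *
 *	struct rte_dev_reg_info reg = { .data = NULL };
 *
 *	if (rte_eth_dev_get_reg_info(0, &reg) == 0) {
 *		reg.data = malloc(reg.length * reg.width);
 *		if (reg.data != NULL)
 *			rte_eth_dev_get_reg_info(0, &reg);
 *	}
 */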

int
rte_eth_dev_get_eeprom_length(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom_length)(dev);
}

int
rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
	return (*dev->dev_ops->get_eeprom)(dev, info);
}

int
rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
	return (*dev->dev_ops->set_eeprom)(dev, info);
}
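
/*
 * Usage sketch (illustrative only): reading the entire EEPROM of a
 * hypothetical port 0.  struct rte_dev_eeprom_info carries a caller
 * supplied buffer plus the offset/length window to transfer.
 *
 *	int len = rte_eth_dev_get_eeprom_length(0);
 *
 *	if (len > 0) {
 *		struct rte_dev_eeprom_info ee = {
 *			.data = malloc(len),
 *			.offset = 0,
 *			.length = (uint32_t)len,
 *		};
 *
 *		if (ee.data != NULL && rte_eth_dev_get_eeprom(0, &ee) == 0)
 *			printf("read %u bytes of eeprom\n", ee.length);
 *	}
 */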

int
rte_eth_dev_get_dcb_info(uint8_t port_id,
			     struct rte_eth_dcb_info *dcb_info)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Validate before the memset below dereferences the pointer. */
	if (dcb_info == NULL)
		return -EINVAL;

	dev = &rte_eth_devices[port_id];
	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
	return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
}
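
/*
 * Usage sketch (illustrative only): querying the DCB traffic-class
 * configuration of a hypothetical port 0.
 *
 *	struct rte_eth_dcb_info dcb;
 *
 *	if (rte_eth_dev_get_dcb_info(0, &dcb) == 0)
 *		printf("%d traffic classes\n", dcb.nb_tcs);
 */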

/*
 * Helper for PMDs: seed the generic ethdev data (interrupt/detach flags,
 * kernel driver, NUMA node and driver name) from the underlying PCI
 * device during probe.
 */
void
rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
{
	if ((eth_dev == NULL) || (pci_dev == NULL)) {
		RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
				eth_dev, pci_dev);
		return;
	}

	eth_dev->data->dev_flags = 0;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	eth_dev->data->kdrv = pci_dev->kdrv;
	eth_dev->data->numa_node = pci_dev->device.numa_node;
	eth_dev->data->drv_name = pci_dev->driver->driver.name;
}

int
rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
}

int
rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
				  uint32_t mask,
				  uint8_t en)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (l2_tunnel == NULL) {
		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
		return -EINVAL;
	}

	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
		return -EINVAL;
	}

	if (mask == 0) {
		RTE_PMD_DEBUG_TRACE("At least one offload mask bit must be set\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
				-ENOTSUP);
	return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}
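
/*
 * Usage sketch (illustrative only, assuming a PMD with E-Tag support
 * such as ixgbe): setting the outer ether type and enabling stripping
 * for E-Tag tunnels on a hypothetical port 0.  The mask bits are the
 * ETH_L2_TUNNEL_*_MASK flags from rte_ethdev.h.
 *
 *	struct rte_eth_l2_tunnel_conf conf = {
 *		.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG,
 *		.ether_type = 0x893f,
 *	};
 *
 *	rte_eth_dev_l2_tunnel_eth_type_conf(0, &conf);
 *	rte_eth_dev_l2_tunnel_offload_set(0, &conf,
 *					  ETH_L2_TUNNEL_STRIPPING_MASK, 1);
 */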