/* deb_dpdk.git: lib/librte_ether/rte_ethdev.c (Imported Upstream version 16.11.1) */
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/types.h>
35 #include <sys/queue.h>
36 #include <ctype.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <stdarg.h>
41 #include <errno.h>
42 #include <stdint.h>
43 #include <inttypes.h>
44 #include <netinet/in.h>
45
46 #include <rte_byteorder.h>
47 #include <rte_log.h>
48 #include <rte_debug.h>
49 #include <rte_interrupts.h>
50 #include <rte_pci.h>
51 #include <rte_memory.h>
52 #include <rte_memcpy.h>
53 #include <rte_memzone.h>
54 #include <rte_launch.h>
55 #include <rte_eal.h>
56 #include <rte_per_lcore.h>
57 #include <rte_lcore.h>
58 #include <rte_atomic.h>
59 #include <rte_branch_prediction.h>
60 #include <rte_common.h>
61 #include <rte_mempool.h>
62 #include <rte_malloc.h>
63 #include <rte_mbuf.h>
64 #include <rte_errno.h>
65 #include <rte_spinlock.h>
66 #include <rte_string_fns.h>
67
68 #include "rte_ether.h"
69 #include "rte_ethdev.h"
70
71 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
72 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
73 static struct rte_eth_dev_data *rte_eth_dev_data;
74 static uint8_t eth_dev_last_created_port;
75 static uint8_t nb_ports;
76
77 /* spinlock for eth device callbacks */
78 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
79
80 /* spinlock for add/remove rx callbacks */
81 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
82
83 /* spinlock for add/remove tx callbacks */
84 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
85
86 /* store statistics names and their offsets in the stats structure */
87 struct rte_eth_xstats_name_off {
88         char name[RTE_ETH_XSTATS_NAME_SIZE];
89         unsigned offset;
90 };
91
92 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
93         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
94         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
95         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
96         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
97         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
98         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
99         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
100                 rx_nombuf)},
101 };
102
103 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
104
105 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
106         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
107         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
108         {"errors", offsetof(struct rte_eth_stats, q_errors)},
109 };
110
111 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
112                 sizeof(rte_rxq_stats_strings[0]))
113
114 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
115         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
116         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
117 };
118 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
119                 sizeof(rte_txq_stats_strings[0]))
120
121
122 /**
123  * The user application callback description.
124  *
125  * It contains the callback address to be registered by the user
126  * application, the pointer to the callback parameters, and the event type.
127  */
128 struct rte_eth_dev_callback {
129         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
130         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
131         void *cb_arg;                           /**< Parameter for callback */
132         enum rte_eth_event_type event;          /**< Interrupt event type */
133         uint32_t active;                        /**< Callback is executing */
134 };
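/*
 * Usage sketch (illustrative, not part of the upstream file): entries of the
 * list above are created through rte_eth_dev_callback_register(). The sketch
 * below assumes a valid port_id, intr_conf.lsc = 1 and a PMD that supports
 * link-status-change interrupts:
 *
 *	static void
 *	lsc_event_cb(uint8_t port_id, enum rte_eth_event_type event, void *arg)
 *	{
 *		RTE_SET_USED(arg);
 *		if (event == RTE_ETH_EVENT_INTR_LSC)
 *			printf("port %u: link state changed\n", port_id);
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */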
135
136 enum {
137         STAT_QMAP_TX = 0,
138         STAT_QMAP_RX
139 };
140
141 enum {
142         DEV_DETACHED = 0,
143         DEV_ATTACHED
144 };
145
146 static void
147 rte_eth_dev_data_alloc(void)
148 {
149         const unsigned flags = 0;
150         const struct rte_memzone *mz;
151
152         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
153                 mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
154                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
155                                 rte_socket_id(), flags);
156         } else
157                 mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
158         if (mz == NULL)
159                 rte_panic("Cannot allocate memzone for ethernet port data\n");
160
161         rte_eth_dev_data = mz->addr;
162         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
163                 memset(rte_eth_dev_data, 0,
164                                 RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
165 }
166
167 struct rte_eth_dev *
168 rte_eth_dev_allocated(const char *name)
169 {
170         unsigned i;
171
172         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
173                 if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
174                     strcmp(rte_eth_devices[i].data->name, name) == 0)
175                         return &rte_eth_devices[i];
176         }
177         return NULL;
178 }
179
180 static uint8_t
181 rte_eth_dev_find_free_port(void)
182 {
183         unsigned i;
184
185         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
186                 if (rte_eth_devices[i].attached == DEV_DETACHED)
187                         return i;
188         }
189         return RTE_MAX_ETHPORTS;
190 }
191
192 static struct rte_eth_dev *
193 eth_dev_get(uint8_t port_id)
194 {
195         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
196
197         eth_dev->data = &rte_eth_dev_data[port_id];
198         eth_dev->attached = DEV_ATTACHED;
199
200         eth_dev_last_created_port = port_id;
201         nb_ports++;
202
203         return eth_dev;
204 }
205
206 struct rte_eth_dev *
207 rte_eth_dev_allocate(const char *name)
208 {
209         uint8_t port_id;
210         struct rte_eth_dev *eth_dev;
211
212         port_id = rte_eth_dev_find_free_port();
213         if (port_id == RTE_MAX_ETHPORTS) {
214                 RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
215                 return NULL;
216         }
217
218         if (rte_eth_dev_data == NULL)
219                 rte_eth_dev_data_alloc();
220
221         if (rte_eth_dev_allocated(name) != NULL) {
222                 RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
223                                 name);
224                 return NULL;
225         }
226
227         eth_dev = eth_dev_get(port_id);
228         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
229         eth_dev->data->port_id = port_id;
230
231         return eth_dev;
232 }
233
234 /*
235  * Attach to a port already registered by the primary process, which
236  * ensures that the same device gets the same port id in both the
237  * primary and secondary processes.
238  */
239 static struct rte_eth_dev *
240 eth_dev_attach_secondary(const char *name)
241 {
242         uint8_t i;
243         struct rte_eth_dev *eth_dev;
244
245         if (rte_eth_dev_data == NULL)
246                 rte_eth_dev_data_alloc();
247
248         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
249                 if (strcmp(rte_eth_dev_data[i].name, name) == 0)
250                         break;
251         }
252         if (i == RTE_MAX_ETHPORTS) {
253                 RTE_PMD_DEBUG_TRACE(
254                         "device %s is not driven by the primary process\n",
255                         name);
256                 return NULL;
257         }
258
259         eth_dev = eth_dev_get(i);
260         RTE_ASSERT(eth_dev->data->port_id == i);
261
262         return eth_dev;
263 }
264
265 int
266 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
267 {
268         if (eth_dev == NULL)
269                 return -EINVAL;
270
271         eth_dev->attached = DEV_DETACHED;
272         nb_ports--;
273         return 0;
274 }
275
276 int
277 rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
278                       struct rte_pci_device *pci_dev)
279 {
280         struct eth_driver    *eth_drv;
281         struct rte_eth_dev *eth_dev;
282         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
283
284         int diag;
285
286         eth_drv = (struct eth_driver *)pci_drv;
287
288         rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
289                         sizeof(ethdev_name));
290
291         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
292                 eth_dev = rte_eth_dev_allocate(ethdev_name);
293                 if (eth_dev == NULL)
294                         return -ENOMEM;
295
296                 eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
297                                   eth_drv->dev_private_size,
298                                   RTE_CACHE_LINE_SIZE);
299                 if (eth_dev->data->dev_private == NULL)
300                         rte_panic("Cannot allocate memzone for private port data\n");
301         } else {
302                 eth_dev = eth_dev_attach_secondary(ethdev_name);
303                 if (eth_dev == NULL) {
304                         /*
305                          * If we failed to attach the device, it means the
306                          * device was skipped in the primary process due to
307                          * some error. In that case, return a positive value
308                          * so that EAL skips it for the secondary process
309                          * as well.
310                          */
311                         return 1;
312                 }
313         }
314         eth_dev->pci_dev = pci_dev;
315         eth_dev->driver = eth_drv;
316         eth_dev->data->rx_mbuf_alloc_failed = 0;
317
318         /* init user callbacks */
319         TAILQ_INIT(&(eth_dev->link_intr_cbs));
320
321         /*
322          * Set the default MTU.
323          */
324         eth_dev->data->mtu = ETHER_MTU;
325
326         /* Invoke PMD device initialization function */
327         diag = (*eth_drv->eth_dev_init)(eth_dev);
328         if (diag == 0)
329                 return 0;
330
331         RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
332                         pci_drv->driver.name,
333                         (unsigned) pci_dev->id.vendor_id,
334                         (unsigned) pci_dev->id.device_id);
335         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
336                 rte_free(eth_dev->data->dev_private);
337         rte_eth_dev_release_port(eth_dev);
338         return diag;
339 }
340
341 int
342 rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
343 {
344         const struct eth_driver *eth_drv;
345         struct rte_eth_dev *eth_dev;
346         char ethdev_name[RTE_ETH_NAME_MAX_LEN];
347         int ret;
348
349         if (pci_dev == NULL)
350                 return -EINVAL;
351
352         rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
353                         sizeof(ethdev_name));
354
355         eth_dev = rte_eth_dev_allocated(ethdev_name);
356         if (eth_dev == NULL)
357                 return -ENODEV;
358
359         eth_drv = (const struct eth_driver *)pci_dev->driver;
360
361         /* Invoke PMD device uninit function */
362         if (*eth_drv->eth_dev_uninit) {
363                 ret = (*eth_drv->eth_dev_uninit)(eth_dev);
364                 if (ret)
365                         return ret;
366         }
367
368         /* free ether device */
369         rte_eth_dev_release_port(eth_dev);
370
371         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
372                 rte_free(eth_dev->data->dev_private);
373
374         eth_dev->pci_dev = NULL;
375         eth_dev->driver = NULL;
376         eth_dev->data = NULL;
377
378         return 0;
379 }
380
381 int
382 rte_eth_dev_is_valid_port(uint8_t port_id)
383 {
384         if (port_id >= RTE_MAX_ETHPORTS ||
385             rte_eth_devices[port_id].attached != DEV_ATTACHED)
386                 return 0;
387         else
388                 return 1;
389 }
390
391 int
392 rte_eth_dev_socket_id(uint8_t port_id)
393 {
394         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
395         return rte_eth_devices[port_id].data->numa_node;
396 }
397
398 uint8_t
399 rte_eth_dev_count(void)
400 {
401         return nb_ports;
402 }
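/*
 * Usage sketch (illustrative, not upstream code): because ports can be
 * detached, the port-id space may contain holes, so scanning all slots with
 * rte_eth_dev_is_valid_port() is safer than assuming ids 0..count-1:
 *
 *	uint8_t pid;
 *
 *	for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
 *		if (!rte_eth_dev_is_valid_port(pid))
 *			continue;
 *		printf("port %u on socket %d\n",
 *		       pid, rte_eth_dev_socket_id(pid));
 *	}
 */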
403
404 int
405 rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
406 {
407         char *tmp;
408
409         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
410
411         if (name == NULL) {
412                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
413                 return -EINVAL;
414         }
415
416         /* shouldn't check 'rte_eth_devices[i].data' here,
417          * because it might be overwritten by a VDEV PMD */
418         tmp = rte_eth_dev_data[port_id].name;
419         strcpy(name, tmp);
420         return 0;
421 }
422
423 int
424 rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
425 {
426         int i;
427
428         if (name == NULL) {
429                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
430                 return -EINVAL;
431         }
432
433         if (!nb_ports)
434                 return -ENODEV;
435
436         *port_id = RTE_MAX_ETHPORTS;
437
438         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
439
440                 if (!strncmp(name,
441                         rte_eth_dev_data[i].name, strlen(name))) {
442
443                         *port_id = i;
444
445                         return 0;
446                 }
447         }
448         return -ENODEV;
449 }
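/*
 * Usage sketch (illustrative, not upstream code): name <-> port-id lookup.
 * The device name "0000:01:00.0" is only an example PCI address and is an
 * assumption of this sketch:
 *
 *	uint8_t pid;
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *
 *	if (rte_eth_dev_get_port_by_name("0000:01:00.0", &pid) == 0)
 *		printf("found port %u\n", pid);
 *	if (rte_eth_dev_get_name_by_port(pid, name) == 0)
 *		printf("port %u is %s\n", pid, name);
 */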
450
451 static int
452 rte_eth_dev_is_detachable(uint8_t port_id)
453 {
454         uint32_t dev_flags;
455
456         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
457
458         switch (rte_eth_devices[port_id].data->kdrv) {
459         case RTE_KDRV_IGB_UIO:
460         case RTE_KDRV_UIO_GENERIC:
461         case RTE_KDRV_NIC_UIO:
462         case RTE_KDRV_NONE:
463                 break;
464         case RTE_KDRV_VFIO:
465         default:
466                 return -ENOTSUP;
467         }
468         dev_flags = rte_eth_devices[port_id].data->dev_flags;
469         if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
470                 (!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
471                 return 0;
472         else
473                 return 1;
474 }
475
476 /* attach the new device, then store port_id of the device */
477 int
478 rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
479 {
480         int ret = -1;
481         int current = rte_eth_dev_count();
482         char *name = NULL;
483         char *args = NULL;
484
485         if ((devargs == NULL) || (port_id == NULL)) {
486                 ret = -EINVAL;
487                 goto err;
488         }
489
490         /* parse devargs, then retrieve device name and args */
491         if (rte_eal_parse_devargs_str(devargs, &name, &args))
492                 goto err;
493
494         ret = rte_eal_dev_attach(name, args);
495         if (ret < 0)
496                 goto err;
497
498         /* no point looking at the port count if no port exists */
499         if (!rte_eth_dev_count()) {
500                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
501                 ret = -1;
502                 goto err;
503         }
504
505         /* if the port count did not change, there is a bug: the driver
506          * reported that it attached a device but did not create a port.
507          */
508         if (current == rte_eth_dev_count()) {
509                 ret = -1;
510                 goto err;
511         }
512
513         *port_id = eth_dev_last_created_port;
514         ret = 0;
515
516 err:
517         free(name);
518         free(args);
519         return ret;
520 }
521
522 /* detach the device, then store the name of the device */
523 int
524 rte_eth_dev_detach(uint8_t port_id, char *name)
525 {
526         int ret = -1;
527
528         if (name == NULL) {
529                 ret = -EINVAL;
530                 goto err;
531         }
532
533         /* FIXME: move this to eal, once device flags are relocated there */
534         if (rte_eth_dev_is_detachable(port_id))
535                 goto err;
536
537         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
538                  "%s", rte_eth_devices[port_id].data->name);
539         ret = rte_eal_dev_detach(name);
540         if (ret < 0)
541                 goto err;
542
543         return 0;
544
545 err:
546         return ret;
547 }
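/*
 * Usage sketch (illustrative, not upstream code) of the two hot-plug helpers
 * above. The devargs string "net_null0" is only an example virtual device
 * name; the device must carry RTE_ETH_DEV_DETACHABLE for the detach to work:
 *
 *	uint8_t pid;
 *	char name[RTE_ETH_NAME_MAX_LEN];
 *
 *	if (rte_eth_dev_attach("net_null0", &pid) == 0) {
 *		... configure and use the port ...
 *		rte_eth_dev_stop(pid);
 *		rte_eth_dev_close(pid);
 *		rte_eth_dev_detach(pid, name);
 *	}
 */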
548
549 static int
550 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
551 {
552         uint16_t old_nb_queues = dev->data->nb_rx_queues;
553         void **rxq;
554         unsigned i;
555
556         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
557                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
558                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
559                                 RTE_CACHE_LINE_SIZE);
560                 if (dev->data->rx_queues == NULL) {
561                         dev->data->nb_rx_queues = 0;
562                         return -(ENOMEM);
563                 }
564         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
565                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
566
567                 rxq = dev->data->rx_queues;
568
569                 for (i = nb_queues; i < old_nb_queues; i++)
570                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
571                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
572                                 RTE_CACHE_LINE_SIZE);
573                 if (rxq == NULL)
574                         return -(ENOMEM);
575                 if (nb_queues > old_nb_queues) {
576                         uint16_t new_qs = nb_queues - old_nb_queues;
577
578                         memset(rxq + old_nb_queues, 0,
579                                 sizeof(rxq[0]) * new_qs);
580                 }
581
582                 dev->data->rx_queues = rxq;
583
584         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
585                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
586
587                 rxq = dev->data->rx_queues;
588
589                 for (i = nb_queues; i < old_nb_queues; i++)
590                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
591         }
592         dev->data->nb_rx_queues = nb_queues;
593         return 0;
594 }
595
596 int
597 rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
598 {
599         struct rte_eth_dev *dev;
600
601         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
602
603         dev = &rte_eth_devices[port_id];
604         if (rx_queue_id >= dev->data->nb_rx_queues) {
605                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
606                 return -EINVAL;
607         }
608
609         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
610
611         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
612                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
613                         " already started\n",
614                         rx_queue_id, port_id);
615                 return 0;
616         }
617
618         return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
619
620 }
621
622 int
623 rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
624 {
625         struct rte_eth_dev *dev;
626
627         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
628
629         dev = &rte_eth_devices[port_id];
630         if (rx_queue_id >= dev->data->nb_rx_queues) {
631                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
632                 return -EINVAL;
633         }
634
635         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
636
637         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
638                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
639                         " already stopped\n",
640                         rx_queue_id, port_id);
641                 return 0;
642         }
643
644         return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
645
646 }
647
648 int
649 rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
650 {
651         struct rte_eth_dev *dev;
652
653         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
654
655         dev = &rte_eth_devices[port_id];
656         if (tx_queue_id >= dev->data->nb_tx_queues) {
657                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
658                 return -EINVAL;
659         }
660
661         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
662
663         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
664                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
665                         " already started\n",
666                         tx_queue_id, port_id);
667                 return 0;
668         }
669
670         return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
671
672 }
673
674 int
675 rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
676 {
677         struct rte_eth_dev *dev;
678
679         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
680
681         dev = &rte_eth_devices[port_id];
682         if (tx_queue_id >= dev->data->nb_tx_queues) {
683                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
684                 return -EINVAL;
685         }
686
687         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
688
689         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
690                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
691                         " already stopped\n",
692                         tx_queue_id, port_id);
693                 return 0;
694         }
695
696         return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
697
698 }
699
700 static int
701 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
702 {
703         uint16_t old_nb_queues = dev->data->nb_tx_queues;
704         void **txq;
705         unsigned i;
706
707         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
708                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
709                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
710                                                    RTE_CACHE_LINE_SIZE);
711                 if (dev->data->tx_queues == NULL) {
712                         dev->data->nb_tx_queues = 0;
713                         return -(ENOMEM);
714                 }
715         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
716                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
717
718                 txq = dev->data->tx_queues;
719
720                 for (i = nb_queues; i < old_nb_queues; i++)
721                         (*dev->dev_ops->tx_queue_release)(txq[i]);
722                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
723                                   RTE_CACHE_LINE_SIZE);
724                 if (txq == NULL)
725                         return -ENOMEM;
726                 if (nb_queues > old_nb_queues) {
727                         uint16_t new_qs = nb_queues - old_nb_queues;
728
729                         memset(txq + old_nb_queues, 0,
730                                sizeof(txq[0]) * new_qs);
731                 }
732
733                 dev->data->tx_queues = txq;
734
735         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
736                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
737
738                 txq = dev->data->tx_queues;
739
740                 for (i = nb_queues; i < old_nb_queues; i++)
741                         (*dev->dev_ops->tx_queue_release)(txq[i]);
742         }
743         dev->data->nb_tx_queues = nb_queues;
744         return 0;
745 }
746
747 uint32_t
748 rte_eth_speed_bitflag(uint32_t speed, int duplex)
749 {
750         switch (speed) {
751         case ETH_SPEED_NUM_10M:
752                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
753         case ETH_SPEED_NUM_100M:
754                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
755         case ETH_SPEED_NUM_1G:
756                 return ETH_LINK_SPEED_1G;
757         case ETH_SPEED_NUM_2_5G:
758                 return ETH_LINK_SPEED_2_5G;
759         case ETH_SPEED_NUM_5G:
760                 return ETH_LINK_SPEED_5G;
761         case ETH_SPEED_NUM_10G:
762                 return ETH_LINK_SPEED_10G;
763         case ETH_SPEED_NUM_20G:
764                 return ETH_LINK_SPEED_20G;
765         case ETH_SPEED_NUM_25G:
766                 return ETH_LINK_SPEED_25G;
767         case ETH_SPEED_NUM_40G:
768                 return ETH_LINK_SPEED_40G;
769         case ETH_SPEED_NUM_50G:
770                 return ETH_LINK_SPEED_50G;
771         case ETH_SPEED_NUM_56G:
772                 return ETH_LINK_SPEED_56G;
773         case ETH_SPEED_NUM_100G:
774                 return ETH_LINK_SPEED_100G;
775         default:
776                 return 0;
777         }
778 }
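/*
 * Usage sketch (illustrative, not upstream code): the helper above is
 * typically used to request a fixed link speed through
 * rte_eth_conf.link_speeds, assuming the PMD supports fixed-speed setup:
 *
 *	struct rte_eth_conf conf = {
 *		.link_speeds = ETH_LINK_SPEED_FIXED |
 *			rte_eth_speed_bitflag(ETH_SPEED_NUM_10G,
 *					      ETH_LINK_FULL_DUPLEX),
 *	};
 */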
779
780 int
781 rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
782                       const struct rte_eth_conf *dev_conf)
783 {
784         struct rte_eth_dev *dev;
785         struct rte_eth_dev_info dev_info;
786         int diag;
787
788         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
789
790         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
791                 RTE_PMD_DEBUG_TRACE(
792                         "Number of RX queues requested (%u) is greater than max supported(%d)\n",
793                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
794                 return -EINVAL;
795         }
796
797         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
798                 RTE_PMD_DEBUG_TRACE(
799                         "Number of TX queues requested (%u) is greater than max supported(%d)\n",
800                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
801                 return -EINVAL;
802         }
803
804         dev = &rte_eth_devices[port_id];
805
806         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
807         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
808
809         if (dev->data->dev_started) {
810                 RTE_PMD_DEBUG_TRACE(
811                     "port %d must be stopped to allow configuration\n", port_id);
812                 return -EBUSY;
813         }
814
815         /* Copy the dev_conf parameter into the dev structure */
816         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
817
818         /*
819          * Check that the numbers of RX and TX queues are not greater
820          * than the maximum number of RX and TX queues supported by the
821          * configured device.
822          */
823         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
824
825         if (nb_rx_q == 0 && nb_tx_q == 0) {
826                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
827                 return -EINVAL;
828         }
829
830         if (nb_rx_q > dev_info.max_rx_queues) {
831                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
832                                 port_id, nb_rx_q, dev_info.max_rx_queues);
833                 return -EINVAL;
834         }
835
836         if (nb_tx_q > dev_info.max_tx_queues) {
837                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
838                                 port_id, nb_tx_q, dev_info.max_tx_queues);
839                 return -EINVAL;
840         }
841
842         /*
843          * If link state interrupt is enabled, check that the
844          * device supports it.
845          */
846         if ((dev_conf->intr_conf.lsc == 1) &&
847                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
848                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
849                                         dev->data->drv_name);
850                         return -EINVAL;
851         }
852
853         /*
854          * If jumbo frames are enabled, check that the maximum RX packet
855          * length is supported by the configured device.
856          */
857         if (dev_conf->rxmode.jumbo_frame == 1) {
858                 if (dev_conf->rxmode.max_rx_pkt_len >
859                     dev_info.max_rx_pktlen) {
860                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
861                                 " > max valid value %u\n",
862                                 port_id,
863                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
864                                 (unsigned)dev_info.max_rx_pktlen);
865                         return -EINVAL;
866                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
867                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
868                                 " < min valid value %u\n",
869                                 port_id,
870                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
871                                 (unsigned)ETHER_MIN_LEN);
872                         return -EINVAL;
873                 }
874         } else {
875                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
876                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
877                         /* Use default value */
878                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
879                                                         ETHER_MAX_LEN;
880         }
881
882         /*
883          * Setup new number of RX/TX queues and reconfigure device.
884          */
885         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
886         if (diag != 0) {
887                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
888                                 port_id, diag);
889                 return diag;
890         }
891
892         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
893         if (diag != 0) {
894                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
895                                 port_id, diag);
896                 rte_eth_dev_rx_queue_config(dev, 0);
897                 return diag;
898         }
899
900         diag = (*dev->dev_ops->dev_configure)(dev);
901         if (diag != 0) {
902                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
903                                 port_id, diag);
904                 rte_eth_dev_rx_queue_config(dev, 0);
905                 rte_eth_dev_tx_queue_config(dev, 0);
906                 return diag;
907         }
908
909         return 0;
910 }
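/*
 * Usage sketch (illustrative, not upstream code): a minimal configuration
 * with one RX and one TX queue. Assumes a valid port_id; error handling is
 * trimmed to a single rte_exit() for brevity:
 *
 *	struct rte_eth_conf port_conf;
 *
 *	memset(&port_conf, 0, sizeof(port_conf));
 *	port_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot configure port %u\n", port_id);
 */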
911
912 static void
913 rte_eth_dev_config_restore(uint8_t port_id)
914 {
915         struct rte_eth_dev *dev;
916         struct rte_eth_dev_info dev_info;
917         struct ether_addr addr;
918         uint16_t i;
919         uint32_t pool = 0;
920
921         dev = &rte_eth_devices[port_id];
922
923         rte_eth_dev_info_get(port_id, &dev_info);
924
925         if (RTE_ETH_DEV_SRIOV(dev).active)
926                 pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
927
928         /* replay MAC address configuration */
929         for (i = 0; i < dev_info.max_mac_addrs; i++) {
930                 addr = dev->data->mac_addrs[i];
931
932                 /* skip zero address */
933                 if (is_zero_ether_addr(&addr))
934                         continue;
935
936                 /* add address to the hardware */
937                 if  (*dev->dev_ops->mac_addr_add &&
938                         (dev->data->mac_pool_sel[i] & (1ULL << pool)))
939                         (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
940                 else {
941                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
942                                         port_id);
943                         /* exit the loop but do not return an error */
944                         break;
945                 }
946         }
947
948         /* replay promiscuous configuration */
949         if (rte_eth_promiscuous_get(port_id) == 1)
950                 rte_eth_promiscuous_enable(port_id);
951         else if (rte_eth_promiscuous_get(port_id) == 0)
952                 rte_eth_promiscuous_disable(port_id);
953
954         /* replay all multicast configuration */
955         if (rte_eth_allmulticast_get(port_id) == 1)
956                 rte_eth_allmulticast_enable(port_id);
957         else if (rte_eth_allmulticast_get(port_id) == 0)
958                 rte_eth_allmulticast_disable(port_id);
959 }
960
961 int
962 rte_eth_dev_start(uint8_t port_id)
963 {
964         struct rte_eth_dev *dev;
965         int diag;
966
967         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
968
969         dev = &rte_eth_devices[port_id];
970
971         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
972
973         if (dev->data->dev_started != 0) {
974                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
975                         " already started\n",
976                         port_id);
977                 return 0;
978         }
979
980         diag = (*dev->dev_ops->dev_start)(dev);
981         if (diag == 0)
982                 dev->data->dev_started = 1;
983         else
984                 return diag;
985
986         rte_eth_dev_config_restore(port_id);
987
988         if (dev->data->dev_conf.intr_conf.lsc == 0) {
989                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
990                 (*dev->dev_ops->link_update)(dev, 0);
991         }
992         return 0;
993 }
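/*
 * Usage sketch (illustrative, not upstream code): the usual bring-up tail
 * once the queues are set up. Promiscuous mode is optional and shown only
 * as an example:
 *
 *	if (rte_eth_dev_start(port_id) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot start port %u\n", port_id);
 *	rte_eth_promiscuous_enable(port_id);
 */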
994
995 void
996 rte_eth_dev_stop(uint8_t port_id)
997 {
998         struct rte_eth_dev *dev;
999
1000         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1001         dev = &rte_eth_devices[port_id];
1002
1003         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1004
1005         if (dev->data->dev_started == 0) {
1006                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
1007                         " already stopped\n",
1008                         port_id);
1009                 return;
1010         }
1011
1012         dev->data->dev_started = 0;
1013         (*dev->dev_ops->dev_stop)(dev);
1014 }
1015
1016 int
1017 rte_eth_dev_set_link_up(uint8_t port_id)
1018 {
1019         struct rte_eth_dev *dev;
1020
1021         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1022
1023         dev = &rte_eth_devices[port_id];
1024
1025         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1026         return (*dev->dev_ops->dev_set_link_up)(dev);
1027 }
1028
1029 int
1030 rte_eth_dev_set_link_down(uint8_t port_id)
1031 {
1032         struct rte_eth_dev *dev;
1033
1034         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1035
1036         dev = &rte_eth_devices[port_id];
1037
1038         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1039         return (*dev->dev_ops->dev_set_link_down)(dev);
1040 }
1041
1042 void
1043 rte_eth_dev_close(uint8_t port_id)
1044 {
1045         struct rte_eth_dev *dev;
1046
1047         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1048         dev = &rte_eth_devices[port_id];
1049
1050         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1051         dev->data->dev_started = 0;
1052         (*dev->dev_ops->dev_close)(dev);
1053
1054         rte_free(dev->data->rx_queues);
1055         dev->data->rx_queues = NULL;
1056         rte_free(dev->data->tx_queues);
1057         dev->data->tx_queues = NULL;
1058 }
1059
1060 int
1061 rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
1062                        uint16_t nb_rx_desc, unsigned int socket_id,
1063                        const struct rte_eth_rxconf *rx_conf,
1064                        struct rte_mempool *mp)
1065 {
1066         int ret;
1067         uint32_t mbp_buf_size;
1068         struct rte_eth_dev *dev;
1069         struct rte_eth_dev_info dev_info;
1070
1071         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1072
1073         dev = &rte_eth_devices[port_id];
1074         if (rx_queue_id >= dev->data->nb_rx_queues) {
1075                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1076                 return -EINVAL;
1077         }
1078
1079         if (dev->data->dev_started) {
1080                 RTE_PMD_DEBUG_TRACE(
1081                     "port %d must be stopped to allow configuration\n", port_id);
1082                 return -EBUSY;
1083         }
1084
1085         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1086         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1087
1088         /*
1089          * Check the size of the mbuf data buffer.
1090          * This value must be provided in the private data of the memory pool.
1091          * First check that the memory pool has a valid private data.
1092          * First check that the memory pool has valid private data.
1093         rte_eth_dev_info_get(port_id, &dev_info);
1094         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1095                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1096                                 mp->name, (int) mp->private_data_size,
1097                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1098                 return -ENOSPC;
1099         }
1100         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1101
1102         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1103                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1104                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1105                                 "=%d)\n",
1106                                 mp->name,
1107                                 (int)mbp_buf_size,
1108                                 (int)(RTE_PKTMBUF_HEADROOM +
1109                                       dev_info.min_rx_bufsize),
1110                                 (int)RTE_PKTMBUF_HEADROOM,
1111                                 (int)dev_info.min_rx_bufsize);
1112                 return -EINVAL;
1113         }
1114
1115         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1116                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1117                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1118
1119                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1120                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1121                         nb_rx_desc,
1122                         dev_info.rx_desc_lim.nb_max,
1123                         dev_info.rx_desc_lim.nb_min,
1124                         dev_info.rx_desc_lim.nb_align);
1125                 return -EINVAL;
1126         }
1127
1128         if (rx_conf == NULL)
1129                 rx_conf = &dev_info.default_rxconf;
1130
1131         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1132                                               socket_id, rx_conf, mp);
1133         if (!ret) {
1134                 if (!dev->data->min_rx_buf_size ||
1135                     dev->data->min_rx_buf_size > mbp_buf_size)
1136                         dev->data->min_rx_buf_size = mbp_buf_size;
1137         }
1138
1139         return ret;
1140 }
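/*
 * Usage sketch (illustrative, not upstream code): an RX queue backed by a
 * packet mbuf pool. The pool name and sizing values (8192 mbufs, 256 cache,
 * 512 descriptors) are arbitrary examples, not recommendations:
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_eth_dev_socket_id(port_id));
 *	if (mp == NULL ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL, mp) < 0)
 *		rte_exit(EXIT_FAILURE, "RX queue setup failed\n");
 */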
1141
1142 int
1143 rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
1144                        uint16_t nb_tx_desc, unsigned int socket_id,
1145                        const struct rte_eth_txconf *tx_conf)
1146 {
1147         struct rte_eth_dev *dev;
1148         struct rte_eth_dev_info dev_info;
1149
1150         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1151
1152         dev = &rte_eth_devices[port_id];
1153         if (tx_queue_id >= dev->data->nb_tx_queues) {
1154                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1155                 return -EINVAL;
1156         }
1157
1158         if (dev->data->dev_started) {
1159                 RTE_PMD_DEBUG_TRACE(
1160                     "port %d must be stopped to allow configuration\n", port_id);
1161                 return -EBUSY;
1162         }
1163
1164         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1165         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1166
1167         rte_eth_dev_info_get(port_id, &dev_info);
1168
1169         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1170             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1171             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1172                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1173                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1174                                 nb_tx_desc,
1175                                 dev_info.tx_desc_lim.nb_max,
1176                                 dev_info.tx_desc_lim.nb_min,
1177                                 dev_info.tx_desc_lim.nb_align);
1178                 return -EINVAL;
1179         }
1180
1181         if (tx_conf == NULL)
1182                 tx_conf = &dev_info.default_txconf;
1183
1184         return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
1185                                                socket_id, tx_conf);
1186 }
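/*
 * Usage sketch (illustrative, not upstream code): the matching TX queue
 * setup. Passing NULL for tx_conf selects the driver defaults reported in
 * dev_info.default_txconf; 512 descriptors is only an example value:
 *
 *	if (rte_eth_tx_queue_setup(port_id, 0, 512,
 *			rte_eth_dev_socket_id(port_id), NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "TX queue setup failed\n");
 */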
1187
1188 void
1189 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1190                 void *userdata __rte_unused)
1191 {
1192         unsigned i;
1193
1194         for (i = 0; i < unsent; i++)
1195                 rte_pktmbuf_free(pkts[i]);
1196 }
1197
1198 void
1199 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1200                 void *userdata)
1201 {
1202         uint64_t *count = userdata;
1203         unsigned i;
1204
1205         for (i = 0; i < unsent; i++)
1206                 rte_pktmbuf_free(pkts[i]);
1207
1208         *count += unsent;
1209 }
1210
1211 int
1212 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1213                 buffer_tx_error_fn cbfn, void *userdata)
1214 {
1215         buffer->error_callback = cbfn;
1216         buffer->error_userdata = userdata;
1217         return 0;
1218 }
1219
1220 int
1221 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1222 {
1223         int ret = 0;
1224
1225         if (buffer == NULL)
1226                 return -EINVAL;
1227
1228         buffer->size = size;
1229         if (buffer->error_callback == NULL) {
1230                 ret = rte_eth_tx_buffer_set_err_callback(
1231                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1232         }
1233
1234         return ret;
1235 }
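/*
 * Usage sketch (illustrative, not upstream code) of the buffered-TX helpers
 * above. The buffer is sized with RTE_ETH_TX_BUFFER_SIZE(); the drop counter
 * is optional and uses the count callback from this file. `pkt` is assumed
 * to be a struct rte_mbuf pointer owned by the caller:
 *
 *	static uint64_t drops;
 *	struct rte_eth_dev_tx_buffer *buf;
 *
 *	buf = rte_zmalloc("tx_buffer", RTE_ETH_TX_BUFFER_SIZE(32), 0);
 *	rte_eth_tx_buffer_init(buf, 32);
 *	rte_eth_tx_buffer_set_err_callback(buf,
 *			rte_eth_tx_buffer_count_callback, &drops);
 *
 *	(queue one mbuf; the buffer flushes itself when full)
 *	rte_eth_tx_buffer(port_id, 0, buf, pkt);
 *	(push out anything still buffered)
 *	rte_eth_tx_buffer_flush(port_id, 0, buf);
 */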
1236
1237 void
1238 rte_eth_promiscuous_enable(uint8_t port_id)
1239 {
1240         struct rte_eth_dev *dev;
1241
1242         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1243         dev = &rte_eth_devices[port_id];
1244
1245         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1246         (*dev->dev_ops->promiscuous_enable)(dev);
1247         dev->data->promiscuous = 1;
1248 }
1249
1250 void
1251 rte_eth_promiscuous_disable(uint8_t port_id)
1252 {
1253         struct rte_eth_dev *dev;
1254
1255         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1256         dev = &rte_eth_devices[port_id];
1257
1258         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1259         dev->data->promiscuous = 0;
1260         (*dev->dev_ops->promiscuous_disable)(dev);
1261 }
1262
1263 int
1264 rte_eth_promiscuous_get(uint8_t port_id)
1265 {
1266         struct rte_eth_dev *dev;
1267
1268         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1269
1270         dev = &rte_eth_devices[port_id];
1271         return dev->data->promiscuous;
1272 }
1273
1274 void
1275 rte_eth_allmulticast_enable(uint8_t port_id)
1276 {
1277         struct rte_eth_dev *dev;
1278
1279         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1280         dev = &rte_eth_devices[port_id];
1281
1282         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1283         (*dev->dev_ops->allmulticast_enable)(dev);
1284         dev->data->all_multicast = 1;
1285 }
1286
1287 void
1288 rte_eth_allmulticast_disable(uint8_t port_id)
1289 {
1290         struct rte_eth_dev *dev;
1291
1292         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1293         dev = &rte_eth_devices[port_id];
1294
1295         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1296         dev->data->all_multicast = 0;
1297         (*dev->dev_ops->allmulticast_disable)(dev);
1298 }
1299
1300 int
1301 rte_eth_allmulticast_get(uint8_t port_id)
1302 {
1303         struct rte_eth_dev *dev;
1304
1305         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1306
1307         dev = &rte_eth_devices[port_id];
1308         return dev->data->all_multicast;
1309 }
1310
1311 static inline int
1312 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1313                                 struct rte_eth_link *link)
1314 {
1315         struct rte_eth_link *dst = link;
1316         struct rte_eth_link *src = &(dev->data->dev_link);
1317
1318         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1319                                         *(uint64_t *)src) == 0)
1320                 return -1;
1321
1322         return 0;
1323 }
1324
1325 void
1326 rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
1327 {
1328         struct rte_eth_dev *dev;
1329
1330         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1331         dev = &rte_eth_devices[port_id];
1332
1333         if (dev->data->dev_conf.intr_conf.lsc != 0)
1334                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1335         else {
1336                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1337                 (*dev->dev_ops->link_update)(dev, 1);
1338                 *eth_link = dev->data->dev_link;
1339         }
1340 }
1341
1342 void
1343 rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
1344 {
1345         struct rte_eth_dev *dev;
1346
1347         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1348         dev = &rte_eth_devices[port_id];
1349
1350         if (dev->data->dev_conf.intr_conf.lsc != 0)
1351                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1352         else {
1353                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1354                 (*dev->dev_ops->link_update)(dev, 0);
1355                 *eth_link = dev->data->dev_link;
1356         }
1357 }
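/*
 * Usage sketch (illustrative, not upstream code): a non-blocking link check
 * using the call above, assuming a valid, started port_id:
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 *	else
 *		printf("port %u down\n", port_id);
 */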
1358
1359 int
1360 rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
1361 {
1362         struct rte_eth_dev *dev;
1363
1364         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1365
1366         dev = &rte_eth_devices[port_id];
1367         memset(stats, 0, sizeof(*stats));
1368
1369         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1370         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1371         (*dev->dev_ops->stats_get)(dev, stats);
1372         return 0;
1373 }
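/*
 * Usage sketch (illustrative, not upstream code): reading the basic counters
 * filled in above:
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx %"PRIu64" tx %"PRIu64" rx_errors %"PRIu64
 *		       " rx_nombuf %"PRIu64"\n",
 *		       stats.ipackets, stats.opackets, stats.ierrors,
 *		       stats.rx_nombuf);
 */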
1374
1375 void
1376 rte_eth_stats_reset(uint8_t port_id)
1377 {
1378         struct rte_eth_dev *dev;
1379
1380         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1381         dev = &rte_eth_devices[port_id];
1382
1383         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
1384         (*dev->dev_ops->stats_reset)(dev);
1385         dev->data->rx_mbuf_alloc_failed = 0;
1386 }
1387
1388 static int
1389 get_xstats_count(uint8_t port_id)
1390 {
1391         struct rte_eth_dev *dev;
1392         int count;
1393
1394         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1395         dev = &rte_eth_devices[port_id];
1396         if (dev->dev_ops->xstats_get_names != NULL) {
1397                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1398                 if (count < 0)
1399                         return count;
1400         } else
1401                 count = 0;
1402         count += RTE_NB_STATS;
1403         count += RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
1404                  RTE_NB_RXQ_STATS;
1405         count += RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS) *
1406                  RTE_NB_TXQ_STATS;
1407         return count;
1408 }
1409
1410 int
1411 rte_eth_xstats_get_names(uint8_t port_id,
1412         struct rte_eth_xstat_name *xstats_names,
1413         unsigned size)
1414 {
1415         struct rte_eth_dev *dev;
1416         int cnt_used_entries;
1417         int cnt_expected_entries;
1418         int cnt_driver_entries;
1419         uint32_t idx, id_queue;
1420         uint16_t num_q;
1421
1422         cnt_expected_entries = get_xstats_count(port_id);
1423         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1424                         (int)size < cnt_expected_entries)
1425                 return cnt_expected_entries;
1426
1427         /* port_id checked in get_xstats_count() */
1428         dev = &rte_eth_devices[port_id];
1429         cnt_used_entries = 0;
1430
1431         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1432                 snprintf(xstats_names[cnt_used_entries].name,
1433                         sizeof(xstats_names[0].name),
1434                         "%s", rte_stats_strings[idx].name);
1435                 cnt_used_entries++;
1436         }
1437         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1438         for (id_queue = 0; id_queue < num_q; id_queue++) {
1439                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1440                         snprintf(xstats_names[cnt_used_entries].name,
1441                                 sizeof(xstats_names[0].name),
1442                                 "rx_q%u%s",
1443                                 id_queue, rte_rxq_stats_strings[idx].name);
1444                         cnt_used_entries++;
1445                 }
1446
1447         }
1448         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1449         for (id_queue = 0; id_queue < num_q; id_queue++) {
1450                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1451                         snprintf(xstats_names[cnt_used_entries].name,
1452                                 sizeof(xstats_names[0].name),
1453                                 "tx_q%u%s",
1454                                 id_queue, rte_txq_stats_strings[idx].name);
1455                         cnt_used_entries++;
1456                 }
1457         }
1458
1459         if (dev->dev_ops->xstats_get_names != NULL) {
1460                 /* If there are any driver-specific xstats, append them
1461                  * to the end of the list.
1462                  */
1463                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
1464                         dev,
1465                         xstats_names + cnt_used_entries,
1466                         size - cnt_used_entries);
1467                 if (cnt_driver_entries < 0)
1468                         return cnt_driver_entries;
1469                 cnt_used_entries += cnt_driver_entries;
1470         }
1471
1472         return cnt_used_entries;
1473 }
1474
1475 /* retrieve ethdev extended statistics */
1476 int
1477 rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
1478         unsigned n)
1479 {
1480         struct rte_eth_stats eth_stats;
1481         struct rte_eth_dev *dev;
1482         unsigned count = 0, i, q;
1483         signed xcount = 0;
1484         uint64_t val, *stats_ptr;
1485         uint16_t nb_rxqs, nb_txqs;
1486
1487         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1488
1489         dev = &rte_eth_devices[port_id];
1490
1491         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1492         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1493
1494         /* Return generic statistics */
1495         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
1496                 (nb_txqs * RTE_NB_TXQ_STATS);
1497
1498         /* implemented by the driver */
1499         if (dev->dev_ops->xstats_get != NULL) {
1500                 /* Retrieve the xstats from the driver at the end of the
1501                  * xstats struct.
1502                  */
1503                 xcount = (*dev->dev_ops->xstats_get)(dev,
1504                                      xstats ? xstats + count : NULL,
1505                                      (n > count) ? n - count : 0);
1506
1507                 if (xcount < 0)
1508                         return xcount;
1509         }
1510
1511         if (n < count + xcount || xstats == NULL)
1512                 return count + xcount;
1513
1514         /* now fill the xstats structure */
1515         count = 0;
1516         rte_eth_stats_get(port_id, &eth_stats);
1517
1518         /* global stats */
1519         for (i = 0; i < RTE_NB_STATS; i++) {
1520                 stats_ptr = RTE_PTR_ADD(&eth_stats,
1521                                         rte_stats_strings[i].offset);
1522                 val = *stats_ptr;
1523                 xstats[count++].value = val;
1524         }
1525
1526         /* per-rxq stats */
1527         for (q = 0; q < nb_rxqs; q++) {
1528                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
1529                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1530                                         rte_rxq_stats_strings[i].offset +
1531                                         q * sizeof(uint64_t));
1532                         val = *stats_ptr;
1533                         xstats[count++].value = val;
1534                 }
1535         }
1536
1537         /* per-txq stats */
1538         for (q = 0; q < nb_txqs; q++) {
1539                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
1540                         stats_ptr = RTE_PTR_ADD(&eth_stats,
1541                                         rte_txq_stats_strings[i].offset +
1542                                         q * sizeof(uint64_t));
1543                         val = *stats_ptr;
1544                         xstats[count++].value = val;
1545                 }
1546         }
1547
1548         for (i = 0; i < count; i++)
1549                 xstats[i].id = i;
1550         /* add an offset to driver-specific stats */
1551         for ( ; i < count + xcount; i++)
1552                 xstats[i].id += count;
1553
1554         return count + xcount;
1555 }
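
/*
 * Usage sketch (illustrative, not part of this file): callers normally use
 * the size-query behaviour implemented above, i.e. a first call with a NULL
 * array to learn how many entries are needed, then a second call to fetch
 * them. Error handling is abbreviated.
 *
 *	int nb = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs;
 *
 *	if (nb < 0)
 *		return nb;
 *	xs = malloc(sizeof(*xs) * nb);
 *	if (xs == NULL)
 *		return -ENOMEM;
 *	nb = rte_eth_xstats_get(port_id, xs, nb);
 *	for (int i = 0; i < nb; i++)
 *		printf("xstat %" PRIu64 ": %" PRIu64 "\n",
 *		       xs[i].id, xs[i].value);
 *	free(xs);
 */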
1556
1557 /* reset ethdev extended statistics */
1558 void
1559 rte_eth_xstats_reset(uint8_t port_id)
1560 {
1561         struct rte_eth_dev *dev;
1562
1563         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1564         dev = &rte_eth_devices[port_id];
1565
1566         /* implemented by the driver */
1567         if (dev->dev_ops->xstats_reset != NULL) {
1568                 (*dev->dev_ops->xstats_reset)(dev);
1569                 return;
1570         }
1571
1572         /* fallback to default */
1573         rte_eth_stats_reset(port_id);
1574 }
1575
1576 static int
1577 set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
1578                 uint8_t is_rx)
1579 {
1580         struct rte_eth_dev *dev;
1581
1582         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1583
1584         dev = &rte_eth_devices[port_id];
1585
1586         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
1587         return (*dev->dev_ops->queue_stats_mapping_set)
1588                         (dev, queue_id, stat_idx, is_rx);
1589 }
1590
1591
1592 int
1593 rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
1594                 uint8_t stat_idx)
1595 {
1596         return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
1597                         STAT_QMAP_TX);
1598 }
1599
1600
1601 int
1602 rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
1603                 uint8_t stat_idx)
1604 {
1605         return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
1606                         STAT_QMAP_RX);
1607 }
1608
1609 void
1610 rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
1611 {
1612         struct rte_eth_dev *dev;
1613         const struct rte_eth_desc_lim lim = {
1614                 .nb_max = UINT16_MAX,
1615                 .nb_min = 0,
1616                 .nb_align = 1,
1617         };
1618
1619         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1620         dev = &rte_eth_devices[port_id];
1621
1622         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
1623         dev_info->rx_desc_lim = lim;
1624         dev_info->tx_desc_lim = lim;
1625
1626         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
1627         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
1628         dev_info->pci_dev = dev->pci_dev;
1629         dev_info->driver_name = dev->data->drv_name;
1630         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
1631         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
1632 }
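
/*
 * Usage sketch (illustrative, not part of this file): the structure is
 * zeroed here before the driver fills it, so the caller only has to provide
 * storage and read back the limits it cares about.
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	printf("%s: max rxq %u, max txq %u\n", dev_info.driver_name,
 *	       dev_info.max_rx_queues, dev_info.max_tx_queues);
 */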
1633
1634 int
1635 rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
1636                                  uint32_t *ptypes, int num)
1637 {
1638         int i, j;
1639         struct rte_eth_dev *dev;
1640         const uint32_t *all_ptypes;
1641
1642         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1643         dev = &rte_eth_devices[port_id];
1644         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
1645         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
1646
1647         if (!all_ptypes)
1648                 return 0;
1649
1650         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
1651                 if (all_ptypes[i] & ptype_mask) {
1652                         if (j < num)
1653                                 ptypes[j] = all_ptypes[i];
1654                         j++;
1655                 }
1656
1657         return j;
1658 }
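
/*
 * Usage sketch (illustrative, not part of this file): as with the xstats
 * query, a first call with num = 0 returns how many packet types match the
 * mask, and a second call fills the array. RTE_PTYPE_L4_MASK comes from the
 * mbuf packet-type definitions.
 *
 *	int nb = rte_eth_dev_get_supported_ptypes(port_id,
 *						  RTE_PTYPE_L4_MASK, NULL, 0);
 *
 *	if (nb > 0) {
 *		uint32_t ptypes[nb];
 *
 *		nb = rte_eth_dev_get_supported_ptypes(port_id,
 *						      RTE_PTYPE_L4_MASK,
 *						      ptypes, nb);
 *	}
 */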
1659
1660 void
1661 rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
1662 {
1663         struct rte_eth_dev *dev;
1664
1665         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1666         dev = &rte_eth_devices[port_id];
1667         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
1668 }
1669
1670
1671 int
1672 rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
1673 {
1674         struct rte_eth_dev *dev;
1675
1676         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1677
1678         dev = &rte_eth_devices[port_id];
1679         *mtu = dev->data->mtu;
1680         return 0;
1681 }
1682
1683 int
1684 rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
1685 {
1686         int ret;
1687         struct rte_eth_dev *dev;
1688
1689         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1690         dev = &rte_eth_devices[port_id];
1691         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
1692
1693         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
1694         if (!ret)
1695                 dev->data->mtu = mtu;
1696
1697         return ret;
1698 }
1699
1700 int
1701 rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
1702 {
1703         struct rte_eth_dev *dev;
1704
1705         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1706         dev = &rte_eth_devices[port_id];
1707         if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
1708                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
1709                 return -ENOSYS;
1710         }
1711
1712         if (vlan_id > 4095) {
1713                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
1714                                 port_id, (unsigned) vlan_id);
1715                 return -EINVAL;
1716         }
1717         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
1718
1719         return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
1720 }
1721
1722 int
1723 rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
1724 {
1725         struct rte_eth_dev *dev;
1726
1727         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1728         dev = &rte_eth_devices[port_id];
1729         if (rx_queue_id >= dev->data->nb_rx_queues) {
1730                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
1731                 return -EINVAL;
1732         }
1733
1734         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
1735         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
1736
1737         return 0;
1738 }
1739
1740 int
1741 rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
1742                                 enum rte_vlan_type vlan_type,
1743                                 uint16_t tpid)
1744 {
1745         struct rte_eth_dev *dev;
1746
1747         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1748         dev = &rte_eth_devices[port_id];
1749         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
1750
1751         return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
1752 }
1753
1754 int
1755 rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
1756 {
1757         struct rte_eth_dev *dev;
1758         int ret = 0;
1759         int mask = 0;
1760         int cur, org = 0;
1761
1762         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1763         dev = &rte_eth_devices[port_id];
1764
1765         /* Check which options have been changed by the application. */
1766         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
1767         org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
1768         if (cur != org) {
1769                 dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
1770                 mask |= ETH_VLAN_STRIP_MASK;
1771         }
1772
1773         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
1774         org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
1775         if (cur != org) {
1776                 dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
1777                 mask |= ETH_VLAN_FILTER_MASK;
1778         }
1779
1780         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
1781         org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
1782         if (cur != org) {
1783                 dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
1784                 mask |= ETH_VLAN_EXTEND_MASK;
1785         }
1786
1787         /* no change */
1788         if (mask == 0)
1789                 return ret;
1790
1791         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
1792         (*dev->dev_ops->vlan_offload_set)(dev, mask);
1793
1794         return ret;
1795 }
1796
1797 int
1798 rte_eth_dev_get_vlan_offload(uint8_t port_id)
1799 {
1800         struct rte_eth_dev *dev;
1801         int ret = 0;
1802
1803         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1804         dev = &rte_eth_devices[port_id];
1805
1806         if (dev->data->dev_conf.rxmode.hw_vlan_strip)
1807                 ret |= ETH_VLAN_STRIP_OFFLOAD;
1808
1809         if (dev->data->dev_conf.rxmode.hw_vlan_filter)
1810                 ret |= ETH_VLAN_FILTER_OFFLOAD;
1811
1812         if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1813                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
1814
1815         return ret;
1816 }
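
/*
 * Usage sketch (illustrative, not part of this file): since
 * rte_eth_dev_set_vlan_offload() only forwards the bits that differ from
 * the current configuration, toggling a single offload is a read-modify-
 * write of the mask returned by rte_eth_dev_get_vlan_offload().
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	if (mask >= 0) {
 *		mask |= ETH_VLAN_STRIP_OFFLOAD;
 *		(void)rte_eth_dev_set_vlan_offload(port_id, mask);
 *	}
 */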
1817
1818 int
1819 rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
1820 {
1821         struct rte_eth_dev *dev;
1822
1823         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1824         dev = &rte_eth_devices[port_id];
1825         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
1826         (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);
1827
1828         return 0;
1829 }
1830
1831 int
1832 rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1833 {
1834         struct rte_eth_dev *dev;
1835
1836         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1837         dev = &rte_eth_devices[port_id];
1838         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
1839         memset(fc_conf, 0, sizeof(*fc_conf));
1840         return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
1841 }
1842
1843 int
1844 rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
1845 {
1846         struct rte_eth_dev *dev;
1847
1848         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1849         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
1850                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
1851                 return -EINVAL;
1852         }
1853
1854         dev = &rte_eth_devices[port_id];
1855         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
1856         return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
1857 }
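
/*
 * Usage sketch (illustrative, not part of this file): flow control is
 * usually changed by reading the current settings first so that the
 * driver-chosen thresholds are kept; RTE_FC_FULL and the rte_eth_fc_conf
 * layout are assumed from the public ethdev API.
 *
 *	struct rte_eth_fc_conf fc_conf;
 *
 *	if (rte_eth_dev_flow_ctrl_get(port_id, &fc_conf) == 0) {
 *		fc_conf.mode = RTE_FC_FULL;
 *		(void)rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 *	}
 */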
1858
1859 int
1860 rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
1861 {
1862         struct rte_eth_dev *dev;
1863
1864         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1865         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
1866                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
1867                 return -EINVAL;
1868         }
1869
1870         dev = &rte_eth_devices[port_id];
1871         /* High water, low water validation are device specific */
1872         if (*dev->dev_ops->priority_flow_ctrl_set)
1873                 return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
1874         return -ENOTSUP;
1875 }
1876
1877 static int
1878 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
1879                         uint16_t reta_size)
1880 {
1881         uint16_t i, num;
1882
1883         if (!reta_conf)
1884                 return -EINVAL;
1885
1886         if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
1887                 RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
1888                                                         RTE_RETA_GROUP_SIZE);
1889                 return -EINVAL;
1890         }
1891
1892         num = reta_size / RTE_RETA_GROUP_SIZE;
1893         for (i = 0; i < num; i++) {
1894                 if (reta_conf[i].mask)
1895                         return 0;
1896         }
1897
1898         return -EINVAL;
1899 }
1900
1901 static int
1902 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
1903                          uint16_t reta_size,
1904                          uint16_t max_rxq)
1905 {
1906         uint16_t i, idx, shift;
1907
1908         if (!reta_conf)
1909                 return -EINVAL;
1910
1911         if (max_rxq == 0) {
1912                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
1913                 return -EINVAL;
1914         }
1915
1916         for (i = 0; i < reta_size; i++) {
1917                 idx = i / RTE_RETA_GROUP_SIZE;
1918                 shift = i % RTE_RETA_GROUP_SIZE;
1919                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
1920                         (reta_conf[idx].reta[shift] >= max_rxq)) {
1921                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
1922                                 "the maximum rxq index: %u\n", idx, shift,
1923                                 reta_conf[idx].reta[shift], max_rxq);
1924                         return -EINVAL;
1925                 }
1926         }
1927
1928         return 0;
1929 }
1930
1931 int
1932 rte_eth_dev_rss_reta_update(uint8_t port_id,
1933                             struct rte_eth_rss_reta_entry64 *reta_conf,
1934                             uint16_t reta_size)
1935 {
1936         struct rte_eth_dev *dev;
1937         int ret;
1938
1939         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1940         /* Check mask bits */
1941         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1942         if (ret < 0)
1943                 return ret;
1944
1945         dev = &rte_eth_devices[port_id];
1946
1947         /* Check entry value */
1948         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
1949                                 dev->data->nb_rx_queues);
1950         if (ret < 0)
1951                 return ret;
1952
1953         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
1954         return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
1955 }
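
/*
 * Usage sketch (illustrative, not part of this file): the table is passed
 * as 64-entry groups; for every entry to update, set the matching bit in
 * the group mask and the target queue in reta[]. dev_info.reta_size is
 * assumed to hold the device table size; this spreads entries round-robin
 * over nb_queues.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[dev_info.reta_size /
 *						  RTE_RETA_GROUP_SIZE];
 *	uint16_t i;
 *
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < dev_info.reta_size; i++) {
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *			1ULL << (i % RTE_RETA_GROUP_SIZE);
 *		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *			i % nb_queues;
 *	}
 *	(void)rte_eth_dev_rss_reta_update(port_id, reta_conf,
 *					  dev_info.reta_size);
 */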
1956
1957 int
1958 rte_eth_dev_rss_reta_query(uint8_t port_id,
1959                            struct rte_eth_rss_reta_entry64 *reta_conf,
1960                            uint16_t reta_size)
1961 {
1962         struct rte_eth_dev *dev;
1963         int ret;
1964
1965         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1966
1967         /* Check mask bits */
1968         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
1969         if (ret < 0)
1970                 return ret;
1971
1972         dev = &rte_eth_devices[port_id];
1973         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
1974         return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
1975 }
1976
1977 int
1978 rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
1979 {
1980         struct rte_eth_dev *dev;
1981         uint16_t rss_hash_protos;
1982
1983         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1984         rss_hash_protos = rss_conf->rss_hf;
1985         if ((rss_hash_protos != 0) &&
1986             ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
1987                 RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
1988                                 rss_hash_protos);
1989                 return -EINVAL;
1990         }
1991         dev = &rte_eth_devices[port_id];
1992         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1993         return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1994 }
1995
1996 int
1997 rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1998                               struct rte_eth_rss_conf *rss_conf)
1999 {
2000         struct rte_eth_dev *dev;
2001
2002         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2003         dev = &rte_eth_devices[port_id];
2004         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2005         return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
2006 }
2007
2008 int
2009 rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
2010                                 struct rte_eth_udp_tunnel *udp_tunnel)
2011 {
2012         struct rte_eth_dev *dev;
2013
2014         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2015         if (udp_tunnel == NULL) {
2016                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2017                 return -EINVAL;
2018         }
2019
2020         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2021                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2022                 return -EINVAL;
2023         }
2024
2025         dev = &rte_eth_devices[port_id];
2026         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2027         return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
2028 }
2029
2030 int
2031 rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
2032                                    struct rte_eth_udp_tunnel *udp_tunnel)
2033 {
2034         struct rte_eth_dev *dev;
2035
2036         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2037         dev = &rte_eth_devices[port_id];
2038
2039         if (udp_tunnel == NULL) {
2040                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2041                 return -EINVAL;
2042         }
2043
2044         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2045                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2046                 return -EINVAL;
2047         }
2048
2049         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2050         return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
2051 }
2052
2053 int
2054 rte_eth_led_on(uint8_t port_id)
2055 {
2056         struct rte_eth_dev *dev;
2057
2058         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2059         dev = &rte_eth_devices[port_id];
2060         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2061         return (*dev->dev_ops->dev_led_on)(dev);
2062 }
2063
2064 int
2065 rte_eth_led_off(uint8_t port_id)
2066 {
2067         struct rte_eth_dev *dev;
2068
2069         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2070         dev = &rte_eth_devices[port_id];
2071         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2072         return (*dev->dev_ops->dev_led_off)(dev);
2073 }
2074
2075 /*
2076  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2077  * an empty spot.
2078  */
2079 static int
2080 get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2081 {
2082         struct rte_eth_dev_info dev_info;
2083         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2084         unsigned i;
2085
2086         rte_eth_dev_info_get(port_id, &dev_info);
2087
2088         for (i = 0; i < dev_info.max_mac_addrs; i++)
2089                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2090                         return i;
2091
2092         return -1;
2093 }
2094
2095 static const struct ether_addr null_mac_addr;
2096
2097 int
2098 rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2099                         uint32_t pool)
2100 {
2101         struct rte_eth_dev *dev;
2102         int index;
2103         uint64_t pool_mask;
2104
2105         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2106         dev = &rte_eth_devices[port_id];
2107         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2108
2109         if (is_zero_ether_addr(addr)) {
2110                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2111                         port_id);
2112                 return -EINVAL;
2113         }
2114         if (pool >= ETH_64_POOLS) {
2115                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2116                 return -EINVAL;
2117         }
2118
2119         index = get_mac_addr_index(port_id, addr);
2120         if (index < 0) {
2121                 index = get_mac_addr_index(port_id, &null_mac_addr);
2122                 if (index < 0) {
2123                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2124                                 port_id);
2125                         return -ENOSPC;
2126                 }
2127         } else {
2128                 pool_mask = dev->data->mac_pool_sel[index];
2129
2130                 /* If both the MAC address and the pool are already there, do nothing. */
2131                 if (pool_mask & (1ULL << pool))
2132                         return 0;
2133         }
2134
2135         /* Update NIC */
2136         (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2137
2138         /* Update address in NIC data structure */
2139         ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2140
2141         /* Update pool bitmap in NIC data structure */
2142         dev->data->mac_pool_sel[index] |= (1ULL << pool);
2143
2144         return 0;
2145 }
2146
2147 int
2148 rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2149 {
2150         struct rte_eth_dev *dev;
2151         int index;
2152
2153         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2154         dev = &rte_eth_devices[port_id];
2155         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2156
2157         index = get_mac_addr_index(port_id, addr);
2158         if (index == 0) {
2159                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2160                 return -EADDRINUSE;
2161         } else if (index < 0)
2162                 return 0;  /* Do nothing if address wasn't found */
2163
2164         /* Update NIC */
2165         (*dev->dev_ops->mac_addr_remove)(dev, index);
2166
2167         /* Update address in NIC data structure */
2168         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2169
2170         /* reset pool bitmap */
2171         dev->data->mac_pool_sel[index] = 0;
2172
2173         return 0;
2174 }
2175
2176 int
2177 rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2178 {
2179         struct rte_eth_dev *dev;
2180
2181         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2182
2183         if (!is_valid_assigned_ether_addr(addr))
2184                 return -EINVAL;
2185
2186         dev = &rte_eth_devices[port_id];
2187         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2188
2189         /* Update default address in NIC data structure */
2190         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2191
2192         (*dev->dev_ops->mac_addr_set)(dev, addr);
2193
2194         return 0;
2195 }
2196
2197 int
2198 rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2199                                 uint16_t rx_mode, uint8_t on)
2200 {
2201         uint16_t num_vfs;
2202         struct rte_eth_dev *dev;
2203         struct rte_eth_dev_info dev_info;
2204
2205         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2206
2207         dev = &rte_eth_devices[port_id];
2208         rte_eth_dev_info_get(port_id, &dev_info);
2209
2210         num_vfs = dev_info.max_vfs;
2211         if (vf >= num_vfs) {
2212                 RTE_PMD_DEBUG_TRACE("set VF RX mode: invalid VF id %d\n", vf);
2213                 return -EINVAL;
2214         }
2215
2216         if (rx_mode == 0) {
2217                 RTE_PMD_DEBUG_TRACE("set VF RX mode: mode mask cannot be zero\n");
2218                 return -EINVAL;
2219         }
2220         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2221         return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2222 }
2223
2224 /*
2225  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2226  * an empty spot.
2227  */
2228 static int
2229 get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2230 {
2231         struct rte_eth_dev_info dev_info;
2232         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2233         unsigned i;
2234
2235         rte_eth_dev_info_get(port_id, &dev_info);
2236         if (!dev->data->hash_mac_addrs)
2237                 return -1;
2238
2239         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2240                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2241                         ETHER_ADDR_LEN) == 0)
2242                         return i;
2243
2244         return -1;
2245 }
2246
2247 int
2248 rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2249                                 uint8_t on)
2250 {
2251         int index;
2252         int ret;
2253         struct rte_eth_dev *dev;
2254
2255         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2256
2257         dev = &rte_eth_devices[port_id];
2258         if (is_zero_ether_addr(addr)) {
2259                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2260                         port_id);
2261                 return -EINVAL;
2262         }
2263
2264         index = get_hash_mac_addr_index(port_id, addr);
2265         /* Check if it's already there, and do nothing */
2266         if ((index >= 0) && (on))
2267                 return 0;
2268
2269         if (index < 0) {
2270                 if (!on) {
2271                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2272                                 "set in UTA\n", port_id);
2273                         return -EINVAL;
2274                 }
2275
2276                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2277                 if (index < 0) {
2278                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2279                                         port_id);
2280                         return -ENOSPC;
2281                 }
2282         }
2283
2284         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2285         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2286         if (ret == 0) {
2287                 /* Update address in NIC data structure */
2288                 if (on)
2289                         ether_addr_copy(addr,
2290                                         &dev->data->hash_mac_addrs[index]);
2291                 else
2292                         ether_addr_copy(&null_mac_addr,
2293                                         &dev->data->hash_mac_addrs[index]);
2294         }
2295
2296         return ret;
2297 }
2298
2299 int
2300 rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2301 {
2302         struct rte_eth_dev *dev;
2303
2304         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2305
2306         dev = &rte_eth_devices[port_id];
2307
2308         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2309         return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2310 }
2311
2312 int
2313 rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2314 {
2315         uint16_t num_vfs;
2316         struct rte_eth_dev *dev;
2317         struct rte_eth_dev_info dev_info;
2318
2319         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2320
2321         dev = &rte_eth_devices[port_id];
2322         rte_eth_dev_info_get(port_id, &dev_info);
2323
2324         num_vfs = dev_info.max_vfs;
2325         if (vf >= num_vfs) {
2326                 RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2327                 return -EINVAL;
2328         }
2329
2330         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2331         return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2332 }
2333
2334 int
2335 rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2336 {
2337         uint16_t num_vfs;
2338         struct rte_eth_dev *dev;
2339         struct rte_eth_dev_info dev_info;
2340
2341         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2342
2343         dev = &rte_eth_devices[port_id];
2344         rte_eth_dev_info_get(port_id, &dev_info);
2345
2346         num_vfs = dev_info.max_vfs;
2347         if (vf >= num_vfs) {
2348                 RTE_PMD_DEBUG_TRACE("set VF TX: invalid VF id=%d\n", vf);
2349                 return -EINVAL;
2350         }
2351
2352         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2353         return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2354 }
2355
2356 int
2357 rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2358                                uint64_t vf_mask, uint8_t vlan_on)
2359 {
2360         struct rte_eth_dev *dev;
2361
2362         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2363
2364         dev = &rte_eth_devices[port_id];
2365
2366         if (vlan_id > ETHER_MAX_VLAN_ID) {
2367                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2368                         vlan_id);
2369                 return -EINVAL;
2370         }
2371
2372         if (vf_mask == 0) {
2373                 RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2374                 return -EINVAL;
2375         }
2376
2377         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2378         return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2379                                                    vf_mask, vlan_on);
2380 }
2381
2382 int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2383                                         uint16_t tx_rate)
2384 {
2385         struct rte_eth_dev *dev;
2386         struct rte_eth_dev_info dev_info;
2387         struct rte_eth_link link;
2388
2389         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2390
2391         dev = &rte_eth_devices[port_id];
2392         rte_eth_dev_info_get(port_id, &dev_info);
2393         link = dev->data->dev_link;
2394
2395         if (queue_idx >= dev_info.max_tx_queues) {
2396                 RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2397                                 "invalid queue id=%d\n", port_id, queue_idx);
2398                 return -EINVAL;
2399         }
2400
2401         if (tx_rate > link.link_speed) {
2402                 RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2403                                 "bigger than link speed= %d\n",
2404                         tx_rate, link.link_speed);
2405                 return -EINVAL;
2406         }
2407
2408         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2409         return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2410 }
2411
2412 int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2413                                 uint64_t q_msk)
2414 {
2415         struct rte_eth_dev *dev;
2416         struct rte_eth_dev_info dev_info;
2417         struct rte_eth_link link;
2418
2419         if (q_msk == 0)
2420                 return 0;
2421
2422         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2423
2424         dev = &rte_eth_devices[port_id];
2425         rte_eth_dev_info_get(port_id, &dev_info);
2426         link = dev->data->dev_link;
2427
2428         if (vf >= dev_info.max_vfs) {
2429                 RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2430                                 "invalid vf id=%d\n", port_id, vf);
2431                 return -EINVAL;
2432         }
2433
2434         if (tx_rate > link.link_speed) {
2435                 RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2436                                 "bigger than link speed= %d\n",
2437                                 tx_rate, link.link_speed);
2438                 return -EINVAL;
2439         }
2440
2441         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2442         return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2443 }
2444
2445 int
2446 rte_eth_mirror_rule_set(uint8_t port_id,
2447                         struct rte_eth_mirror_conf *mirror_conf,
2448                         uint8_t rule_id, uint8_t on)
2449 {
2450         struct rte_eth_dev *dev;
2451
2452         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2453         if (mirror_conf->rule_type == 0) {
2454                 RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2455                 return -EINVAL;
2456         }
2457
2458         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2459                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2460                                 ETH_64_POOLS - 1);
2461                 return -EINVAL;
2462         }
2463
2464         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2465              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2466             (mirror_conf->pool_mask == 0)) {
2467                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2468                 return -EINVAL;
2469         }
2470
2471         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2472             mirror_conf->vlan.vlan_mask == 0) {
2473                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2474                 return -EINVAL;
2475         }
2476
2477         dev = &rte_eth_devices[port_id];
2478         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2479
2480         return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2481 }
2482
2483 int
2484 rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2485 {
2486         struct rte_eth_dev *dev;
2487
2488         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2489
2490         dev = &rte_eth_devices[port_id];
2491         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2492
2493         return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2494 }
2495
2496 int
2497 rte_eth_dev_callback_register(uint8_t port_id,
2498                         enum rte_eth_event_type event,
2499                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2500 {
2501         struct rte_eth_dev *dev;
2502         struct rte_eth_dev_callback *user_cb;
2503
2504         if (!cb_fn)
2505                 return -EINVAL;
2506
2507         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2508
2509         dev = &rte_eth_devices[port_id];
2510         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2511
2512         TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2513                 if (user_cb->cb_fn == cb_fn &&
2514                         user_cb->cb_arg == cb_arg &&
2515                         user_cb->event == event) {
2516                         break;
2517                 }
2518         }
2519
2520         /* create a new callback. */
2521         if (user_cb == NULL) {
2522                 user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2523                                         sizeof(struct rte_eth_dev_callback), 0);
2524                 if (user_cb != NULL) {
2525                         user_cb->cb_fn = cb_fn;
2526                         user_cb->cb_arg = cb_arg;
2527                         user_cb->event = event;
2528                         TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2529                 }
2530         }
2531
2532         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2533         return (user_cb == NULL) ? -ENOMEM : 0;
2534 }
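
/*
 * Usage sketch (illustrative, not part of this file): a registered callback
 * is invoked from _rte_eth_dev_callback_process() below with the port id,
 * the event type and the cb_arg given at registration. A minimal link
 * status handler, assuming RTE_ETH_EVENT_INTR_LSC is the event of interest:
 *
 *	static void
 *	lsc_event_cb(uint8_t port_id, enum rte_eth_event_type event, void *arg)
 *	{
 *		RTE_SET_USED(arg);
 *		printf("port %u: event %d\n", port_id, event);
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */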
2535
2536 int
2537 rte_eth_dev_callback_unregister(uint8_t port_id,
2538                         enum rte_eth_event_type event,
2539                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2540 {
2541         int ret;
2542         struct rte_eth_dev *dev;
2543         struct rte_eth_dev_callback *cb, *next;
2544
2545         if (!cb_fn)
2546                 return -EINVAL;
2547
2548         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2549
2550         dev = &rte_eth_devices[port_id];
2551         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2552
2553         ret = 0;
2554         for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2555
2556                 next = TAILQ_NEXT(cb, next);
2557
2558                 if (cb->cb_fn != cb_fn || cb->event != event ||
2559                                 (cb->cb_arg != (void *)-1 &&
2560                                 cb->cb_arg != cb_arg))
2561                         continue;
2562
2563                 /*
2564                  * if this callback is not executing right now,
2565                  * then remove it.
2566                  */
2567                 if (cb->active == 0) {
2568                         TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2569                         rte_free(cb);
2570                 } else {
2571                         ret = -EAGAIN;
2572                 }
2573         }
2574
2575         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2576         return ret;
2577 }
2578
2579 void
2580 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2581         enum rte_eth_event_type event, void *cb_arg)
2582 {
2583         struct rte_eth_dev_callback *cb_lst;
2584         struct rte_eth_dev_callback dev_cb;
2585
2586         rte_spinlock_lock(&rte_eth_dev_cb_lock);
2587         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2588                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2589                         continue;
2590                 dev_cb = *cb_lst;
2591                 cb_lst->active = 1;
2592                 if (cb_arg != NULL)
2593                         dev_cb.cb_arg = (void *) cb_arg;
2594
2595                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2596                 dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2597                                                 dev_cb.cb_arg);
2598                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
2599                 cb_lst->active = 0;
2600         }
2601         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2602 }
2603
2604 int
2605 rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2606 {
2607         uint32_t vec;
2608         struct rte_eth_dev *dev;
2609         struct rte_intr_handle *intr_handle;
2610         uint16_t qid;
2611         int rc;
2612
2613         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2614
2615         dev = &rte_eth_devices[port_id];
2616         intr_handle = &dev->pci_dev->intr_handle;
2617         if (!intr_handle->intr_vec) {
2618                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2619                 return -EPERM;
2620         }
2621
2622         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2623                 vec = intr_handle->intr_vec[qid];
2624                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2625                 if (rc && rc != -EEXIST) {
2626                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2627                                         " op %d epfd %d vec %u\n",
2628                                         port_id, qid, op, epfd, vec);
2629                 }
2630         }
2631
2632         return 0;
2633 }
2634
2635 const struct rte_memzone *
2636 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2637                          uint16_t queue_id, size_t size, unsigned align,
2638                          int socket_id)
2639 {
2640         char z_name[RTE_MEMZONE_NAMESIZE];
2641         const struct rte_memzone *mz;
2642
2643         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2644                  dev->driver->pci_drv.driver.name, ring_name,
2645                  dev->data->port_id, queue_id);
2646
2647         mz = rte_memzone_lookup(z_name);
2648         if (mz)
2649                 return mz;
2650
2651         if (rte_xen_dom0_supported())
2652                 return rte_memzone_reserve_bounded(z_name, size, socket_id,
2653                                                    0, align, RTE_PGSIZE_2M);
2654         else
2655                 return rte_memzone_reserve_aligned(z_name, size, socket_id,
2656                                                    0, align);
2657 }
2658
2659 int
2660 rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2661                           int epfd, int op, void *data)
2662 {
2663         uint32_t vec;
2664         struct rte_eth_dev *dev;
2665         struct rte_intr_handle *intr_handle;
2666         int rc;
2667
2668         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2669
2670         dev = &rte_eth_devices[port_id];
2671         if (queue_id >= dev->data->nb_rx_queues) {
2672                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2673                 return -EINVAL;
2674         }
2675
2676         intr_handle = &dev->pci_dev->intr_handle;
2677         if (!intr_handle->intr_vec) {
2678                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2679                 return -EPERM;
2680         }
2681
2682         vec = intr_handle->intr_vec[queue_id];
2683         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2684         if (rc && rc != -EEXIST) {
2685                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2686                                 " op %d epfd %d vec %u\n",
2687                                 port_id, queue_id, op, epfd, vec);
2688                 return rc;
2689         }
2690
2691         return 0;
2692 }
2693
2694 int
2695 rte_eth_dev_rx_intr_enable(uint8_t port_id,
2696                            uint16_t queue_id)
2697 {
2698         struct rte_eth_dev *dev;
2699
2700         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2701
2702         dev = &rte_eth_devices[port_id];
2703
2704         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2705         return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2706 }
2707
2708 int
2709 rte_eth_dev_rx_intr_disable(uint8_t port_id,
2710                             uint16_t queue_id)
2711 {
2712         struct rte_eth_dev *dev;
2713
2714         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2715
2716         dev = &rte_eth_devices[port_id];
2717
2718         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2719         return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2720 }
2721
2722 #ifdef RTE_NIC_BYPASS
2723 int rte_eth_dev_bypass_init(uint8_t port_id)
2724 {
2725         struct rte_eth_dev *dev;
2726
2727         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2728
2729         dev = &rte_eth_devices[port_id];
2730         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2731         (*dev->dev_ops->bypass_init)(dev);
2732         return 0;
2733 }
2734
2735 int
2736 rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2737 {
2738         struct rte_eth_dev *dev;
2739
2740         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2741
2742         dev = &rte_eth_devices[port_id];
2743         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2744         (*dev->dev_ops->bypass_state_show)(dev, state);
2745         return 0;
2746 }
2747
2748 int
2749 rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2750 {
2751         struct rte_eth_dev *dev;
2752
2753         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2754
2755         dev = &rte_eth_devices[port_id];
2756         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2757         (*dev->dev_ops->bypass_state_set)(dev, new_state);
2758         return 0;
2759 }
2760
2761 int
2762 rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2763 {
2764         struct rte_eth_dev *dev;
2765
2766         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2767
2768         dev = &rte_eth_devices[port_id];
2769         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2770         (*dev->dev_ops->bypass_event_show)(dev, event, state);
2771         return 0;
2772 }
2773
2774 int
2775 rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2776 {
2777         struct rte_eth_dev *dev;
2778
2779         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2780
2781         dev = &rte_eth_devices[port_id];
2782
2783         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2784         (*dev->dev_ops->bypass_event_set)(dev, event, state);
2785         return 0;
2786 }
2787
2788 int
2789 rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2790 {
2791         struct rte_eth_dev *dev;
2792
2793         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2794
2795         dev = &rte_eth_devices[port_id];
2796
2797         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2798         (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2799         return 0;
2800 }
2801
2802 int
2803 rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2804 {
2805         struct rte_eth_dev *dev;
2806
2807         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2808
2809         dev = &rte_eth_devices[port_id];
2810
2811         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2812         (*dev->dev_ops->bypass_ver_show)(dev, ver);
2813         return 0;
2814 }
2815
2816 int
2817 rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2818 {
2819         struct rte_eth_dev *dev;
2820
2821         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2822
2823         dev = &rte_eth_devices[port_id];
2824
2825         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2826         (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2827         return 0;
2828 }
2829
2830 int
2831 rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2832 {
2833         struct rte_eth_dev *dev;
2834
2835         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2836
2837         dev = &rte_eth_devices[port_id];
2838
2839         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2840         (*dev->dev_ops->bypass_wd_reset)(dev);
2841         return 0;
2842 }
2843 #endif
2844
2845 int
2846 rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2847 {
2848         struct rte_eth_dev *dev;
2849
2850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2851
2852         dev = &rte_eth_devices[port_id];
2853         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2854         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2855                                 RTE_ETH_FILTER_NOP, NULL);
2856 }
2857
2858 int
2859 rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2860                        enum rte_filter_op filter_op, void *arg)
2861 {
2862         struct rte_eth_dev *dev;
2863
2864         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2865
2866         dev = &rte_eth_devices[port_id];
2867         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2868         return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2869 }
2870
2871 void *
2872 rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2873                 rte_rx_callback_fn fn, void *user_param)
2874 {
2875 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2876         rte_errno = ENOTSUP;
2877         return NULL;
2878 #endif
2879         /* check input parameters */
2880         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2881                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2882                 rte_errno = EINVAL;
2883                 return NULL;
2884         }
2885         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2886
2887         if (cb == NULL) {
2888                 rte_errno = ENOMEM;
2889                 return NULL;
2890         }
2891
2892         cb->fn.rx = fn;
2893         cb->param = user_param;
2894
2895         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2896         /* Add the callback in FIFO order. */
2897         struct rte_eth_rxtx_callback *tail =
2898                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2899
2900         if (!tail) {
2901                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2902
2903         } else {
2904                 while (tail->next)
2905                         tail = tail->next;
2906                 tail->next = cb;
2907         }
2908         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2909
2910         return cb;
2911 }
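
/*
 * Usage sketch (illustrative, not part of this file): a callback added here
 * runs after each rte_eth_rx_burst() on the queue and must return the
 * number of packets it leaves in the array. The prototype below is assumed
 * from rte_rx_callback_fn; this example only counts received packets.
 *
 *	static uint16_t
 *	count_rx_cb(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		    uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
 *	{
 *		uint64_t *counter = user_param;
 *
 *		RTE_SET_USED(port);
 *		RTE_SET_USED(queue);
 *		RTE_SET_USED(pkts);
 *		RTE_SET_USED(max_pkts);
 *		*counter += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *	void *cb = rte_eth_add_rx_callback(port_id, 0, count_rx_cb, &rx_count);
 */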
2912
2913 void *
2914 rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
2915                 rte_rx_callback_fn fn, void *user_param)
2916 {
2917 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2918         rte_errno = ENOTSUP;
2919         return NULL;
2920 #endif
2921         /* check input parameters */
2922         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2923                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2924                 rte_errno = EINVAL;
2925                 return NULL;
2926         }
2927
2928         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2929
2930         if (cb == NULL) {
2931                 rte_errno = ENOMEM;
2932                 return NULL;
2933         }
2934
2935         cb->fn.rx = fn;
2936         cb->param = user_param;
2937
2938         rte_spinlock_lock(&rte_eth_rx_cb_lock);
2939         /* Add the callback at the first position. */
2940         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2941         rte_smp_wmb();
2942         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2943         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2944
2945         return cb;
2946 }
2947
2948 void *
2949 rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2950                 rte_tx_callback_fn fn, void *user_param)
2951 {
2952 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2953         rte_errno = ENOTSUP;
2954         return NULL;
2955 #endif
2956         /* check input parameters */
2957         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2958                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2959                 rte_errno = EINVAL;
2960                 return NULL;
2961         }
2962
2963         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2964
2965         if (cb == NULL) {
2966                 rte_errno = ENOMEM;
2967                 return NULL;
2968         }
2969
2970         cb->fn.tx = fn;
2971         cb->param = user_param;
2972
2973         rte_spinlock_lock(&rte_eth_tx_cb_lock);
2974         /* Add the callback in FIFO order. */
2975         struct rte_eth_rxtx_callback *tail =
2976                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2977
2978         if (!tail) {
2979                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2980
2981         } else {
2982                 while (tail->next)
2983                         tail = tail->next;
2984                 tail->next = cb;
2985         }
2986         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2987
2988         return cb;
2989 }
2990
2991 int
2992 rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2993                 struct rte_eth_rxtx_callback *user_cb)
2994 {
2995 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
2996         return -ENOTSUP;
2997 #endif
2998         /* Check input parameters. */
2999         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3000         if (user_cb == NULL ||
3001                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3002                 return -EINVAL;
3003
3004         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3005         struct rte_eth_rxtx_callback *cb;
3006         struct rte_eth_rxtx_callback **prev_cb;
3007         int ret = -EINVAL;
3008
3009         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3010         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3011         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3012                 cb = *prev_cb;
3013                 if (cb == user_cb) {
3014                         /* Remove the user cb from the callback list. */
3015                         *prev_cb = cb->next;
3016                         ret = 0;
3017                         break;
3018                 }
3019         }
3020         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3021
3022         return ret;
3023 }
3024
3025 int
3026 rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
3027                 struct rte_eth_rxtx_callback *user_cb)
3028 {
3029 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3030         return -ENOTSUP;
3031 #endif
3032         /* Check input parameters. */
3033         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3034         if (user_cb == NULL ||
3035                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3036                 return -EINVAL;
3037
3038         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3039         int ret = -EINVAL;
3040         struct rte_eth_rxtx_callback *cb;
3041         struct rte_eth_rxtx_callback **prev_cb;
3042
3043         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3044         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3045         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3046                 cb = *prev_cb;
3047                 if (cb == user_cb) {
3048                         /* Remove the user cb from the callback list. */
3049                         *prev_cb = cb->next;
3050                         ret = 0;
3051                         break;
3052                 }
3053         }
3054         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3055
3056         return ret;
3057 }
3058
3059 int
3060 rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3061         struct rte_eth_rxq_info *qinfo)
3062 {
3063         struct rte_eth_dev *dev;
3064
3065         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3066
3067         if (qinfo == NULL)
3068                 return -EINVAL;
3069
3070         dev = &rte_eth_devices[port_id];
3071         if (queue_id >= dev->data->nb_rx_queues) {
3072                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3073                 return -EINVAL;
3074         }
3075
3076         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3077
3078         memset(qinfo, 0, sizeof(*qinfo));
3079         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3080         return 0;
3081 }
3082
3083 int
3084 rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3085         struct rte_eth_txq_info *qinfo)
3086 {
3087         struct rte_eth_dev *dev;
3088
3089         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3090
3091         if (qinfo == NULL)
3092                 return -EINVAL;
3093
3094         dev = &rte_eth_devices[port_id];
3095         if (queue_id >= dev->data->nb_tx_queues) {
3096                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3097                 return -EINVAL;
3098         }
3099
3100         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3101
3102         memset(qinfo, 0, sizeof(*qinfo));
3103         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3104         return 0;
3105 }
3106
3107 int
3108 rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3109                              struct ether_addr *mc_addr_set,
3110                              uint32_t nb_mc_addr)
3111 {
3112         struct rte_eth_dev *dev;
3113
3114         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3115
3116         dev = &rte_eth_devices[port_id];
3117         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3118         return dev->dev_ops->set_mc_addr_list(dev, mc_addr_set, nb_mc_addr);
3119 }
3120
3121 int
3122 rte_eth_timesync_enable(uint8_t port_id)
3123 {
3124         struct rte_eth_dev *dev;
3125
3126         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3127         dev = &rte_eth_devices[port_id];
3128
3129         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3130         return (*dev->dev_ops->timesync_enable)(dev);
3131 }
3132
3133 int
3134 rte_eth_timesync_disable(uint8_t port_id)
3135 {
3136         struct rte_eth_dev *dev;
3137
3138         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3139         dev = &rte_eth_devices[port_id];
3140
3141         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3142         return (*dev->dev_ops->timesync_disable)(dev);
3143 }
3144
3145 int
3146 rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3147                                    uint32_t flags)
3148 {
3149         struct rte_eth_dev *dev;
3150
3151         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3152         dev = &rte_eth_devices[port_id];
3153
3154         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3155         return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3156 }
3157
3158 int
3159 rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3160 {
3161         struct rte_eth_dev *dev;
3162
3163         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3164         dev = &rte_eth_devices[port_id];
3165
3166         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3167         return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3168 }
3169
3170 int
3171 rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
3172 {
3173         struct rte_eth_dev *dev;
3174
3175         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3176         dev = &rte_eth_devices[port_id];
3177
3178         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3179         return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3180 }
3181
3182 int
3183 rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
3184 {
3185         struct rte_eth_dev *dev;
3186
3187         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3188         dev = &rte_eth_devices[port_id];
3189
3190         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3191         return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3192 }
3193
3194 int
3195 rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
3196 {
3197         struct rte_eth_dev *dev;
3198
3199         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3200         dev = &rte_eth_devices[port_id];
3201
3202         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3203         return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3204 }
3205
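/*
 * Raw register dump, forwarded to the PMD's get_reg op.  The usual
 * convention (ultimately PMD-defined) is that a NULL info->data asks only
 * for the register count and width, so the caller can size a buffer and
 * call again.
 */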
3206 int
3207 rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3208 {
3209         struct rte_eth_dev *dev;
3210
3211         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3212
3213         dev = &rte_eth_devices[port_id];
3214         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3215         return (*dev->dev_ops->get_reg)(dev, info);
3216 }
3217
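/*
 * EEPROM access helpers.  A hedged read sketch, assuming the data/offset/
 * length layout of struct rte_dev_eeprom_info and a PMD that implements the
 * ops (error handling omitted):
 *
 *     int len = rte_eth_dev_get_eeprom_length(port_id);
 *     struct rte_dev_eeprom_info ee = {
 *             .data = malloc(len), .offset = 0, .length = len,
 *     };
 *     if (ee.data != NULL && rte_eth_dev_get_eeprom(port_id, &ee) == 0)
 *             ... parse the EEPROM contents ...
 */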
3218 int
3219 rte_eth_dev_get_eeprom_length(uint8_t port_id)
3220 {
3221         struct rte_eth_dev *dev;
3222
3223         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3224
3225         dev = &rte_eth_devices[port_id];
3226         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3227         return (*dev->dev_ops->get_eeprom_length)(dev);
3228 }
3229
3230 int
3231 rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3232 {
3233         struct rte_eth_dev *dev;
3234
3235         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3236
3237         dev = &rte_eth_devices[port_id];
3238         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3239         return (*dev->dev_ops->get_eeprom)(dev, info);
3240 }
3241
3242 int
3243 rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3244 {
3245         struct rte_eth_dev *dev;
3246
3247         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3248
3249         dev = &rte_eth_devices[port_id];
3250         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3251         return (*dev->dev_ops->set_eeprom)(dev, info);
3252 }
3253
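/*
 * Data Center Bridging (DCB) snapshot.  dcb_info is zeroed before the
 * dev_op lookup, so a port without get_dcb_info support returns -ENOTSUP
 * with a cleared structure.
 */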
3254 int
3255 rte_eth_dev_get_dcb_info(uint8_t port_id,
3256                              struct rte_eth_dcb_info *dcb_info)
3257 {
3258         struct rte_eth_dev *dev;
3259
3260         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3261
3262         dev = &rte_eth_devices[port_id];
3263         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3264
3265         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3266         return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3267 }
3268
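/*
 * Copy bus-level identity from a PCI device into the generic ethdev data:
 * PCI driver flags are translated into the RTE_ETH_DEV_* flags, and the
 * kernel driver, NUMA node and driver name are mirrored for later queries.
 */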
3269 void
3270 rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
3271 {
3272         if ((eth_dev == NULL) || (pci_dev == NULL)) {
3273                 RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
3274                                 eth_dev, pci_dev);
3275                 return;
3276         }
3277
3278         eth_dev->data->dev_flags = 0;
3279         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
3280                 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
3281         if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
3282                 eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
3283
3284         eth_dev->data->kdrv = pci_dev->kdrv;
3285         eth_dev->data->numa_node = pci_dev->device.numa_node;
3286         eth_dev->data->drv_name = pci_dev->driver->driver.name;
3287 }
3288
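/*
 * L2 tunnel (e.g. E-Tag) configuration: the first helper sets the outer
 * Ethertype used to recognise a tunnel type, while the second enables or
 * disables the offload bits selected by 'mask' for that tunnel type.
 */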
3289 int
3290 rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
3291                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
3292 {
3293         struct rte_eth_dev *dev;
3294
3295         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3296         if (l2_tunnel == NULL) {
3297                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3298                 return -EINVAL;
3299         }
3300
3301         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3302                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3303                 return -EINVAL;
3304         }
3305
3306         dev = &rte_eth_devices[port_id];
3307         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3308                                 -ENOTSUP);
3309         return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
3310 }
3311
3312 int
3313 rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
3314                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
3315                                   uint32_t mask,
3316                                   uint8_t en)
3317 {
3318         struct rte_eth_dev *dev;
3319
3320         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3321
3322         if (l2_tunnel == NULL) {
3323                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3324                 return -EINVAL;
3325         }
3326
3327         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3328                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3329                 return -EINVAL;
3330         }
3331
3332         if (mask == 0) {
3333                 RTE_PMD_DEBUG_TRACE("Mask must be non-zero.\n");
3334                 return -EINVAL;
3335         }
3336
3337         dev = &rte_eth_devices[port_id];
3338         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3339                                 -ENOTSUP);
3340         return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
3341 }