New upstream version 18.02
[deb_dpdk.git] lib/librte_ether/rte_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <sys/types.h>
6 #include <sys/queue.h>
7 #include <ctype.h>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include <stdarg.h>
12 #include <errno.h>
13 #include <stdint.h>
14 #include <inttypes.h>
15 #include <netinet/in.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_common.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
33 #include <rte_mbuf.h>
34 #include <rte_errno.h>
35 #include <rte_spinlock.h>
36 #include <rte_string_fns.h>
37 #include <rte_compat.h>
38
39 #include "rte_ether.h"
40 #include "rte_ethdev.h"
41 #include "rte_ethdev_driver.h"
42 #include "ethdev_profile.h"
43
44 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
45 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
46 static uint16_t eth_dev_last_created_port;
47
48 /* spinlock for eth device callbacks */
49 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
50
51 /* spinlock for add/remove rx callbacks */
52 static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
53
54 /* spinlock for add/remove tx callbacks */
55 static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
56
57 /* spinlock for shared data allocation */
58 static rte_spinlock_t rte_eth_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
59
60 /* store statistics names and their offsets in the stats structure */
61 struct rte_eth_xstats_name_off {
62         char name[RTE_ETH_XSTATS_NAME_SIZE];
63         unsigned offset;
64 };
65
66 /* Shared memory between primary and secondary processes. */
67 static struct {
68         uint64_t next_owner_id;
69         rte_spinlock_t ownership_lock;
70         struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
71 } *rte_eth_dev_shared_data;
72
73 static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
74         {"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
75         {"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
76         {"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
77         {"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
78         {"rx_missed_errors", offsetof(struct rte_eth_stats, imissed)},
79         {"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
80         {"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
81         {"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
82                 rx_nombuf)},
83 };
84
85 #define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
86
87 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
88         {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
89         {"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
90         {"errors", offsetof(struct rte_eth_stats, q_errors)},
91 };
92
93 #define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /       \
94                 sizeof(rte_rxq_stats_strings[0]))
95
96 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
97         {"packets", offsetof(struct rte_eth_stats, q_opackets)},
98         {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
99 };
100 #define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /       \
101                 sizeof(rte_txq_stats_strings[0]))
102
103 #define RTE_RX_OFFLOAD_BIT2STR(_name)   \
104         { DEV_RX_OFFLOAD_##_name, #_name }
105
106 static const struct {
107         uint64_t offload;
108         const char *name;
109 } rte_rx_offload_names[] = {
110         RTE_RX_OFFLOAD_BIT2STR(VLAN_STRIP),
111         RTE_RX_OFFLOAD_BIT2STR(IPV4_CKSUM),
112         RTE_RX_OFFLOAD_BIT2STR(UDP_CKSUM),
113         RTE_RX_OFFLOAD_BIT2STR(TCP_CKSUM),
114         RTE_RX_OFFLOAD_BIT2STR(TCP_LRO),
115         RTE_RX_OFFLOAD_BIT2STR(QINQ_STRIP),
116         RTE_RX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
117         RTE_RX_OFFLOAD_BIT2STR(MACSEC_STRIP),
118         RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
119         RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
120         RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
121         RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
122         RTE_RX_OFFLOAD_BIT2STR(CRC_STRIP),
123         RTE_RX_OFFLOAD_BIT2STR(SCATTER),
124         RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
125         RTE_RX_OFFLOAD_BIT2STR(SECURITY),
126 };
127
128 #undef RTE_RX_OFFLOAD_BIT2STR
129
130 #define RTE_TX_OFFLOAD_BIT2STR(_name)   \
131         { DEV_TX_OFFLOAD_##_name, #_name }
132
133 static const struct {
134         uint64_t offload;
135         const char *name;
136 } rte_tx_offload_names[] = {
137         RTE_TX_OFFLOAD_BIT2STR(VLAN_INSERT),
138         RTE_TX_OFFLOAD_BIT2STR(IPV4_CKSUM),
139         RTE_TX_OFFLOAD_BIT2STR(UDP_CKSUM),
140         RTE_TX_OFFLOAD_BIT2STR(TCP_CKSUM),
141         RTE_TX_OFFLOAD_BIT2STR(SCTP_CKSUM),
142         RTE_TX_OFFLOAD_BIT2STR(TCP_TSO),
143         RTE_TX_OFFLOAD_BIT2STR(UDP_TSO),
144         RTE_TX_OFFLOAD_BIT2STR(OUTER_IPV4_CKSUM),
145         RTE_TX_OFFLOAD_BIT2STR(QINQ_INSERT),
146         RTE_TX_OFFLOAD_BIT2STR(VXLAN_TNL_TSO),
147         RTE_TX_OFFLOAD_BIT2STR(GRE_TNL_TSO),
148         RTE_TX_OFFLOAD_BIT2STR(IPIP_TNL_TSO),
149         RTE_TX_OFFLOAD_BIT2STR(GENEVE_TNL_TSO),
150         RTE_TX_OFFLOAD_BIT2STR(MACSEC_INSERT),
151         RTE_TX_OFFLOAD_BIT2STR(MT_LOCKFREE),
152         RTE_TX_OFFLOAD_BIT2STR(MULTI_SEGS),
153         RTE_TX_OFFLOAD_BIT2STR(MBUF_FAST_FREE),
154         RTE_TX_OFFLOAD_BIT2STR(SECURITY),
155 };
156
157 #undef RTE_TX_OFFLOAD_BIT2STR
158
159 /**
160  * The user application callback description.
161  *
162  * It contains the callback address to be registered by the user
163  * application, a pointer to the callback parameters, and the event type.
164  */
165 struct rte_eth_dev_callback {
166         TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
167         rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
168         void *cb_arg;                           /**< Parameter for callback */
169         void *ret_param;                        /**< Return parameter */
170         enum rte_eth_event_type event;          /**< Interrupt event type */
171         uint32_t active;                        /**< Callback is executing */
172 };
173
174 enum {
175         STAT_QMAP_TX = 0,
176         STAT_QMAP_RX
177 };
178
179 uint16_t
180 rte_eth_find_next(uint16_t port_id)
181 {
182         while (port_id < RTE_MAX_ETHPORTS &&
183                rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
184                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
185                 port_id++;
186
187         if (port_id >= RTE_MAX_ETHPORTS)
188                 return RTE_MAX_ETHPORTS;
189
190         return port_id;
191 }
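/*
 * Usage sketch (illustrative application code, not part of this file):
 * iterate over every valid port id with rte_eth_find_next().
 *
 *     uint16_t pid;
 *
 *     for (pid = rte_eth_find_next(0); pid < RTE_MAX_ETHPORTS;
 *          pid = rte_eth_find_next(pid + 1))
 *             printf("port %u is usable\n", pid);
 *
 * Applications normally rely on the RTE_ETH_FOREACH_DEV() macro, which
 * wraps this kind of iteration.
 */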
192
193 static void
194 rte_eth_dev_shared_data_prepare(void)
195 {
196         const unsigned flags = 0;
197         const struct rte_memzone *mz;
198
199         rte_spinlock_lock(&rte_eth_shared_data_lock);
200
201         if (rte_eth_dev_shared_data == NULL) {
202                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
203                         /* Allocate port data and ownership shared memory. */
204                         mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
205                                         sizeof(*rte_eth_dev_shared_data),
206                                         rte_socket_id(), flags);
207                 } else
208                         mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
209                 if (mz == NULL)
210                         rte_panic("Cannot allocate ethdev shared data\n");
211
212                 rte_eth_dev_shared_data = mz->addr;
213                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
214                         rte_eth_dev_shared_data->next_owner_id =
215                                         RTE_ETH_DEV_NO_OWNER + 1;
216                         rte_spinlock_init(&rte_eth_dev_shared_data->ownership_lock);
217                         memset(rte_eth_dev_shared_data->data, 0,
218                                sizeof(rte_eth_dev_shared_data->data));
219                 }
220         }
221
222         rte_spinlock_unlock(&rte_eth_shared_data_lock);
223 }
224
225 struct rte_eth_dev *
226 rte_eth_dev_allocated(const char *name)
227 {
228         unsigned i;
229
230         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
231                 if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
232                     strcmp(rte_eth_devices[i].data->name, name) == 0)
233                         return &rte_eth_devices[i];
234         }
235         return NULL;
236 }
237
238 static uint16_t
239 rte_eth_dev_find_free_port(void)
240 {
241         unsigned i;
242
243         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
244                 /* Using shared name field to find a free port. */
245                 if (rte_eth_dev_shared_data->data[i].name[0] == '\0') {
246                         RTE_ASSERT(rte_eth_devices[i].state ==
247                                    RTE_ETH_DEV_UNUSED);
248                         return i;
249                 }
250         }
251         return RTE_MAX_ETHPORTS;
252 }
253
254 static struct rte_eth_dev *
255 eth_dev_get(uint16_t port_id)
256 {
257         struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
258
259         eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
260         eth_dev->state = RTE_ETH_DEV_ATTACHED;
261
262         eth_dev_last_created_port = port_id;
263
264         return eth_dev;
265 }
266
267 struct rte_eth_dev *
268 rte_eth_dev_allocate(const char *name)
269 {
270         uint16_t port_id;
271         struct rte_eth_dev *eth_dev = NULL;
272
273         rte_eth_dev_shared_data_prepare();
274
275         /* Synchronize port creation between primary and secondary processes. */
276         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
277
278         port_id = rte_eth_dev_find_free_port();
279         if (port_id == RTE_MAX_ETHPORTS) {
280                 RTE_LOG(ERR, EAL, "Reached maximum number of Ethernet ports\n");
281                 goto unlock;
282         }
283
284         if (rte_eth_dev_allocated(name) != NULL) {
285                 RTE_LOG(ERR, EAL, "Ethernet Device with name %s already allocated!\n",
286                                 name);
287                 goto unlock;
288         }
289
290         eth_dev = eth_dev_get(port_id);
291         snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
292         eth_dev->data->port_id = port_id;
293         eth_dev->data->mtu = ETHER_MTU;
294
295 unlock:
296         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
297
298         if (eth_dev != NULL)
299                 _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
300
301         return eth_dev;
302 }
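/*
 * Usage sketch (illustrative PMD probe code; "net_example0" and
 * example_dev_ops are hypothetical names):
 *
 *     struct rte_eth_dev *eth_dev = rte_eth_dev_allocate("net_example0");
 *
 *     if (eth_dev == NULL)
 *             return -ENOMEM;
 *     eth_dev->dev_ops = &example_dev_ops;
 *     // then fill eth_dev->data (dev_private, mac_addrs, ...) before use
 */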
303
304 /*
305  * Attach to a port already registered by the primary process, which
306  * ensures that the same device gets the same port id in both the
307  * primary and secondary processes.
308  */
309 struct rte_eth_dev *
310 rte_eth_dev_attach_secondary(const char *name)
311 {
312         uint16_t i;
313         struct rte_eth_dev *eth_dev = NULL;
314
315         rte_eth_dev_shared_data_prepare();
316
317         /* Synchronize port attachment to primary port creation and release. */
318         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
319
320         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
321                 if (strcmp(rte_eth_dev_shared_data->data[i].name, name) == 0)
322                         break;
323         }
324         if (i == RTE_MAX_ETHPORTS) {
325                 RTE_PMD_DEBUG_TRACE(
326                         "device %s is not driven by the primary process\n",
327                         name);
328         } else {
329                 eth_dev = eth_dev_get(i);
330                 RTE_ASSERT(eth_dev->data->port_id == i);
331         }
332
333         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
334         return eth_dev;
335 }
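/*
 * Usage sketch (illustrative PMD probe code): a secondary process looks up
 * the port already created by the primary instead of allocating a new one.
 *
 *     if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 *             eth_dev = rte_eth_dev_attach_secondary(name);
 *             if (eth_dev == NULL)
 *                     return -ENODEV;
 *     }
 */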
336
337 int
338 rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
339 {
340         if (eth_dev == NULL)
341                 return -EINVAL;
342
343         rte_eth_dev_shared_data_prepare();
344
345         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
346
347         eth_dev->state = RTE_ETH_DEV_UNUSED;
348
349         memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
350
351         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
352
353         _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
354
355         return 0;
356 }
357
358 int
359 rte_eth_dev_is_valid_port(uint16_t port_id)
360 {
361         if (port_id >= RTE_MAX_ETHPORTS ||
362             (rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED))
363                 return 0;
364         else
365                 return 1;
366 }
367
368 static int
369 rte_eth_is_valid_owner_id(uint64_t owner_id)
370 {
371         if (owner_id == RTE_ETH_DEV_NO_OWNER ||
372             rte_eth_dev_shared_data->next_owner_id <= owner_id) {
373                 RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016lX.\n", owner_id);
374                 return 0;
375         }
376         return 1;
377 }
378
379 uint64_t __rte_experimental
380 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
381 {
382         while (port_id < RTE_MAX_ETHPORTS &&
383                ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
384                rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
385                rte_eth_devices[port_id].data->owner.id != owner_id))
386                 port_id++;
387
388         if (port_id >= RTE_MAX_ETHPORTS)
389                 return RTE_MAX_ETHPORTS;
390
391         return port_id;
392 }
393
394 int __rte_experimental
395 rte_eth_dev_owner_new(uint64_t *owner_id)
396 {
397         rte_eth_dev_shared_data_prepare();
398
399         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
400
401         *owner_id = rte_eth_dev_shared_data->next_owner_id++;
402
403         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
404         return 0;
405 }
406
407 static int
408 _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
409                        const struct rte_eth_dev_owner *new_owner)
410 {
411         struct rte_eth_dev_owner *port_owner;
412         int sret;
413
414         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
415
416         if (!rte_eth_is_valid_owner_id(new_owner->id) &&
417             !rte_eth_is_valid_owner_id(old_owner_id))
418                 return -EINVAL;
419
420         port_owner = &rte_eth_devices[port_id].data->owner;
421         if (port_owner->id != old_owner_id) {
422                 RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
423                                     " by %s_%016lX.\n", port_id,
424                                     port_owner->name, port_owner->id);
425                 return -EPERM;
426         }
427
428         sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
429                         new_owner->name);
430         if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
431                 RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
432                                     port_id);
433
434         port_owner->id = new_owner->id;
435
436         RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016lX.\n", port_id,
437                             new_owner->name, new_owner->id);
438
439         return 0;
440 }
441
442 int __rte_experimental
443 rte_eth_dev_owner_set(const uint16_t port_id,
444                       const struct rte_eth_dev_owner *owner)
445 {
446         int ret;
447
448         rte_eth_dev_shared_data_prepare();
449
450         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
451
452         ret = _rte_eth_dev_owner_set(port_id, RTE_ETH_DEV_NO_OWNER, owner);
453
454         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
455         return ret;
456 }
457
458 int __rte_experimental
459 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
460 {
461         const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
462                         {.id = RTE_ETH_DEV_NO_OWNER, .name = ""};
463         int ret;
464
465         rte_eth_dev_shared_data_prepare();
466
467         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
468
469         ret = _rte_eth_dev_owner_set(port_id, owner_id, &new_owner);
470
471         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
472         return ret;
473 }
474
475 void __rte_experimental
476 rte_eth_dev_owner_delete(const uint64_t owner_id)
477 {
478         uint16_t port_id;
479
480         rte_eth_dev_shared_data_prepare();
481
482         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
483
484         if (rte_eth_is_valid_owner_id(owner_id)) {
485                 RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
486                         memset(&rte_eth_devices[port_id].data->owner, 0,
487                                sizeof(struct rte_eth_dev_owner));
488                 RTE_PMD_DEBUG_TRACE("All port owners owned by %016lX identifier"
489                                     " have been removed.\n", owner_id);
490         }
491
492         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
493 }
494
495 int __rte_experimental
496 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
497 {
498         int ret = 0;
499
500         rte_eth_dev_shared_data_prepare();
501
502         rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
503
504         if (!rte_eth_dev_is_valid_port(port_id)) {
505                 RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
506                 ret = -ENODEV;
507         } else {
508                 rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
509                            sizeof(*owner));
510         }
511
512         rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
513         return ret;
514 }
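/*
 * Usage sketch for the experimental ownership API (illustrative; "my_app"
 * is a hypothetical owner name and port_id an already valid port):
 *
 *     struct rte_eth_dev_owner owner = { .name = "my_app" };
 *     uint16_t pid;
 *
 *     rte_eth_dev_owner_new(&owner.id);
 *     rte_eth_dev_owner_set(port_id, &owner);
 *     RTE_ETH_FOREACH_DEV_OWNED_BY(pid, owner.id)
 *             printf("port %u is owned by %s\n", pid, owner.name);
 *     rte_eth_dev_owner_delete(owner.id);
 */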
515
516 int
517 rte_eth_dev_socket_id(uint16_t port_id)
518 {
519         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
520         return rte_eth_devices[port_id].data->numa_node;
521 }
522
523 void *
524 rte_eth_dev_get_sec_ctx(uint8_t port_id)
525 {
526         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
527         return rte_eth_devices[port_id].security_ctx;
528 }
529
530 uint16_t
531 rte_eth_dev_count(void)
532 {
533         uint16_t p;
534         uint16_t count;
535
536         count = 0;
537
538         RTE_ETH_FOREACH_DEV(p)
539                 count++;
540
541         return count;
542 }
543
544 int
545 rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
546 {
547         char *tmp;
548
549         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
550
551         if (name == NULL) {
552                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
553                 return -EINVAL;
554         }
555
556         /* Shouldn't check 'rte_eth_devices[i].data' here,
557          * because it might be overwritten by a VDEV PMD. */
558         tmp = rte_eth_dev_shared_data->data[port_id].name;
559         strcpy(name, tmp);
560         return 0;
561 }
562
563 int
564 rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
565 {
566         uint32_t pid;
567
568         if (name == NULL) {
569                 RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
570                 return -EINVAL;
571         }
572
573         for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
574                 if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
575                     !strncmp(name, rte_eth_dev_shared_data->data[pid].name,
576                              strlen(name))) {
577                         *port_id = pid;
578                         return 0;
579                 }
580         }
581
582         return -ENODEV;
583 }
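/*
 * Usage sketch (illustrative): translate between a device name and a port
 * id; "0000:01:00.0" is only an example PCI device name.
 *
 *     uint16_t pid;
 *     char name[RTE_ETH_NAME_MAX_LEN];
 *
 *     if (rte_eth_dev_get_port_by_name("0000:01:00.0", &pid) == 0)
 *             rte_eth_dev_get_name_by_port(pid, name);
 */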
584
585 static int
586 eth_err(uint16_t port_id, int ret)
587 {
588         if (ret == 0)
589                 return 0;
590         if (rte_eth_dev_is_removed(port_id))
591                 return -EIO;
592         return ret;
593 }
594
595 /* attach the new device, then store port_id of the device */
596 int
597 rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
598 {
599         int ret = -1;
600         int current = rte_eth_dev_count();
601         char *name = NULL;
602         char *args = NULL;
603
604         if ((devargs == NULL) || (port_id == NULL)) {
605                 ret = -EINVAL;
606                 goto err;
607         }
608
609         /* parse devargs, then retrieve device name and args */
610         if (rte_eal_parse_devargs_str(devargs, &name, &args))
611                 goto err;
612
613         ret = rte_eal_dev_attach(name, args);
614         if (ret < 0)
615                 goto err;
616
617         /* no point looking at the port count if no port exists */
618         if (!rte_eth_dev_count()) {
619                 RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
620                 ret = -1;
621                 goto err;
622         }
623
624         /* if nothing happened, there is a bug here, since some driver told us
625          * it did attach a device, but did not create a port.
626          */
627         if (current == rte_eth_dev_count()) {
628                 ret = -1;
629                 goto err;
630         }
631
632         *port_id = eth_dev_last_created_port;
633         ret = 0;
634
635 err:
636         free(name);
637         free(args);
638         return ret;
639 }
640
641 /* detach the device, then store the name of the device */
642 int
643 rte_eth_dev_detach(uint16_t port_id, char *name)
644 {
645         uint32_t dev_flags;
646         int ret = -1;
647
648         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
649
650         if (name == NULL) {
651                 ret = -EINVAL;
652                 goto err;
653         }
654
655         dev_flags = rte_eth_devices[port_id].data->dev_flags;
656         if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
657                 RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
658                         port_id);
659                 ret = -ENOTSUP;
660                 goto err;
661         }
662
663         snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
664                  "%s", rte_eth_devices[port_id].data->name);
665
666         ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
667         if (ret < 0)
668                 goto err;
669
670         rte_eth_dev_release_port(&rte_eth_devices[port_id]);
671         return 0;
672
673 err:
674         return ret;
675 }
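/*
 * Usage sketch (illustrative) for hotplug attach/detach; "net_null0" is an
 * example virtual device devargs string:
 *
 *     uint16_t pid;
 *     char name[RTE_ETH_NAME_MAX_LEN];
 *
 *     if (rte_eth_dev_attach("net_null0", &pid) == 0) {
 *             // ... configure and use the port ...
 *             rte_eth_dev_detach(pid, name);
 *     }
 */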
676
677 static int
678 rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
679 {
680         uint16_t old_nb_queues = dev->data->nb_rx_queues;
681         void **rxq;
682         unsigned i;
683
684         if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
685                 dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
686                                 sizeof(dev->data->rx_queues[0]) * nb_queues,
687                                 RTE_CACHE_LINE_SIZE);
688                 if (dev->data->rx_queues == NULL) {
689                         dev->data->nb_rx_queues = 0;
690                         return -(ENOMEM);
691                 }
692         } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
693                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
694
695                 rxq = dev->data->rx_queues;
696
697                 for (i = nb_queues; i < old_nb_queues; i++)
698                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
699                 rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
700                                 RTE_CACHE_LINE_SIZE);
701                 if (rxq == NULL)
702                         return -(ENOMEM);
703                 if (nb_queues > old_nb_queues) {
704                         uint16_t new_qs = nb_queues - old_nb_queues;
705
706                         memset(rxq + old_nb_queues, 0,
707                                 sizeof(rxq[0]) * new_qs);
708                 }
709
710                 dev->data->rx_queues = rxq;
711
712         } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
713                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
714
715                 rxq = dev->data->rx_queues;
716
717                 for (i = nb_queues; i < old_nb_queues; i++)
718                         (*dev->dev_ops->rx_queue_release)(rxq[i]);
719
720                 rte_free(dev->data->rx_queues);
721                 dev->data->rx_queues = NULL;
722         }
723         dev->data->nb_rx_queues = nb_queues;
724         return 0;
725 }
726
727 int
728 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
729 {
730         struct rte_eth_dev *dev;
731
732         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
733
734         dev = &rte_eth_devices[port_id];
735         if (rx_queue_id >= dev->data->nb_rx_queues) {
736                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
737                 return -EINVAL;
738         }
739
740         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
741
742         if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
743                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
744                         " already started\n",
745                         rx_queue_id, port_id);
746                 return 0;
747         }
748
749         return eth_err(port_id, dev->dev_ops->rx_queue_start(dev,
750                                                              rx_queue_id));
751
752 }
753
754 int
755 rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
756 {
757         struct rte_eth_dev *dev;
758
759         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
760
761         dev = &rte_eth_devices[port_id];
762         if (rx_queue_id >= dev->data->nb_rx_queues) {
763                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
764                 return -EINVAL;
765         }
766
767         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
768
769         if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
770                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
771                         " already stopped\n",
772                         rx_queue_id, port_id);
773                 return 0;
774         }
775
776         return eth_err(port_id, dev->dev_ops->rx_queue_stop(dev, rx_queue_id));
777
778 }
779
780 int
781 rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
782 {
783         struct rte_eth_dev *dev;
784
785         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
786
787         dev = &rte_eth_devices[port_id];
788         if (tx_queue_id >= dev->data->nb_tx_queues) {
789                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
790                 return -EINVAL;
791         }
792
793         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
794
795         if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
796                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
797                         " already started\n",
798                         tx_queue_id, port_id);
799                 return 0;
800         }
801
802         return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
803                                                              tx_queue_id));
804
805 }
806
807 int
808 rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
809 {
810         struct rte_eth_dev *dev;
811
812         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
813
814         dev = &rte_eth_devices[port_id];
815         if (tx_queue_id >= dev->data->nb_tx_queues) {
816                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
817                 return -EINVAL;
818         }
819
820         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
821
822         if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
823                 RTE_PMD_DEBUG_TRACE("Queue %" PRIu16 " of device with port_id=%" PRIu16
824                         " already stopped\n",
825                         tx_queue_id, port_id);
826                 return 0;
827         }
828
829         return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
830
831 }
832
833 static int
834 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
835 {
836         uint16_t old_nb_queues = dev->data->nb_tx_queues;
837         void **txq;
838         unsigned i;
839
840         if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
841                 dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
842                                                    sizeof(dev->data->tx_queues[0]) * nb_queues,
843                                                    RTE_CACHE_LINE_SIZE);
844                 if (dev->data->tx_queues == NULL) {
845                         dev->data->nb_tx_queues = 0;
846                         return -(ENOMEM);
847                 }
848         } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
849                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
850
851                 txq = dev->data->tx_queues;
852
853                 for (i = nb_queues; i < old_nb_queues; i++)
854                         (*dev->dev_ops->tx_queue_release)(txq[i]);
855                 txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
856                                   RTE_CACHE_LINE_SIZE);
857                 if (txq == NULL)
858                         return -ENOMEM;
859                 if (nb_queues > old_nb_queues) {
860                         uint16_t new_qs = nb_queues - old_nb_queues;
861
862                         memset(txq + old_nb_queues, 0,
863                                sizeof(txq[0]) * new_qs);
864                 }
865
866                 dev->data->tx_queues = txq;
867
868         } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
869                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
870
871                 txq = dev->data->tx_queues;
872
873                 for (i = nb_queues; i < old_nb_queues; i++)
874                         (*dev->dev_ops->tx_queue_release)(txq[i]);
875
876                 rte_free(dev->data->tx_queues);
877                 dev->data->tx_queues = NULL;
878         }
879         dev->data->nb_tx_queues = nb_queues;
880         return 0;
881 }
882
883 uint32_t
884 rte_eth_speed_bitflag(uint32_t speed, int duplex)
885 {
886         switch (speed) {
887         case ETH_SPEED_NUM_10M:
888                 return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
889         case ETH_SPEED_NUM_100M:
890                 return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
891         case ETH_SPEED_NUM_1G:
892                 return ETH_LINK_SPEED_1G;
893         case ETH_SPEED_NUM_2_5G:
894                 return ETH_LINK_SPEED_2_5G;
895         case ETH_SPEED_NUM_5G:
896                 return ETH_LINK_SPEED_5G;
897         case ETH_SPEED_NUM_10G:
898                 return ETH_LINK_SPEED_10G;
899         case ETH_SPEED_NUM_20G:
900                 return ETH_LINK_SPEED_20G;
901         case ETH_SPEED_NUM_25G:
902                 return ETH_LINK_SPEED_25G;
903         case ETH_SPEED_NUM_40G:
904                 return ETH_LINK_SPEED_40G;
905         case ETH_SPEED_NUM_50G:
906                 return ETH_LINK_SPEED_50G;
907         case ETH_SPEED_NUM_56G:
908                 return ETH_LINK_SPEED_56G;
909         case ETH_SPEED_NUM_100G:
910                 return ETH_LINK_SPEED_100G;
911         default:
912                 return 0;
913         }
914 }
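/*
 * Usage sketch (illustrative): build the link_speeds bitmap of
 * struct rte_eth_conf from a fixed numeric speed.
 *
 *     uint32_t link_speeds = ETH_LINK_SPEED_FIXED |
 *             rte_eth_speed_bitflag(ETH_SPEED_NUM_10G, ETH_LINK_FULL_DUPLEX);
 */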
915
916 /**
917  * Convert from the legacy rxmode bitfield API to the Rx offloads API.
918  */
919 static void
920 rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
921                                     uint64_t *rx_offloads)
922 {
923         uint64_t offloads = 0;
924
925         if (rxmode->header_split == 1)
926                 offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
927         if (rxmode->hw_ip_checksum == 1)
928                 offloads |= DEV_RX_OFFLOAD_CHECKSUM;
929         if (rxmode->hw_vlan_filter == 1)
930                 offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
931         if (rxmode->hw_vlan_strip == 1)
932                 offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
933         if (rxmode->hw_vlan_extend == 1)
934                 offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
935         if (rxmode->jumbo_frame == 1)
936                 offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
937         if (rxmode->hw_strip_crc == 1)
938                 offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
939         if (rxmode->enable_scatter == 1)
940                 offloads |= DEV_RX_OFFLOAD_SCATTER;
941         if (rxmode->enable_lro == 1)
942                 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
943         if (rxmode->hw_timestamp == 1)
944                 offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
945         if (rxmode->security == 1)
946                 offloads |= DEV_RX_OFFLOAD_SECURITY;
947
948         *rx_offloads = offloads;
949 }
950
951 /**
952  * Convert from the Rx offloads API back to the legacy rxmode bitfield API.
953  */
954 static void
955 rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
956                             struct rte_eth_rxmode *rxmode)
957 {
958
959         if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
960                 rxmode->header_split = 1;
961         else
962                 rxmode->header_split = 0;
963         if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
964                 rxmode->hw_ip_checksum = 1;
965         else
966                 rxmode->hw_ip_checksum = 0;
967         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
968                 rxmode->hw_vlan_filter = 1;
969         else
970                 rxmode->hw_vlan_filter = 0;
971         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
972                 rxmode->hw_vlan_strip = 1;
973         else
974                 rxmode->hw_vlan_strip = 0;
975         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
976                 rxmode->hw_vlan_extend = 1;
977         else
978                 rxmode->hw_vlan_extend = 0;
979         if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
980                 rxmode->jumbo_frame = 1;
981         else
982                 rxmode->jumbo_frame = 0;
983         if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
984                 rxmode->hw_strip_crc = 1;
985         else
986                 rxmode->hw_strip_crc = 0;
987         if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
988                 rxmode->enable_scatter = 1;
989         else
990                 rxmode->enable_scatter = 0;
991         if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
992                 rxmode->enable_lro = 1;
993         else
994                 rxmode->enable_lro = 0;
995         if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
996                 rxmode->hw_timestamp = 1;
997         else
998                 rxmode->hw_timestamp = 0;
999         if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
1000                 rxmode->security = 1;
1001         else
1002                 rxmode->security = 0;
1003 }
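/*
 * Illustrative equivalence between the two Rx configuration styles handled
 * by the helpers above (application-side code, assuming default values for
 * every other field):
 *
 *     // Legacy bitfield style:
 *     struct rte_eth_conf conf = {
 *             .rxmode = { .hw_ip_checksum = 1, .hw_vlan_strip = 1 },
 *     };
 *
 *     // Offloads style (preferred):
 *     struct rte_eth_conf conf2 = {
 *             .rxmode = {
 *                     .ignore_offload_bitfield = 1,
 *                     .offloads = DEV_RX_OFFLOAD_CHECKSUM |
 *                                 DEV_RX_OFFLOAD_VLAN_STRIP,
 *             },
 *     };
 */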
1004
1005 const char * __rte_experimental
1006 rte_eth_dev_rx_offload_name(uint64_t offload)
1007 {
1008         const char *name = "UNKNOWN";
1009         unsigned int i;
1010
1011         for (i = 0; i < RTE_DIM(rte_rx_offload_names); ++i) {
1012                 if (offload == rte_rx_offload_names[i].offload) {
1013                         name = rte_rx_offload_names[i].name;
1014                         break;
1015                 }
1016         }
1017
1018         return name;
1019 }
1020
1021 const char * __rte_experimental
1022 rte_eth_dev_tx_offload_name(uint64_t offload)
1023 {
1024         const char *name = "UNKNOWN";
1025         unsigned int i;
1026
1027         for (i = 0; i < RTE_DIM(rte_tx_offload_names); ++i) {
1028                 if (offload == rte_tx_offload_names[i].offload) {
1029                         name = rte_tx_offload_names[i].name;
1030                         break;
1031                 }
1032         }
1033
1034         return name;
1035 }
1036
1037 int
1038 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
1039                       const struct rte_eth_conf *dev_conf)
1040 {
1041         struct rte_eth_dev *dev;
1042         struct rte_eth_dev_info dev_info;
1043         struct rte_eth_conf local_conf = *dev_conf;
1044         int diag;
1045
1046         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1047
1048         if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
1049                 RTE_PMD_DEBUG_TRACE(
1050                         "Number of RX queues requested (%u) is greater than max supported (%d)\n",
1051                         nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
1052                 return -EINVAL;
1053         }
1054
1055         if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
1056                 RTE_PMD_DEBUG_TRACE(
1057                         "Number of TX queues requested (%u) is greater than max supported (%d)\n",
1058                         nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
1059                 return -EINVAL;
1060         }
1061
1062         dev = &rte_eth_devices[port_id];
1063
1064         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1065         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
1066
1067         if (dev->data->dev_started) {
1068                 RTE_PMD_DEBUG_TRACE(
1069                     "port %d must be stopped to allow configuration\n", port_id);
1070                 return -EBUSY;
1071         }
1072
1073         /*
1074          * Convert between the two offloads API variants so that PMDs
1075          * only need to support one of them.
1076          */
1077         if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
1078                 rte_eth_convert_rx_offload_bitfield(
1079                                 &dev_conf->rxmode, &local_conf.rxmode.offloads);
1080         } else {
1081                 rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
1082                                             &local_conf.rxmode);
1083         }
1084
1085         /* Copy the dev_conf parameter into the dev structure */
1086         memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
1087
1088         /*
1089          * Check that the numbers of RX and TX queues are not greater
1090          * than the maximum number of RX and TX queues supported by the
1091          * configured device.
1092          */
1093         (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
1094
1095         if (nb_rx_q == 0 && nb_tx_q == 0) {
1096                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d: nb_rx_q and nb_tx_q cannot both be 0\n", port_id);
1097                 return -EINVAL;
1098         }
1099
1100         if (nb_rx_q > dev_info.max_rx_queues) {
1101                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
1102                                 port_id, nb_rx_q, dev_info.max_rx_queues);
1103                 return -EINVAL;
1104         }
1105
1106         if (nb_tx_q > dev_info.max_tx_queues) {
1107                 RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
1108                                 port_id, nb_tx_q, dev_info.max_tx_queues);
1109                 return -EINVAL;
1110         }
1111
1112         /* Check that the device supports requested interrupts */
1113         if ((dev_conf->intr_conf.lsc == 1) &&
1114                 (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
1115                         RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
1116                                         dev->device->driver->name);
1117                         return -EINVAL;
1118         }
1119         if ((dev_conf->intr_conf.rmv == 1) &&
1120             (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
1121                 RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
1122                                     dev->device->driver->name);
1123                 return -EINVAL;
1124         }
1125
1126         /*
1127          * If jumbo frames are enabled, check that the maximum RX packet
1128          * length is supported by the configured device.
1129          */
1130         if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1131                 if (dev_conf->rxmode.max_rx_pkt_len >
1132                     dev_info.max_rx_pktlen) {
1133                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1134                                 " > max valid value %u\n",
1135                                 port_id,
1136                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1137                                 (unsigned)dev_info.max_rx_pktlen);
1138                         return -EINVAL;
1139                 } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
1140                         RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
1141                                 " < min valid value %u\n",
1142                                 port_id,
1143                                 (unsigned)dev_conf->rxmode.max_rx_pkt_len,
1144                                 (unsigned)ETHER_MIN_LEN);
1145                         return -EINVAL;
1146                 }
1147         } else {
1148                 if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
1149                         dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
1150                         /* Use default value */
1151                         dev->data->dev_conf.rxmode.max_rx_pkt_len =
1152                                                         ETHER_MAX_LEN;
1153         }
1154
1155         /*
1156          * Setup new number of RX/TX queues and reconfigure device.
1157          */
1158         diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
1159         if (diag != 0) {
1160                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
1161                                 port_id, diag);
1162                 return diag;
1163         }
1164
1165         diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
1166         if (diag != 0) {
1167                 RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
1168                                 port_id, diag);
1169                 rte_eth_dev_rx_queue_config(dev, 0);
1170                 return diag;
1171         }
1172
1173         diag = (*dev->dev_ops->dev_configure)(dev);
1174         if (diag != 0) {
1175                 RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
1176                                 port_id, diag);
1177                 rte_eth_dev_rx_queue_config(dev, 0);
1178                 rte_eth_dev_tx_queue_config(dev, 0);
1179                 return eth_err(port_id, diag);
1180         }
1181
1182         /* Initialize Rx profiling if enabled at compilation time. */
1183         diag = __rte_eth_profile_rx_init(port_id, dev);
1184         if (diag != 0) {
1185                 RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
1186                                 port_id, diag);
1187                 rte_eth_dev_rx_queue_config(dev, 0);
1188                 rte_eth_dev_tx_queue_config(dev, 0);
1189                 return eth_err(port_id, diag);
1190         }
1191
1192         return 0;
1193 }
1194
1195 void
1196 _rte_eth_dev_reset(struct rte_eth_dev *dev)
1197 {
1198         if (dev->data->dev_started) {
1199                 RTE_PMD_DEBUG_TRACE(
1200                         "port %d must be stopped to allow reset\n",
1201                         dev->data->port_id);
1202                 return;
1203         }
1204
1205         rte_eth_dev_rx_queue_config(dev, 0);
1206         rte_eth_dev_tx_queue_config(dev, 0);
1207
1208         memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
1209 }
1210
1211 static void
1212 rte_eth_dev_config_restore(uint16_t port_id)
1213 {
1214         struct rte_eth_dev *dev;
1215         struct rte_eth_dev_info dev_info;
1216         struct ether_addr *addr;
1217         uint16_t i;
1218         uint32_t pool = 0;
1219         uint64_t pool_mask;
1220
1221         dev = &rte_eth_devices[port_id];
1222
1223         rte_eth_dev_info_get(port_id, &dev_info);
1224
1225         /* replay MAC address configuration including default MAC */
1226         addr = &dev->data->mac_addrs[0];
1227         if (*dev->dev_ops->mac_addr_set != NULL)
1228                 (*dev->dev_ops->mac_addr_set)(dev, addr);
1229         else if (*dev->dev_ops->mac_addr_add != NULL)
1230                 (*dev->dev_ops->mac_addr_add)(dev, addr, 0, pool);
1231
1232         if (*dev->dev_ops->mac_addr_add != NULL) {
1233                 for (i = 1; i < dev_info.max_mac_addrs; i++) {
1234                         addr = &dev->data->mac_addrs[i];
1235
1236                         /* skip zero address */
1237                         if (is_zero_ether_addr(addr))
1238                                 continue;
1239
1240                         pool = 0;
1241                         pool_mask = dev->data->mac_pool_sel[i];
1242
1243                         do {
1244                                 if (pool_mask & 1ULL)
1245                                         (*dev->dev_ops->mac_addr_add)(dev,
1246                                                 addr, i, pool);
1247                                 pool_mask >>= 1;
1248                                 pool++;
1249                         } while (pool_mask);
1250                 }
1251         }
1252
1253         /* replay promiscuous configuration */
1254         if (rte_eth_promiscuous_get(port_id) == 1)
1255                 rte_eth_promiscuous_enable(port_id);
1256         else if (rte_eth_promiscuous_get(port_id) == 0)
1257                 rte_eth_promiscuous_disable(port_id);
1258
1259         /* replay all multicast configuration */
1260         if (rte_eth_allmulticast_get(port_id) == 1)
1261                 rte_eth_allmulticast_enable(port_id);
1262         else if (rte_eth_allmulticast_get(port_id) == 0)
1263                 rte_eth_allmulticast_disable(port_id);
1264 }
1265
1266 int
1267 rte_eth_dev_start(uint16_t port_id)
1268 {
1269         struct rte_eth_dev *dev;
1270         int diag;
1271
1272         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1273
1274         dev = &rte_eth_devices[port_id];
1275
1276         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1277
1278         if (dev->data->dev_started != 0) {
1279                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1280                         " already started\n",
1281                         port_id);
1282                 return 0;
1283         }
1284
1285         diag = (*dev->dev_ops->dev_start)(dev);
1286         if (diag == 0)
1287                 dev->data->dev_started = 1;
1288         else
1289                 return eth_err(port_id, diag);
1290
1291         rte_eth_dev_config_restore(port_id);
1292
1293         if (dev->data->dev_conf.intr_conf.lsc == 0) {
1294                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
1295                 (*dev->dev_ops->link_update)(dev, 0);
1296         }
1297         return 0;
1298 }
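/*
 * Typical device bring-up sequence (illustrative application code;
 * mbuf_pool is a hypothetical, already created packet mbuf pool):
 *
 *     struct rte_eth_conf conf = {
 *             .rxmode = { .ignore_offload_bitfield = 1 },
 *     };
 *
 *     rte_eth_dev_configure(port_id, 1, 1, &conf);
 *     rte_eth_rx_queue_setup(port_id, 0, 512,
 *                            rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *     rte_eth_tx_queue_setup(port_id, 0, 512,
 *                            rte_eth_dev_socket_id(port_id), NULL);
 *     rte_eth_dev_start(port_id);
 */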
1299
1300 void
1301 rte_eth_dev_stop(uint16_t port_id)
1302 {
1303         struct rte_eth_dev *dev;
1304
1305         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1306         dev = &rte_eth_devices[port_id];
1307
1308         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1309
1310         if (dev->data->dev_started == 0) {
1311                 RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
1312                         " already stopped\n",
1313                         port_id);
1314                 return;
1315         }
1316
1317         dev->data->dev_started = 0;
1318         (*dev->dev_ops->dev_stop)(dev);
1319 }
1320
1321 int
1322 rte_eth_dev_set_link_up(uint16_t port_id)
1323 {
1324         struct rte_eth_dev *dev;
1325
1326         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1327
1328         dev = &rte_eth_devices[port_id];
1329
1330         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
1331         return eth_err(port_id, (*dev->dev_ops->dev_set_link_up)(dev));
1332 }
1333
1334 int
1335 rte_eth_dev_set_link_down(uint16_t port_id)
1336 {
1337         struct rte_eth_dev *dev;
1338
1339         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1340
1341         dev = &rte_eth_devices[port_id];
1342
1343         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
1344         return eth_err(port_id, (*dev->dev_ops->dev_set_link_down)(dev));
1345 }
1346
1347 void
1348 rte_eth_dev_close(uint16_t port_id)
1349 {
1350         struct rte_eth_dev *dev;
1351
1352         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1353         dev = &rte_eth_devices[port_id];
1354
1355         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
1356         dev->data->dev_started = 0;
1357         (*dev->dev_ops->dev_close)(dev);
1358
1359         dev->data->nb_rx_queues = 0;
1360         rte_free(dev->data->rx_queues);
1361         dev->data->rx_queues = NULL;
1362         dev->data->nb_tx_queues = 0;
1363         rte_free(dev->data->tx_queues);
1364         dev->data->tx_queues = NULL;
1365 }
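/*
 * Typical teardown sequence (illustrative application code): a port must be
 * stopped before it is closed.
 *
 *     rte_eth_dev_stop(port_id);
 *     rte_eth_dev_close(port_id);
 */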
1366
1367 int
1368 rte_eth_dev_reset(uint16_t port_id)
1369 {
1370         struct rte_eth_dev *dev;
1371         int ret;
1372
1373         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1374         dev = &rte_eth_devices[port_id];
1375
1376         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
1377
1378         rte_eth_dev_stop(port_id);
1379         ret = dev->dev_ops->dev_reset(dev);
1380
1381         return eth_err(port_id, ret);
1382 }
1383
1384 int __rte_experimental
1385 rte_eth_dev_is_removed(uint16_t port_id)
1386 {
1387         struct rte_eth_dev *dev;
1388         int ret;
1389
1390         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
1391
1392         dev = &rte_eth_devices[port_id];
1393
1394         if (dev->state == RTE_ETH_DEV_REMOVED)
1395                 return 1;
1396
1397         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->is_removed, 0);
1398
1399         ret = dev->dev_ops->is_removed(dev);
1400         if (ret != 0)
1401                 /* Device is physically removed. */
1402                 dev->state = RTE_ETH_DEV_REMOVED;
1403
1404         return ret;
1405 }
1406
1407 int
1408 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
1409                        uint16_t nb_rx_desc, unsigned int socket_id,
1410                        const struct rte_eth_rxconf *rx_conf,
1411                        struct rte_mempool *mp)
1412 {
1413         int ret;
1414         uint32_t mbp_buf_size;
1415         struct rte_eth_dev *dev;
1416         struct rte_eth_dev_info dev_info;
1417         struct rte_eth_rxconf local_conf;
1418         void **rxq;
1419
1420         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1421
1422         dev = &rte_eth_devices[port_id];
1423         if (rx_queue_id >= dev->data->nb_rx_queues) {
1424                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
1425                 return -EINVAL;
1426         }
1427
1428         if (dev->data->dev_started) {
1429                 RTE_PMD_DEBUG_TRACE(
1430                     "port %d must be stopped to allow configuration\n", port_id);
1431                 return -EBUSY;
1432         }
1433
1434         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1435         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
1436
1437         /*
1438          * Check the size of the mbuf data buffer.
1439          * This value must be provided in the private data of the memory pool.
1440          * First check that the memory pool has valid private data.
1441          */
1442         rte_eth_dev_info_get(port_id, &dev_info);
1443         if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
1444                 RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
1445                                 mp->name, (int) mp->private_data_size,
1446                                 (int) sizeof(struct rte_pktmbuf_pool_private));
1447                 return -ENOSPC;
1448         }
1449         mbp_buf_size = rte_pktmbuf_data_room_size(mp);
1450
1451         if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
1452                 RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
1453                                 "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
1454                                 "=%d)\n",
1455                                 mp->name,
1456                                 (int)mbp_buf_size,
1457                                 (int)(RTE_PKTMBUF_HEADROOM +
1458                                       dev_info.min_rx_bufsize),
1459                                 (int)RTE_PKTMBUF_HEADROOM,
1460                                 (int)dev_info.min_rx_bufsize);
1461                 return -EINVAL;
1462         }
1463
1464         if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
1465                         nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
1466                         nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
1467
1468                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
1469                         "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1470                         nb_rx_desc,
1471                         dev_info.rx_desc_lim.nb_max,
1472                         dev_info.rx_desc_lim.nb_min,
1473                         dev_info.rx_desc_lim.nb_align);
1474                 return -EINVAL;
1475         }
1476
1477         rxq = dev->data->rx_queues;
1478         if (rxq[rx_queue_id]) {
1479                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
1480                                         -ENOTSUP);
1481                 (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
1482                 rxq[rx_queue_id] = NULL;
1483         }
1484
1485         if (rx_conf == NULL)
1486                 rx_conf = &dev_info.default_rxconf;
1487
1488         local_conf = *rx_conf;
1489         if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
1490                 /*
1491                  * Reflect port offloads to queue offloads so that
1492                  * port-level offloads are not discarded.
1493                  */
1494                 rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
1495                                                     &local_conf.offloads);
1496         }
1497
1498         ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
1499                                               socket_id, &local_conf, mp);
1500         if (!ret) {
1501                 if (!dev->data->min_rx_buf_size ||
1502                     dev->data->min_rx_buf_size > mbp_buf_size)
1503                         dev->data->min_rx_buf_size = mbp_buf_size;
1504         }
1505
1506         return eth_err(port_id, ret);
1507 }
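/*
 * Usage sketch (illustrative): create a mempool whose data room satisfies
 * the check above (RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize)
 * and set up an Rx queue from it; pool name and sizes are example values.
 *
 *     struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256,
 *             0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *     if (mp != NULL)
 *             rte_eth_rx_queue_setup(port_id, 0, 512,
 *                                    rte_eth_dev_socket_id(port_id), NULL, mp);
 */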
1508
1509 /**
1510  * Convert from the legacy txq_flags API to the Tx offloads API.
1511  */
1512 static void
1513 rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
1514 {
1515         uint64_t offloads = 0;
1516
1517         if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
1518                 offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
1519         if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
1520                 offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
1521         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
1522                 offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
1523         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
1524                 offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
1525         if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
1526                 offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
1527         if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
1528             (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
1529                 offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
1530
1531         *tx_offloads = offloads;
1532 }
1533
1534 /**
1535  * Convert Tx offload flags back into the legacy txq_flags bit-field.
1536  */
1537 static void
1538 rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
1539 {
1540         uint32_t flags = 0;
1541
1542         if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
1543                 flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
1544         if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
1545                 flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
1546         if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
1547                 flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
1548         if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
1549                 flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
1550         if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
1551                 flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
1552         if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
1553                 flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
1554
1555         *txq_flags = flags;
1556 }
1557
1558 int
1559 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
1560                        uint16_t nb_tx_desc, unsigned int socket_id,
1561                        const struct rte_eth_txconf *tx_conf)
1562 {
1563         struct rte_eth_dev *dev;
1564         struct rte_eth_dev_info dev_info;
1565         struct rte_eth_txconf local_conf;
1566         void **txq;
1567
1568         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1569
1570         dev = &rte_eth_devices[port_id];
1571         if (tx_queue_id >= dev->data->nb_tx_queues) {
1572                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
1573                 return -EINVAL;
1574         }
1575
1576         if (dev->data->dev_started) {
1577                 RTE_PMD_DEBUG_TRACE(
1578                     "port %d must be stopped to allow configuration\n", port_id);
1579                 return -EBUSY;
1580         }
1581
1582         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
1583         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
1584
1585         rte_eth_dev_info_get(port_id, &dev_info);
1586
1587         if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
1588             nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
1589             nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
1590                 RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
1591                                 "should be: <= %hu, >= %hu, and a multiple of %hu\n",
1592                                 nb_tx_desc,
1593                                 dev_info.tx_desc_lim.nb_max,
1594                                 dev_info.tx_desc_lim.nb_min,
1595                                 dev_info.tx_desc_lim.nb_align);
1596                 return -EINVAL;
1597         }
1598
1599         txq = dev->data->tx_queues;
1600         if (txq[tx_queue_id]) {
1601                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
1602                                         -ENOTSUP);
1603                 (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
1604                 txq[tx_queue_id] = NULL;
1605         }
1606
1607         if (tx_conf == NULL)
1608                 tx_conf = &dev_info.default_txconf;
1609
1610         /*
1611          * Convert between the txq_flags and offloads APIs so that
1612          * a PMD only needs to support one of them.
1613          */
1614         local_conf = *tx_conf;
1615         if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
1616                 rte_eth_convert_txq_offloads(tx_conf->offloads,
1617                                              &local_conf.txq_flags);
1618                 /* Keep the ignore flag. */
1619                 local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
1620         } else {
1621                 rte_eth_convert_txq_flags(tx_conf->txq_flags,
1622                                           &local_conf.offloads);
1623         }
1624
1625         return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
1626                        tx_queue_id, nb_tx_desc, socket_id, &local_conf));
1627 }
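
/*
 * Illustrative usage sketch, not part of the ethdev implementation: an
 * application that wants to use the offloads field sets ETH_TXQ_FLAGS_IGNORE
 * in txq_flags, which is the path handled by the conversion above. The
 * requested offload, descriptor count and queue 0 are arbitrary example
 * values.
 *
 *	struct rte_eth_dev_info dev_info;
 *	struct rte_eth_txconf txconf;
 *	int ret;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	txconf = dev_info.default_txconf;
 *	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
 *	txconf.offloads = dev_info.tx_offload_capa &
 *			  DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), &txconf);
 */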
1628
1629 void
1630 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
1631                 void *userdata __rte_unused)
1632 {
1633         unsigned i;
1634
1635         for (i = 0; i < unsent; i++)
1636                 rte_pktmbuf_free(pkts[i]);
1637 }
1638
1639 void
1640 rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
1641                 void *userdata)
1642 {
1643         uint64_t *count = userdata;
1644         unsigned i;
1645
1646         for (i = 0; i < unsent; i++)
1647                 rte_pktmbuf_free(pkts[i]);
1648
1649         *count += unsent;
1650 }
1651
1652 int
1653 rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
1654                 buffer_tx_error_fn cbfn, void *userdata)
1655 {
1656         buffer->error_callback = cbfn;
1657         buffer->error_userdata = userdata;
1658         return 0;
1659 }
1660
1661 int
1662 rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
1663 {
1664         int ret = 0;
1665
1666         if (buffer == NULL)
1667                 return -EINVAL;
1668
1669         buffer->size = size;
1670         if (buffer->error_callback == NULL) {
1671                 ret = rte_eth_tx_buffer_set_err_callback(
1672                         buffer, rte_eth_tx_buffer_drop_callback, NULL);
1673         }
1674
1675         return ret;
1676 }
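
/*
 * Illustrative usage sketch, not part of the ethdev implementation: allocating
 * a Tx buffer, initializing it, and replacing the default drop callback with
 * the counting callback defined above. The buffer size of 32 packets and
 * queue 0 are arbitrary example values.
 *
 *	static uint64_t silent_drops;
 *	struct rte_eth_dev_tx_buffer *buffer;
 *
 *	buffer = rte_zmalloc_socket("tx_buffer_example",
 *			RTE_ETH_TX_BUFFER_SIZE(32), 0,
 *			rte_eth_dev_socket_id(port_id));
 *	if (buffer == NULL)
 *		return -ENOMEM;
 *	rte_eth_tx_buffer_init(buffer, 32);
 *	rte_eth_tx_buffer_set_err_callback(buffer,
 *			rte_eth_tx_buffer_count_callback, &silent_drops);
 *
 * Packets are then queued with rte_eth_tx_buffer(port_id, 0, buffer, m) and
 * pushed out with rte_eth_tx_buffer_flush(port_id, 0, buffer).
 */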
1677
1678 int
1679 rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
1680 {
1681         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1682         int ret;
1683
1684         /* Validate Input Data. Bail if not valid or not supported. */
1685         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1686         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_done_cleanup, -ENOTSUP);
1687
1688         /* Call driver to free pending mbufs. */
1689         ret = (*dev->dev_ops->tx_done_cleanup)(dev->data->tx_queues[queue_id],
1690                                                free_cnt);
1691         return eth_err(port_id, ret);
1692 }
1693
1694 void
1695 rte_eth_promiscuous_enable(uint16_t port_id)
1696 {
1697         struct rte_eth_dev *dev;
1698
1699         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1700         dev = &rte_eth_devices[port_id];
1701
1702         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
1703         (*dev->dev_ops->promiscuous_enable)(dev);
1704         dev->data->promiscuous = 1;
1705 }
1706
1707 void
1708 rte_eth_promiscuous_disable(uint16_t port_id)
1709 {
1710         struct rte_eth_dev *dev;
1711
1712         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1713         dev = &rte_eth_devices[port_id];
1714
1715         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
1716         dev->data->promiscuous = 0;
1717         (*dev->dev_ops->promiscuous_disable)(dev);
1718 }
1719
1720 int
1721 rte_eth_promiscuous_get(uint16_t port_id)
1722 {
1723         struct rte_eth_dev *dev;
1724
1725         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1726
1727         dev = &rte_eth_devices[port_id];
1728         return dev->data->promiscuous;
1729 }
1730
1731 void
1732 rte_eth_allmulticast_enable(uint16_t port_id)
1733 {
1734         struct rte_eth_dev *dev;
1735
1736         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1737         dev = &rte_eth_devices[port_id];
1738
1739         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
1740         (*dev->dev_ops->allmulticast_enable)(dev);
1741         dev->data->all_multicast = 1;
1742 }
1743
1744 void
1745 rte_eth_allmulticast_disable(uint16_t port_id)
1746 {
1747         struct rte_eth_dev *dev;
1748
1749         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1750         dev = &rte_eth_devices[port_id];
1751
1752         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
1753         dev->data->all_multicast = 0;
1754         (*dev->dev_ops->allmulticast_disable)(dev);
1755 }
1756
1757 int
1758 rte_eth_allmulticast_get(uint16_t port_id)
1759 {
1760         struct rte_eth_dev *dev;
1761
1762         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1763
1764         dev = &rte_eth_devices[port_id];
1765         return dev->data->all_multicast;
1766 }
1767
1768 static inline int
1769 rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
1770                                 struct rte_eth_link *link)
1771 {
1772         struct rte_eth_link *dst = link;
1773         struct rte_eth_link *src = &(dev->data->dev_link);
1774
1775         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1776                                         *(uint64_t *)src) == 0)
1777                 return -1;
1778
1779         return 0;
1780 }
1781
1782 void
1783 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
1784 {
1785         struct rte_eth_dev *dev;
1786
1787         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1788         dev = &rte_eth_devices[port_id];
1789
1790         if (dev->data->dev_conf.intr_conf.lsc != 0)
1791                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1792         else {
1793                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1794                 (*dev->dev_ops->link_update)(dev, 1);
1795                 *eth_link = dev->data->dev_link;
1796         }
1797 }
1798
1799 void
1800 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
1801 {
1802         struct rte_eth_dev *dev;
1803
1804         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1805         dev = &rte_eth_devices[port_id];
1806
1807         if (dev->data->dev_conf.intr_conf.lsc != 0)
1808                 rte_eth_dev_atomic_read_link_status(dev, eth_link);
1809         else {
1810                 RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
1811                 (*dev->dev_ops->link_update)(dev, 0);
1812                 *eth_link = dev->data->dev_link;
1813         }
1814 }
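
/*
 * Illustrative usage sketch, not part of the ethdev implementation: polling
 * the link without waiting for the update to complete, then reporting the
 * result.
 *
 *	struct rte_eth_link link;
 *
 *	memset(&link, 0, sizeof(link));
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("Port %u: up, %u Mbps\n", port_id, link.link_speed);
 *	else
 *		printf("Port %u: down\n", port_id);
 */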
1815
1816 int
1817 rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
1818 {
1819         struct rte_eth_dev *dev;
1820
1821         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1822
1823         dev = &rte_eth_devices[port_id];
1824         memset(stats, 0, sizeof(*stats));
1825
1826         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
1827         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1828         return eth_err(port_id, (*dev->dev_ops->stats_get)(dev, stats));
1829 }
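
/*
 * Illustrative usage sketch, not part of the ethdev implementation: reading
 * the basic counters filled in by the driver's stats_get callback above.
 *
 *	struct rte_eth_stats stats;
 *
 *	if (rte_eth_stats_get(port_id, &stats) == 0)
 *		printf("rx=%" PRIu64 " tx=%" PRIu64 " missed=%" PRIu64
 *			" no_mbuf=%" PRIu64 "\n",
 *			stats.ipackets, stats.opackets,
 *			stats.imissed, stats.rx_nombuf);
 */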
1830
1831 int
1832 rte_eth_stats_reset(uint16_t port_id)
1833 {
1834         struct rte_eth_dev *dev;
1835
1836         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1837         dev = &rte_eth_devices[port_id];
1838
1839         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
1840         (*dev->dev_ops->stats_reset)(dev);
1841         dev->data->rx_mbuf_alloc_failed = 0;
1842
1843         return 0;
1844 }
1845
1846 static inline int
1847 get_xstats_basic_count(struct rte_eth_dev *dev)
1848 {
1849         uint16_t nb_rxqs, nb_txqs;
1850         int count;
1851
1852         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1853         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1854
1855         count = RTE_NB_STATS;
1856         count += nb_rxqs * RTE_NB_RXQ_STATS;
1857         count += nb_txqs * RTE_NB_TXQ_STATS;
1858
1859         return count;
1860 }
1861
1862 static int
1863 get_xstats_count(uint16_t port_id)
1864 {
1865         struct rte_eth_dev *dev;
1866         int count;
1867
1868         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
1869         dev = &rte_eth_devices[port_id];
1870         if (dev->dev_ops->xstats_get_names_by_id != NULL) {
1871                 count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
1872                                 NULL, 0);
1873                 if (count < 0)
1874                         return eth_err(port_id, count);
1875         }
1876         if (dev->dev_ops->xstats_get_names != NULL) {
1877                 count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
1878                 if (count < 0)
1879                         return eth_err(port_id, count);
1880         } else
1881                 count = 0;
1882
1883
1884         count += get_xstats_basic_count(dev);
1885
1886         return count;
1887 }
1888
1889 int
1890 rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
1891                 uint64_t *id)
1892 {
1893         int cnt_xstats, idx_xstat;
1894
1895         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1896
1897         if (!id) {
1898                 RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
1899                 return -ENOMEM;
1900         }
1901
1902         if (!xstat_name) {
1903                 RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
1904                 return -ENOMEM;
1905         }
1906
1907         /* Get count */
1908         cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
1909         if (cnt_xstats  < 0) {
1910                 RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
1911                 return -ENODEV;
1912         }
1913
1914         /* Get id-name lookup table */
1915         struct rte_eth_xstat_name xstats_names[cnt_xstats];
1916
1917         if (cnt_xstats != rte_eth_xstats_get_names_by_id(
1918                         port_id, xstats_names, cnt_xstats, NULL)) {
1919                 RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
1920                 return -1;
1921         }
1922
1923         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
1924                 if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) {
1925                         *id = idx_xstat;
1926                         return 0;
1927                 }
1928         }
1929
1930         return -EINVAL;
1931 }
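
/*
 * Illustrative usage sketch, not part of the ethdev implementation: resolving
 * one counter name to its id and then reading that single value by id. The
 * name "rx_good_packets" is one of the basic stats listed in
 * rte_stats_strings above.
 *
 *	uint64_t id, value;
 *
 *	if (rte_eth_xstats_get_id_by_name(port_id, "rx_good_packets",
 *			&id) == 0 &&
 *	    rte_eth_xstats_get_by_id(port_id, &id, &value, 1) == 1)
 *		printf("rx_good_packets: %" PRIu64 "\n", value);
 */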
1932
1933 /* retrieve basic stats names */
1934 static int
1935 rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
1936         struct rte_eth_xstat_name *xstats_names)
1937 {
1938         int cnt_used_entries = 0;
1939         uint32_t idx, id_queue;
1940         uint16_t num_q;
1941
1942         for (idx = 0; idx < RTE_NB_STATS; idx++) {
1943                 snprintf(xstats_names[cnt_used_entries].name,
1944                         sizeof(xstats_names[0].name),
1945                         "%s", rte_stats_strings[idx].name);
1946                 cnt_used_entries++;
1947         }
1948         num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1949         for (id_queue = 0; id_queue < num_q; id_queue++) {
1950                 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
1951                         snprintf(xstats_names[cnt_used_entries].name,
1952                                 sizeof(xstats_names[0].name),
1953                                 "rx_q%u%s",
1954                                 id_queue, rte_rxq_stats_strings[idx].name);
1955                         cnt_used_entries++;
1956                 }
1957
1958         }
1959         num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
1960         for (id_queue = 0; id_queue < num_q; id_queue++) {
1961                 for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
1962                         snprintf(xstats_names[cnt_used_entries].name,
1963                                 sizeof(xstats_names[0].name),
1964                                 "tx_q%u%s",
1965                                 id_queue, rte_txq_stats_strings[idx].name);
1966                         cnt_used_entries++;
1967                 }
1968         }
1969         return cnt_used_entries;
1970 }
1971
1972 /* retrieve ethdev extended statistics names */
1973 int
1974 rte_eth_xstats_get_names_by_id(uint16_t port_id,
1975         struct rte_eth_xstat_name *xstats_names, unsigned int size,
1976         uint64_t *ids)
1977 {
1978         struct rte_eth_xstat_name *xstats_names_copy;
1979         unsigned int no_basic_stat_requested = 1;
1980         unsigned int no_ext_stat_requested = 1;
1981         unsigned int expected_entries;
1982         unsigned int basic_count;
1983         struct rte_eth_dev *dev;
1984         unsigned int i;
1985         int ret;
1986
1987         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1988         dev = &rte_eth_devices[port_id];
1989
1990         basic_count = get_xstats_basic_count(dev);
1991         ret = get_xstats_count(port_id);
1992         if (ret < 0)
1993                 return ret;
1994         expected_entries = (unsigned int)ret;
1995
1996         /* Return max number of stats if no ids given */
1997         if (!ids) {
1998                 if (!xstats_names)
1999                         return expected_entries;
2000                 else if (xstats_names && size < expected_entries)
2001                         return expected_entries;
2002         }
2003
2004         if (ids && !xstats_names)
2005                 return -EINVAL;
2006
2007         if (ids && dev->dev_ops->xstats_get_names_by_id != NULL && size > 0) {
2008                 uint64_t ids_copy[size];
2009
2010                 for (i = 0; i < size; i++) {
2011                         if (ids[i] < basic_count) {
2012                                 no_basic_stat_requested = 0;
2013                                 break;
2014                         }
2015
2016                         /*
2017                          * Convert ids to xstats ids that PMD knows.
2018                          * ids known by user are basic + extended stats.
2019                          */
2020                         ids_copy[i] = ids[i] - basic_count;
2021                 }
2022
2023                 if (no_basic_stat_requested)
2024                         return (*dev->dev_ops->xstats_get_names_by_id)(dev,
2025                                         xstats_names, ids_copy, size);
2026         }
2027
2028         /* Retrieve all stats */
2029         if (!ids) {
2030                 int num_stats = rte_eth_xstats_get_names(port_id, xstats_names,
2031                                 expected_entries);
2032                 if (num_stats < 0 || num_stats > (int)expected_entries)
2033                         return num_stats;
2034                 else
2035                         return expected_entries;
2036         }
2037
2038         xstats_names_copy = calloc(expected_entries,
2039                 sizeof(struct rte_eth_xstat_name));
2040
2041         if (!xstats_names_copy) {
2042                 RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory\n");
2043                 return -ENOMEM;
2044         }
2045
2046         if (ids) {
2047                 for (i = 0; i < size; i++) {
2048                         if (ids[i] >= basic_count) {
2049                                 no_ext_stat_requested = 0;
2050                                 break;
2051                         }
2052                 }
2053         }
2054
2055         /* Fill xstats_names_copy structure */
2056         if (ids && no_ext_stat_requested) {
2057                 rte_eth_basic_stats_get_names(dev, xstats_names_copy);
2058         } else {
2059                 ret = rte_eth_xstats_get_names(port_id, xstats_names_copy,
2060                         expected_entries);
2061                 if (ret < 0) {
2062                         free(xstats_names_copy);
2063                         return ret;
2064                 }
2065         }
2066
2067         /* Filter stats */
2068         for (i = 0; i < size; i++) {
2069                 if (ids[i] >= expected_entries) {
2070                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2071                         free(xstats_names_copy);
2072                         return -1;
2073                 }
2074                 xstats_names[i] = xstats_names_copy[ids[i]];
2075         }
2076
2077         free(xstats_names_copy);
2078         return size;
2079 }
2080
2081 int
2082 rte_eth_xstats_get_names(uint16_t port_id,
2083         struct rte_eth_xstat_name *xstats_names,
2084         unsigned int size)
2085 {
2086         struct rte_eth_dev *dev;
2087         int cnt_used_entries;
2088         int cnt_expected_entries;
2089         int cnt_driver_entries;
2090
2091         cnt_expected_entries = get_xstats_count(port_id);
2092         if (xstats_names == NULL || cnt_expected_entries < 0 ||
2093                         (int)size < cnt_expected_entries)
2094                 return cnt_expected_entries;
2095
2096         /* port_id checked in get_xstats_count() */
2097         dev = &rte_eth_devices[port_id];
2098
2099         cnt_used_entries = rte_eth_basic_stats_get_names(
2100                 dev, xstats_names);
2101
2102         if (dev->dev_ops->xstats_get_names != NULL) {
2103                 /* If there are any driver-specific xstats, append them
2104                  * to end of list.
2105                  */
2106                 cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
2107                         dev,
2108                         xstats_names + cnt_used_entries,
2109                         size - cnt_used_entries);
2110                 if (cnt_driver_entries < 0)
2111                         return eth_err(port_id, cnt_driver_entries);
2112                 cnt_used_entries += cnt_driver_entries;
2113         }
2114
2115         return cnt_used_entries;
2116 }
2117
2118
2119 static int
2120 rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
2121 {
2122         struct rte_eth_dev *dev;
2123         struct rte_eth_stats eth_stats;
2124         unsigned int count = 0, i, q;
2125         uint64_t val, *stats_ptr;
2126         uint16_t nb_rxqs, nb_txqs;
2127         int ret;
2128
2129         ret = rte_eth_stats_get(port_id, &eth_stats);
2130         if (ret < 0)
2131                 return ret;
2132
2133         dev = &rte_eth_devices[port_id];
2134
2135         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2136         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2137
2138         /* global stats */
2139         for (i = 0; i < RTE_NB_STATS; i++) {
2140                 stats_ptr = RTE_PTR_ADD(&eth_stats,
2141                                         rte_stats_strings[i].offset);
2142                 val = *stats_ptr;
2143                 xstats[count++].value = val;
2144         }
2145
2146         /* per-rxq stats */
2147         for (q = 0; q < nb_rxqs; q++) {
2148                 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
2149                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2150                                         rte_rxq_stats_strings[i].offset +
2151                                         q * sizeof(uint64_t));
2152                         val = *stats_ptr;
2153                         xstats[count++].value = val;
2154                 }
2155         }
2156
2157         /* per-txq stats */
2158         for (q = 0; q < nb_txqs; q++) {
2159                 for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
2160                         stats_ptr = RTE_PTR_ADD(&eth_stats,
2161                                         rte_txq_stats_strings[i].offset +
2162                                         q * sizeof(uint64_t));
2163                         val = *stats_ptr;
2164                         xstats[count++].value = val;
2165                 }
2166         }
2167         return count;
2168 }
2169
2170 /* retrieve ethdev extended statistics */
2171 int
2172 rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
2173                          uint64_t *values, unsigned int size)
2174 {
2175         unsigned int no_basic_stat_requested = 1;
2176         unsigned int no_ext_stat_requested = 1;
2177         unsigned int num_xstats_filled;
2178         unsigned int basic_count;
2179         uint16_t expected_entries;
2180         struct rte_eth_dev *dev;
2181         unsigned int i;
2182         int ret;
2183
2184         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2185         ret = get_xstats_count(port_id);
2186         if (ret < 0)
2187                 return ret;
2188         expected_entries = (uint16_t)ret;
2189         struct rte_eth_xstat xstats[expected_entries];
2190         dev = &rte_eth_devices[port_id];
2191         basic_count = get_xstats_basic_count(dev);
2192
2193         /* Return max number of stats if no ids given */
2194         if (!ids) {
2195                 if (!values)
2196                         return expected_entries;
2197                 else if (values && size < expected_entries)
2198                         return expected_entries;
2199         }
2200
2201         if (ids && !values)
2202                 return -EINVAL;
2203
2204         if (ids && dev->dev_ops->xstats_get_by_id != NULL && size) {
2205                 unsigned int basic_count = get_xstats_basic_count(dev);
2206                 uint64_t ids_copy[size];
2207
2208                 for (i = 0; i < size; i++) {
2209                         if (ids[i] < basic_count) {
2210                                 no_basic_stat_requested = 0;
2211                                 break;
2212                         }
2213
2214                         /*
2215                          * Convert ids to xstats ids that PMD knows.
2216                          * ids known by user are basic + extended stats.
2217                          */
2218                         ids_copy[i] = ids[i] - basic_count;
2219                 }
2220
2221                 if (no_basic_stat_requested)
2222                         return (*dev->dev_ops->xstats_get_by_id)(dev, ids_copy,
2223                                         values, size);
2224         }
2225
2226         if (ids) {
2227                 for (i = 0; i < size; i++) {
2228                         if (ids[i] >= basic_count) {
2229                                 no_ext_stat_requested = 0;
2230                                 break;
2231                         }
2232                 }
2233         }
2234
2235         /* Fill the xstats structure */
2236         if (ids && no_ext_stat_requested)
2237                 ret = rte_eth_basic_stats_get(port_id, xstats);
2238         else
2239                 ret = rte_eth_xstats_get(port_id, xstats, expected_entries);
2240
2241         if (ret < 0)
2242                 return ret;
2243         num_xstats_filled = (unsigned int)ret;
2244
2245         /* Return all stats */
2246         if (!ids) {
2247                 for (i = 0; i < num_xstats_filled; i++)
2248                         values[i] = xstats[i].value;
2249                 return expected_entries;
2250         }
2251
2252         /* Filter stats */
2253         for (i = 0; i < size; i++) {
2254                 if (ids[i] >= expected_entries) {
2255                         RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
2256                         return -1;
2257                 }
2258                 values[i] = xstats[ids[i]].value;
2259         }
2260         return size;
2261 }
2262
2263 int
2264 rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
2265         unsigned int n)
2266 {
2267         struct rte_eth_dev *dev;
2268         unsigned int count = 0, i;
2269         signed int xcount = 0;
2270         uint16_t nb_rxqs, nb_txqs;
2271         int ret;
2272
2273         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2274
2275         dev = &rte_eth_devices[port_id];
2276
2277         nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2278         nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
2279
2280         /* Return generic statistics */
2281         count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
2282                 (nb_txqs * RTE_NB_TXQ_STATS);
2283
2284         /* implemented by the driver */
2285         if (dev->dev_ops->xstats_get != NULL) {
2286                 /* Retrieve the xstats from the driver at the end of the
2287                  * xstats struct.
2288                  */
2289                 xcount = (*dev->dev_ops->xstats_get)(dev,
2290                                      xstats ? xstats + count : NULL,
2291                                      (n > count) ? n - count : 0);
2292
2293                 if (xcount < 0)
2294                         return eth_err(port_id, xcount);
2295         }
2296
2297         if (n < count + xcount || xstats == NULL)
2298                 return count + xcount;
2299
2300         /* now fill the xstats structure */
2301         ret = rte_eth_basic_stats_get(port_id, xstats);
2302         if (ret < 0)
2303                 return ret;
2304         count = ret;
2305
2306         for (i = 0; i < count; i++)
2307                 xstats[i].id = i;
2308         /* add an offset to driver-specific stats */
2309         for ( ; i < count + xcount; i++)
2310                 xstats[i].id += count;
2311
2312         return count + xcount;
2313 }
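
/*
 * Illustrative usage sketch, not part of the ethdev implementation: the usual
 * two-call pattern for dumping every extended statistic, first querying the
 * count with a NULL array, then retrieving names and values.
 *
 *	struct rte_eth_xstat_name *names;
 *	struct rte_eth_xstat *xstats;
 *	int i, n;
 *
 *	n = rte_eth_xstats_get(port_id, NULL, 0);
 *	if (n <= 0)
 *		return n;
 *	names = malloc(sizeof(*names) * n);
 *	xstats = malloc(sizeof(*xstats) * n);
 *	if (names != NULL && xstats != NULL &&
 *	    rte_eth_xstats_get_names(port_id, names, n) == n &&
 *	    rte_eth_xstats_get(port_id, xstats, n) == n)
 *		for (i = 0; i < n; i++)
 *			printf("%s: %" PRIu64 "\n",
 *				names[xstats[i].id].name, xstats[i].value);
 *	free(names);
 *	free(xstats);
 */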
2314
2315 /* reset ethdev extended statistics */
2316 void
2317 rte_eth_xstats_reset(uint16_t port_id)
2318 {
2319         struct rte_eth_dev *dev;
2320
2321         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2322         dev = &rte_eth_devices[port_id];
2323
2324         /* implemented by the driver */
2325         if (dev->dev_ops->xstats_reset != NULL) {
2326                 (*dev->dev_ops->xstats_reset)(dev);
2327                 return;
2328         }
2329
2330         /* fallback to default */
2331         rte_eth_stats_reset(port_id);
2332 }
2333
2334 static int
2335 set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
2336                 uint8_t is_rx)
2337 {
2338         struct rte_eth_dev *dev;
2339
2340         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2341
2342         dev = &rte_eth_devices[port_id];
2343
2344         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
2345         return (*dev->dev_ops->queue_stats_mapping_set)
2346                         (dev, queue_id, stat_idx, is_rx);
2347 }
2348
2349
2350 int
2351 rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id, uint16_t tx_queue_id,
2352                 uint8_t stat_idx)
2353 {
2354         return eth_err(port_id, set_queue_stats_mapping(port_id, tx_queue_id,
2355                                                 stat_idx, STAT_QMAP_TX));
2356 }
2357
2358
2359 int
2360 rte_eth_dev_set_rx_queue_stats_mapping(uint16_t port_id, uint16_t rx_queue_id,
2361                 uint8_t stat_idx)
2362 {
2363         return eth_err(port_id, set_queue_stats_mapping(port_id, rx_queue_id,
2364                                                 stat_idx, STAT_QMAP_RX));
2365 }
2366
2367 int
2368 rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
2369 {
2370         struct rte_eth_dev *dev;
2371
2372         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2373         dev = &rte_eth_devices[port_id];
2374
2375         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fw_version_get, -ENOTSUP);
2376         return eth_err(port_id, (*dev->dev_ops->fw_version_get)(dev,
2377                                                         fw_version, fw_size));
2378 }
2379
2380 void
2381 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
2382 {
2383         struct rte_eth_dev *dev;
2384         const struct rte_eth_desc_lim lim = {
2385                 .nb_max = UINT16_MAX,
2386                 .nb_min = 0,
2387                 .nb_align = 1,
2388         };
2389
2390         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2391         dev = &rte_eth_devices[port_id];
2392
2393         memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
2394         dev_info->rx_desc_lim = lim;
2395         dev_info->tx_desc_lim = lim;
2396
2397         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
2398         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
2399         dev_info->driver_name = dev->device->driver->name;
2400         dev_info->nb_rx_queues = dev->data->nb_rx_queues;
2401         dev_info->nb_tx_queues = dev->data->nb_tx_queues;
2402 }
2403
2404 int
2405 rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
2406                                  uint32_t *ptypes, int num)
2407 {
2408         int i, j;
2409         struct rte_eth_dev *dev;
2410         const uint32_t *all_ptypes;
2411
2412         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2413         dev = &rte_eth_devices[port_id];
2414         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
2415         all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
2416
2417         if (!all_ptypes)
2418                 return 0;
2419
2420         for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
2421                 if (all_ptypes[i] & ptype_mask) {
2422                         if (j < num)
2423                                 ptypes[j] = all_ptypes[i];
2424                         j++;
2425                 }
2426
2427         return j;
2428 }
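
/*
 * Illustrative usage sketch, not part of the ethdev implementation: the return
 * value is the number of supported packet types matching the mask, which may
 * exceed the array that was passed in, so the caller clamps it before reading.
 *
 *	uint32_t ptypes[16];
 *	int i, num;
 *
 *	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *			ptypes, RTE_DIM(ptypes));
 *	if (num > (int)RTE_DIM(ptypes))
 *		num = RTE_DIM(ptypes);
 *	for (i = 0; i < num; i++)
 *		printf("supported L4 ptype: 0x%08x\n", ptypes[i]);
 */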
2429
2430 void
2431 rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
2432 {
2433         struct rte_eth_dev *dev;
2434
2435         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2436         dev = &rte_eth_devices[port_id];
2437         ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
2438 }
2439
2440
2441 int
2442 rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
2443 {
2444         struct rte_eth_dev *dev;
2445
2446         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2447
2448         dev = &rte_eth_devices[port_id];
2449         *mtu = dev->data->mtu;
2450         return 0;
2451 }
2452
2453 int
2454 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
2455 {
2456         int ret;
2457         struct rte_eth_dev *dev;
2458
2459         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2460         dev = &rte_eth_devices[port_id];
2461         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
2462
2463         ret = (*dev->dev_ops->mtu_set)(dev, mtu);
2464         if (!ret)
2465                 dev->data->mtu = mtu;
2466
2467         return eth_err(port_id, ret);
2468 }
2469
2470 int
2471 rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
2472 {
2473         struct rte_eth_dev *dev;
2474         int ret;
2475
2476         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2477         dev = &rte_eth_devices[port_id];
2478         if (!(dev->data->dev_conf.rxmode.offloads &
2479               DEV_RX_OFFLOAD_VLAN_FILTER)) {
2480                 RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
2481                 return -ENOSYS;
2482         }
2483
2484         if (vlan_id > 4095) {
2485                 RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
2486                                 port_id, (unsigned) vlan_id);
2487                 return -EINVAL;
2488         }
2489         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
2490
2491         ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
2492         if (ret == 0) {
2493                 struct rte_vlan_filter_conf *vfc;
2494                 int vidx;
2495                 int vbit;
2496
2497                 vfc = &dev->data->vlan_filter_conf;
2498                 vidx = vlan_id / 64;
2499                 vbit = vlan_id % 64;
2500
2501                 if (on)
2502                         vfc->ids[vidx] |= UINT64_C(1) << vbit;
2503                 else
2504                         vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
2505         }
2506
2507         return eth_err(port_id, ret);
2508 }
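
/*
 * Illustrative usage sketch, not part of the ethdev implementation: the filter
 * can only be programmed when DEV_RX_OFFLOAD_VLAN_FILTER was requested at
 * configure time, which is what the check at the top of this function
 * enforces. The single queue pair and VLAN ID 100 are arbitrary example
 * values.
 *
 *	struct rte_eth_conf port_conf;
 *
 *	memset(&port_conf, 0, sizeof(port_conf));
 *	port_conf.rxmode.ignore_offload_bitfield = 1;
 *	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) == 0)
 *		rte_eth_dev_vlan_filter(port_id, 100, 1);
 */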
2509
2510 int
2511 rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
2512                                     int on)
2513 {
2514         struct rte_eth_dev *dev;
2515
2516         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2517         dev = &rte_eth_devices[port_id];
2518         if (rx_queue_id >= dev->data->nb_rx_queues) {
2519                 RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
2520                 return -EINVAL;
2521         }
2522
2523         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
2524         (*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);
2525
2526         return 0;
2527 }
2528
2529 int
2530 rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
2531                                 enum rte_vlan_type vlan_type,
2532                                 uint16_t tpid)
2533 {
2534         struct rte_eth_dev *dev;
2535
2536         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2537         dev = &rte_eth_devices[port_id];
2538         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);
2539
2540         return eth_err(port_id, (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type,
2541                                                                tpid));
2542 }
2543
2544 int
2545 rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
2546 {
2547         struct rte_eth_dev *dev;
2548         int ret = 0;
2549         int mask = 0;
2550         int cur, org = 0;
2551         uint64_t orig_offloads;
2552
2553         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2554         dev = &rte_eth_devices[port_id];
2555
2556         /* save original values in case of failure */
2557         orig_offloads = dev->data->dev_conf.rxmode.offloads;
2558
2559         /* check which options were changed by the application */
2560         cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
2561         org = !!(dev->data->dev_conf.rxmode.offloads &
2562                  DEV_RX_OFFLOAD_VLAN_STRIP);
2563         if (cur != org) {
2564                 if (cur)
2565                         dev->data->dev_conf.rxmode.offloads |=
2566                                 DEV_RX_OFFLOAD_VLAN_STRIP;
2567                 else
2568                         dev->data->dev_conf.rxmode.offloads &=
2569                                 ~DEV_RX_OFFLOAD_VLAN_STRIP;
2570                 mask |= ETH_VLAN_STRIP_MASK;
2571         }
2572
2573         cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
2574         org = !!(dev->data->dev_conf.rxmode.offloads &
2575                  DEV_RX_OFFLOAD_VLAN_FILTER);
2576         if (cur != org) {
2577                 if (cur)
2578                         dev->data->dev_conf.rxmode.offloads |=
2579                                 DEV_RX_OFFLOAD_VLAN_FILTER;
2580                 else
2581                         dev->data->dev_conf.rxmode.offloads &=
2582                                 ~DEV_RX_OFFLOAD_VLAN_FILTER;
2583                 mask |= ETH_VLAN_FILTER_MASK;
2584         }
2585
2586         cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
2587         org = !!(dev->data->dev_conf.rxmode.offloads &
2588                  DEV_RX_OFFLOAD_VLAN_EXTEND);
2589         if (cur != org) {
2590                 if (cur)
2591                         dev->data->dev_conf.rxmode.offloads |=
2592                                 DEV_RX_OFFLOAD_VLAN_EXTEND;
2593                 else
2594                         dev->data->dev_conf.rxmode.offloads &=
2595                                 ~DEV_RX_OFFLOAD_VLAN_EXTEND;
2596                 mask |= ETH_VLAN_EXTEND_MASK;
2597         }
2598
2599         /* no change */
2600         if (mask == 0)
2601                 return ret;
2602
2603         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
2604
2605         /*
2606          * Convert to the offload bitfield API in case the underlying PMD
2607          * still relies on it.
2608          */
2609         rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2610                                     &dev->data->dev_conf.rxmode);
2611         ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
2612         if (ret) {
2613                 /* hit an error, restore the original values */
2614                 dev->data->dev_conf.rxmode.offloads = orig_offloads;
2615                 rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
2616                                             &dev->data->dev_conf.rxmode);
2617         }
2618
2619         return eth_err(port_id, ret);
2620 }
2621
2622 int
2623 rte_eth_dev_get_vlan_offload(uint16_t port_id)
2624 {
2625         struct rte_eth_dev *dev;
2626         int ret = 0;
2627
2628         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2629         dev = &rte_eth_devices[port_id];
2630
2631         if (dev->data->dev_conf.rxmode.offloads &
2632             DEV_RX_OFFLOAD_VLAN_STRIP)
2633                 ret |= ETH_VLAN_STRIP_OFFLOAD;
2634
2635         if (dev->data->dev_conf.rxmode.offloads &
2636             DEV_RX_OFFLOAD_VLAN_FILTER)
2637                 ret |= ETH_VLAN_FILTER_OFFLOAD;
2638
2639         if (dev->data->dev_conf.rxmode.offloads &
2640             DEV_RX_OFFLOAD_VLAN_EXTEND)
2641                 ret |= ETH_VLAN_EXTEND_OFFLOAD;
2642
2643         return ret;
2644 }
2645
2646 int
2647 rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on)
2648 {
2649         struct rte_eth_dev *dev;
2650
2651         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2652         dev = &rte_eth_devices[port_id];
2653         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
2654
2655         return eth_err(port_id, (*dev->dev_ops->vlan_pvid_set)(dev, pvid, on));
2656 }
2657
2658 int
2659 rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2660 {
2661         struct rte_eth_dev *dev;
2662
2663         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2664         dev = &rte_eth_devices[port_id];
2665         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
2666         memset(fc_conf, 0, sizeof(*fc_conf));
2667         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf));
2668 }
2669
2670 int
2671 rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
2672 {
2673         struct rte_eth_dev *dev;
2674
2675         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2676         if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
2677                 RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
2678                 return -EINVAL;
2679         }
2680
2681         dev = &rte_eth_devices[port_id];
2682         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
2683         return eth_err(port_id, (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
2684 }
2685
2686 int
2687 rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
2688                                    struct rte_eth_pfc_conf *pfc_conf)
2689 {
2690         struct rte_eth_dev *dev;
2691
2692         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2693         if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
2694                 RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
2695                 return -EINVAL;
2696         }
2697
2698         dev = &rte_eth_devices[port_id];
2699         /* High water, low water validation are device specific */
2700         if (*dev->dev_ops->priority_flow_ctrl_set)
2701                 return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_set)
2702                                         (dev, pfc_conf));
2703         return -ENOTSUP;
2704 }
2705
2706 static int
2707 rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
2708                         uint16_t reta_size)
2709 {
2710         uint16_t i, num;
2711
2712         if (!reta_conf)
2713                 return -EINVAL;
2714
2715         num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
2716         for (i = 0; i < num; i++) {
2717                 if (reta_conf[i].mask)
2718                         return 0;
2719         }
2720
2721         return -EINVAL;
2722 }
2723
2724 static int
2725 rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
2726                          uint16_t reta_size,
2727                          uint16_t max_rxq)
2728 {
2729         uint16_t i, idx, shift;
2730
2731         if (!reta_conf)
2732                 return -EINVAL;
2733
2734         if (max_rxq == 0) {
2735                 RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
2736                 return -EINVAL;
2737         }
2738
2739         for (i = 0; i < reta_size; i++) {
2740                 idx = i / RTE_RETA_GROUP_SIZE;
2741                 shift = i % RTE_RETA_GROUP_SIZE;
2742                 if ((reta_conf[idx].mask & (1ULL << shift)) &&
2743                         (reta_conf[idx].reta[shift] >= max_rxq)) {
2744                         RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
2745                                 "the maximum rxq index: %u\n", idx, shift,
2746                                 reta_conf[idx].reta[shift], max_rxq);
2747                         return -EINVAL;
2748                 }
2749         }
2750
2751         return 0;
2752 }
2753
2754 int
2755 rte_eth_dev_rss_reta_update(uint16_t port_id,
2756                             struct rte_eth_rss_reta_entry64 *reta_conf,
2757                             uint16_t reta_size)
2758 {
2759         struct rte_eth_dev *dev;
2760         int ret;
2761
2762         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2763         /* Check mask bits */
2764         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2765         if (ret < 0)
2766                 return ret;
2767
2768         dev = &rte_eth_devices[port_id];
2769
2770         /* Check entry value */
2771         ret = rte_eth_check_reta_entry(reta_conf, reta_size,
2772                                 dev->data->nb_rx_queues);
2773         if (ret < 0)
2774                 return ret;
2775
2776         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
2777         return eth_err(port_id, (*dev->dev_ops->reta_update)(dev, reta_conf,
2778                                                              reta_size));
2779 }
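
/*
 * Illustrative usage sketch, not part of the ethdev implementation: spreading
 * traffic evenly over the application's Rx queues. The sketch assumes the
 * device's reta_size does not exceed ETH_RSS_RETA_SIZE_512, and the Rx queue
 * count of 4 is an arbitrary example value.
 *
 *	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_512 /
 *						  RTE_RETA_GROUP_SIZE];
 *	struct rte_eth_dev_info dev_info;
 *	uint16_t nb_rx_queues = 4;
 *	uint16_t i, idx, shift;
 *
 *	rte_eth_dev_info_get(port_id, &dev_info);
 *	memset(reta_conf, 0, sizeof(reta_conf));
 *	for (i = 0; i < dev_info.reta_size; i++) {
 *		idx = i / RTE_RETA_GROUP_SIZE;
 *		shift = i % RTE_RETA_GROUP_SIZE;
 *		reta_conf[idx].mask |= UINT64_C(1) << shift;
 *		reta_conf[idx].reta[shift] = i % nb_rx_queues;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size);
 */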
2780
2781 int
2782 rte_eth_dev_rss_reta_query(uint16_t port_id,
2783                            struct rte_eth_rss_reta_entry64 *reta_conf,
2784                            uint16_t reta_size)
2785 {
2786         struct rte_eth_dev *dev;
2787         int ret;
2788
2789         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2790
2791         /* Check mask bits */
2792         ret = rte_eth_check_reta_mask(reta_conf, reta_size);
2793         if (ret < 0)
2794                 return ret;
2795
2796         dev = &rte_eth_devices[port_id];
2797         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
2798         return eth_err(port_id, (*dev->dev_ops->reta_query)(dev, reta_conf,
2799                                                             reta_size));
2800 }
2801
2802 int
2803 rte_eth_dev_rss_hash_update(uint16_t port_id,
2804                             struct rte_eth_rss_conf *rss_conf)
2805 {
2806         struct rte_eth_dev *dev;
2807
2808         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2809         dev = &rte_eth_devices[port_id];
2810         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
2811         return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
2812                                                                  rss_conf));
2813 }
2814
2815 int
2816 rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
2817                               struct rte_eth_rss_conf *rss_conf)
2818 {
2819         struct rte_eth_dev *dev;
2820
2821         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2822         dev = &rte_eth_devices[port_id];
2823         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
2824         return eth_err(port_id, (*dev->dev_ops->rss_hash_conf_get)(dev,
2825                                                                    rss_conf));
2826 }
2827
2828 int
2829 rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
2830                                 struct rte_eth_udp_tunnel *udp_tunnel)
2831 {
2832         struct rte_eth_dev *dev;
2833
2834         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2835         if (udp_tunnel == NULL) {
2836                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2837                 return -EINVAL;
2838         }
2839
2840         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2841                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2842                 return -EINVAL;
2843         }
2844
2845         dev = &rte_eth_devices[port_id];
2846         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
2847         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_add)(dev,
2848                                                                 udp_tunnel));
2849 }
2850
2851 int
2852 rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
2853                                    struct rte_eth_udp_tunnel *udp_tunnel)
2854 {
2855         struct rte_eth_dev *dev;
2856
2857         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2858         dev = &rte_eth_devices[port_id];
2859
2860         if (udp_tunnel == NULL) {
2861                 RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
2862                 return -EINVAL;
2863         }
2864
2865         if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
2866                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
2867                 return -EINVAL;
2868         }
2869
2870         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
2871         return eth_err(port_id, (*dev->dev_ops->udp_tunnel_port_del)(dev,
2872                                                                 udp_tunnel));
2873 }
2874
2875 int
2876 rte_eth_led_on(uint16_t port_id)
2877 {
2878         struct rte_eth_dev *dev;
2879
2880         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2881         dev = &rte_eth_devices[port_id];
2882         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
2883         return eth_err(port_id, (*dev->dev_ops->dev_led_on)(dev));
2884 }
2885
2886 int
2887 rte_eth_led_off(uint16_t port_id)
2888 {
2889         struct rte_eth_dev *dev;
2890
2891         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2892         dev = &rte_eth_devices[port_id];
2893         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2894         return eth_err(port_id, (*dev->dev_ops->dev_led_off)(dev));
2895 }
2896
2897 /*
2898  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2899  * an empty spot.
2900  */
2901 static int
2902 get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
2903 {
2904         struct rte_eth_dev_info dev_info;
2905         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2906         unsigned i;
2907
2908         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2909         rte_eth_dev_info_get(port_id, &dev_info);
2910
2911         for (i = 0; i < dev_info.max_mac_addrs; i++)
2912                 if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2913                         return i;
2914
2915         return -1;
2916 }
2917
2918 static const struct ether_addr null_mac_addr;
2919
2920 int
2921 rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
2922                         uint32_t pool)
2923 {
2924         struct rte_eth_dev *dev;
2925         int index;
2926         uint64_t pool_mask;
2927         int ret;
2928
2929         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2930         dev = &rte_eth_devices[port_id];
2931         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2932
2933         if (is_zero_ether_addr(addr)) {
2934                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2935                         port_id);
2936                 return -EINVAL;
2937         }
2938         if (pool >= ETH_64_POOLS) {
2939                 RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2940                 return -EINVAL;
2941         }
2942
2943         index = get_mac_addr_index(port_id, addr);
2944         if (index < 0) {
2945                 index = get_mac_addr_index(port_id, &null_mac_addr);
2946                 if (index < 0) {
2947                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2948                                 port_id);
2949                         return -ENOSPC;
2950                 }
2951         } else {
2952                 pool_mask = dev->data->mac_pool_sel[index];
2953
2954                 /* If both the MAC address and the pool are already there, do nothing */
2955                 if (pool_mask & (1ULL << pool))
2956                         return 0;
2957         }
2958
2959         /* Update NIC */
2960         ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2961
2962         if (ret == 0) {
2963                 /* Update address in NIC data structure */
2964                 ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2965
2966                 /* Update pool bitmap in NIC data structure */
2967                 dev->data->mac_pool_sel[index] |= (1ULL << pool);
2968         }
2969
2970         return eth_err(port_id, ret);
2971 }
2972
2973 int
2974 rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
2975 {
2976         struct rte_eth_dev *dev;
2977         int index;
2978
2979         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2980         dev = &rte_eth_devices[port_id];
2981         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2982
2983         index = get_mac_addr_index(port_id, addr);
2984         if (index == 0) {
2985                 RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2986                 return -EADDRINUSE;
2987         } else if (index < 0)
2988                 return 0;  /* Do nothing if address wasn't found */
2989
2990         /* Update NIC */
2991         (*dev->dev_ops->mac_addr_remove)(dev, index);
2992
2993         /* Update address in NIC data structure */
2994         ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2995
2996         /* reset pool bitmap */
2997         dev->data->mac_pool_sel[index] = 0;
2998
2999         return 0;
3000 }
3001
3002 int
3003 rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
3004 {
3005         struct rte_eth_dev *dev;
3006
3007         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3008
3009         if (!is_valid_assigned_ether_addr(addr))
3010                 return -EINVAL;
3011
3012         dev = &rte_eth_devices[port_id];
3013         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
3014
3015         /* Update default address in NIC data structure */
3016         ether_addr_copy(addr, &dev->data->mac_addrs[0]);
3017
3018         (*dev->dev_ops->mac_addr_set)(dev, addr);
3019
3020         return 0;
3021 }
3022
3023
3024 /*
3025  * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
3026  * an empty spot.
3027  */
3028 static int
3029 get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
3030 {
3031         struct rte_eth_dev_info dev_info;
3032         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3033         unsigned i;
3034
3035         rte_eth_dev_info_get(port_id, &dev_info);
3036         if (!dev->data->hash_mac_addrs)
3037                 return -1;
3038
3039         for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
3040                 if (memcmp(addr, &dev->data->hash_mac_addrs[i],
3041                         ETHER_ADDR_LEN) == 0)
3042                         return i;
3043
3044         return -1;
3045 }
3046
3047 int
3048 rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
3049                                 uint8_t on)
3050 {
3051         int index;
3052         int ret;
3053         struct rte_eth_dev *dev;
3054
3055         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3056
3057         dev = &rte_eth_devices[port_id];
3058         if (is_zero_ether_addr(addr)) {
3059                 RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
3060                         port_id);
3061                 return -EINVAL;
3062         }
3063
3064         index = get_hash_mac_addr_index(port_id, addr);
3065         /* Check if it's already there, and do nothing */
3066         if ((index >= 0) && on)
3067                 return 0;
3068
3069         if (index < 0) {
3070                 if (!on) {
3071                         RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
3072                                 "set in UTA\n", port_id);
3073                         return -EINVAL;
3074                 }
3075
3076                 index = get_hash_mac_addr_index(port_id, &null_mac_addr);
3077                 if (index < 0) {
3078                         RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
3079                                         port_id);
3080                         return -ENOSPC;
3081                 }
3082         }
3083
3084         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
3085         ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
3086         if (ret == 0) {
3087                 /* Update address in NIC data structure */
3088                 if (on)
3089                         ether_addr_copy(addr,
3090                                         &dev->data->hash_mac_addrs[index]);
3091                 else
3092                         ether_addr_copy(&null_mac_addr,
3093                                         &dev->data->hash_mac_addrs[index]);
3094         }
3095
3096         return eth_err(port_id, ret);
3097 }
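/*
 * Usage sketch (illustrative only): accepting one extra unicast address via
 * the unicast hash table (UTA) on PMDs that support it.  The address value
 * is a placeholder.
 *
 *	struct ether_addr uta_mac = {
 *		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 }
 *	};
 *	if (rte_eth_dev_uc_hash_table_set(port_id, &uta_mac, 1) != 0)
 *		printf("port %u: UTA entry not added\n", port_id);
 */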
3098
3099 int
3100 rte_eth_dev_uc_all_hash_table_set(uint16_t port_id, uint8_t on)
3101 {
3102         struct rte_eth_dev *dev;
3103
3104         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3105
3106         dev = &rte_eth_devices[port_id];
3107
3108         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
3109         return eth_err(port_id, (*dev->dev_ops->uc_all_hash_table_set)(dev,
3110                                                                        on));
3111 }
3112
3113 int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
3114                                         uint16_t tx_rate)
3115 {
3116         struct rte_eth_dev *dev;
3117         struct rte_eth_dev_info dev_info;
3118         struct rte_eth_link link;
3119
3120         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3121
3122         dev = &rte_eth_devices[port_id];
3123         rte_eth_dev_info_get(port_id, &dev_info);
3124         link = dev->data->dev_link;
3125
3126         if (queue_idx >= dev_info.max_tx_queues) {
3127                 RTE_PMD_DEBUG_TRACE("set queue rate limit: port %d: "
3128                                 "invalid queue id=%d\n", port_id, queue_idx);
3129                 return -EINVAL;
3130         }
3131
3132         if (tx_rate > link.link_speed) {
3133                 RTE_PMD_DEBUG_TRACE("set queue rate limit: invalid tx_rate=%d, "
3134                                 "greater than link speed=%d\n",
3135                         tx_rate, link.link_speed);
3136                 return -EINVAL;
3137         }
3138
3139         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
3140         return eth_err(port_id, (*dev->dev_ops->set_queue_rate_limit)(dev,
3141                                                         queue_idx, tx_rate));
3142 }
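/*
 * Usage sketch (illustrative only): capping Tx queue 0 of a port to
 * 100 Mbps.  tx_rate is expressed in Mbps and, as checked above, must not
 * exceed the current link speed.
 *
 *	int rc = rte_eth_set_queue_rate_limit(port_id, 0, 100);
 *	if (rc == -ENOTSUP)
 *		printf("port %u: per-queue rate limiting not supported\n",
 *			port_id);
 */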
3143
3144 int
3145 rte_eth_mirror_rule_set(uint16_t port_id,
3146                         struct rte_eth_mirror_conf *mirror_conf,
3147                         uint8_t rule_id, uint8_t on)
3148 {
3149         struct rte_eth_dev *dev;
3150
3151         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3152         if (mirror_conf->rule_type == 0) {
3153                 RTE_PMD_DEBUG_TRACE("mirror rule type cannot be 0.\n");
3154                 return -EINVAL;
3155         }
3156
3157         if (mirror_conf->dst_pool >= ETH_64_POOLS) {
3158                 RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
3159                                 ETH_64_POOLS - 1);
3160                 return -EINVAL;
3161         }
3162
3163         if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
3164              ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
3165             (mirror_conf->pool_mask == 0)) {
3166                 RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask cannot be 0.\n");
3167                 return -EINVAL;
3168         }
3169
3170         if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
3171             mirror_conf->vlan.vlan_mask == 0) {
3172                 RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask cannot be 0.\n");
3173                 return -EINVAL;
3174         }
3175
3176         dev = &rte_eth_devices[port_id];
3177         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
3178
3179         return eth_err(port_id, (*dev->dev_ops->mirror_rule_set)(dev,
3180                                                 mirror_conf, rule_id, on));
3181 }
3182
3183 int
3184 rte_eth_mirror_rule_reset(uint16_t port_id, uint8_t rule_id)
3185 {
3186         struct rte_eth_dev *dev;
3187
3188         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3189
3190         dev = &rte_eth_devices[port_id];
3191         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
3192
3193         return eth_err(port_id, (*dev->dev_ops->mirror_rule_reset)(dev,
3194                                                                    rule_id));
3195 }
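/*
 * Usage sketch (illustrative only, all field values are placeholders):
 * mirroring traffic of VLAN 100 to pool 1 using rule slot 0, then removing
 * the rule again.  vlan_mask bit 0 marks vlan_id[0] as valid.
 *
 *	struct rte_eth_mirror_conf mc = { 0 };
 *
 *	mc.rule_type = ETH_MIRROR_VLAN;
 *	mc.dst_pool = 1;
 *	mc.vlan.vlan_mask = 1ULL;
 *	mc.vlan.vlan_id[0] = 100;
 *	if (rte_eth_mirror_rule_set(port_id, &mc, 0, 1) == 0)
 *		rte_eth_mirror_rule_reset(port_id, 0);
 */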
3196
3197 RTE_INIT(eth_dev_init_cb_lists)
3198 {
3199         int i;
3200
3201         for (i = 0; i < RTE_MAX_ETHPORTS; i++)
3202                 TAILQ_INIT(&rte_eth_devices[i].link_intr_cbs);
3203 }
3204
3205 int
3206 rte_eth_dev_callback_register(uint16_t port_id,
3207                         enum rte_eth_event_type event,
3208                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3209 {
3210         struct rte_eth_dev *dev;
3211         struct rte_eth_dev_callback *user_cb;
3212         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3213         uint16_t last_port;
3214
3215         if (!cb_fn)
3216                 return -EINVAL;
3217
3218         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3219                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3220                 return -EINVAL;
3221         }
3222
3223         if (port_id == RTE_ETH_ALL) {
3224                 next_port = 0;
3225                 last_port = RTE_MAX_ETHPORTS - 1;
3226         } else {
3227                 next_port = last_port = port_id;
3228         }
3229
3230         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3231
3232         do {
3233                 dev = &rte_eth_devices[next_port];
3234
3235                 TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
3236                         if (user_cb->cb_fn == cb_fn &&
3237                                 user_cb->cb_arg == cb_arg &&
3238                                 user_cb->event == event) {
3239                                 break;
3240                         }
3241                 }
3242
3243                 /* create a new callback. */
3244                 if (user_cb == NULL) {
3245                         user_cb = rte_zmalloc("INTR_USER_CALLBACK",
3246                                 sizeof(struct rte_eth_dev_callback), 0);
3247                         if (user_cb != NULL) {
3248                                 user_cb->cb_fn = cb_fn;
3249                                 user_cb->cb_arg = cb_arg;
3250                                 user_cb->event = event;
3251                                 TAILQ_INSERT_TAIL(&(dev->link_intr_cbs),
3252                                                   user_cb, next);
3253                         } else {
3254                                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3255                                 rte_eth_dev_callback_unregister(port_id, event,
3256                                                                 cb_fn, cb_arg);
3257                                 return -ENOMEM;
3258                         }
3259
3260                 }
3261         } while (++next_port <= last_port);
3262
3263         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3264         return 0;
3265 }
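/*
 * Usage sketch (illustrative only): registering a link-status-change
 * callback for one port.  The callback name and its body are examples,
 * not part of this library.
 *
 *	static int
 *	lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
 *		     void *cb_arg, void *ret_param)
 *	{
 *		RTE_SET_USED(cb_arg);
 *		RTE_SET_USED(ret_param);
 *		printf("port %u: event %d\n", port_id, event);
 *		return 0;
 *	}
 *
 *	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *				      lsc_event_cb, NULL);
 */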
3266
3267 int
3268 rte_eth_dev_callback_unregister(uint16_t port_id,
3269                         enum rte_eth_event_type event,
3270                         rte_eth_dev_cb_fn cb_fn, void *cb_arg)
3271 {
3272         int ret;
3273         struct rte_eth_dev *dev;
3274         struct rte_eth_dev_callback *cb, *next;
3275         uint32_t next_port; /* size is 32-bit to prevent loop wrap-around */
3276         uint16_t last_port;
3277
3278         if (!cb_fn)
3279                 return -EINVAL;
3280
3281         if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
3282                 RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
3283                 return -EINVAL;
3284         }
3285
3286         if (port_id == RTE_ETH_ALL) {
3287                 next_port = 0;
3288                 last_port = RTE_MAX_ETHPORTS - 1;
3289         } else {
3290                 next_port = last_port = port_id;
3291         }
3292
3293         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3294
3295         do {
3296                 dev = &rte_eth_devices[next_port];
3297                 ret = 0;
3298                 for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL;
3299                      cb = next) {
3300
3301                         next = TAILQ_NEXT(cb, next);
3302
3303                         if (cb->cb_fn != cb_fn || cb->event != event ||
3304                             (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
3305                                 continue;
3306
3307                         /*
3308                          * if this callback is not executing right now,
3309                          * then remove it.
3310                          */
3311                         if (cb->active == 0) {
3312                                 TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
3313                                 rte_free(cb);
3314                         } else {
3315                                 ret = -EAGAIN;
3316                         }
3317                 }
3318         } while (++next_port <= last_port);
3319
3320         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3321         return ret;
3322 }
3323
3324 int
3325 _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
3326         enum rte_eth_event_type event, void *ret_param)
3327 {
3328         struct rte_eth_dev_callback *cb_lst;
3329         struct rte_eth_dev_callback dev_cb;
3330         int rc = 0;
3331
3332         rte_spinlock_lock(&rte_eth_dev_cb_lock);
3333         TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
3334                 if (cb_lst->cb_fn == NULL || cb_lst->event != event)
3335                         continue;
3336                 dev_cb = *cb_lst;
3337                 cb_lst->active = 1;
3338                 if (ret_param != NULL)
3339                         dev_cb.ret_param = ret_param;
3340
3341                 rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3342                 rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
3343                                 dev_cb.cb_arg, dev_cb.ret_param);
3344                 rte_spinlock_lock(&rte_eth_dev_cb_lock);
3345                 cb_lst->active = 0;
3346         }
3347         rte_spinlock_unlock(&rte_eth_dev_cb_lock);
3348         return rc;
3349 }
3350
3351 int
3352 rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
3353 {
3354         uint32_t vec;
3355         struct rte_eth_dev *dev;
3356         struct rte_intr_handle *intr_handle;
3357         uint16_t qid;
3358         int rc;
3359
3360         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3361
3362         dev = &rte_eth_devices[port_id];
3363
3364         if (!dev->intr_handle) {
3365                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3366                 return -ENOTSUP;
3367         }
3368
3369         intr_handle = dev->intr_handle;
3370         if (!intr_handle->intr_vec) {
3371                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3372                 return -EPERM;
3373         }
3374
3375         for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
3376                 vec = intr_handle->intr_vec[qid];
3377                 rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3378                 if (rc && rc != -EEXIST) {
3379                         RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3380                                         " op %d epfd %d vec %u\n",
3381                                         port_id, qid, op, epfd, vec);
3382                 }
3383         }
3384
3385         return 0;
3386 }
3387
3388 const struct rte_memzone *
3389 rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
3390                          uint16_t queue_id, size_t size, unsigned align,
3391                          int socket_id)
3392 {
3393         char z_name[RTE_MEMZONE_NAMESIZE];
3394         const struct rte_memzone *mz;
3395
3396         snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
3397                  dev->device->driver->name, ring_name,
3398                  dev->data->port_id, queue_id);
3399
3400         mz = rte_memzone_lookup(z_name);
3401         if (mz)
3402                 return mz;
3403
3404         return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
3405 }
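/*
 * Usage sketch (illustrative only): how a PMD might reserve DMA memory for
 * a descriptor ring from its tx_queue_setup handler.  The ring name, size,
 * alignment and the queue_idx/ring_size/socket_id locals are placeholders.
 *
 *	const struct rte_memzone *mz;
 *
 *	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
 *				      ring_size, RTE_CACHE_LINE_SIZE,
 *				      socket_id);
 *	if (mz == NULL)
 *		return -ENOMEM;
 */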
3406
3407 int
3408 rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
3409                           int epfd, int op, void *data)
3410 {
3411         uint32_t vec;
3412         struct rte_eth_dev *dev;
3413         struct rte_intr_handle *intr_handle;
3414         int rc;
3415
3416         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3417
3418         dev = &rte_eth_devices[port_id];
3419         if (queue_id >= dev->data->nb_rx_queues) {
3420                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
3421                 return -EINVAL;
3422         }
3423
3424         if (!dev->intr_handle) {
3425                 RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
3426                 return -ENOTSUP;
3427         }
3428
3429         intr_handle = dev->intr_handle;
3430         if (!intr_handle->intr_vec) {
3431                 RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
3432                 return -EPERM;
3433         }
3434
3435         vec = intr_handle->intr_vec[queue_id];
3436         rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
3437         if (rc && rc != -EEXIST) {
3438                 RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
3439                                 " op %d epfd %d vec %u\n",
3440                                 port_id, queue_id, op, epfd, vec);
3441                 return rc;
3442         }
3443
3444         return 0;
3445 }
3446
3447 int
3448 rte_eth_dev_rx_intr_enable(uint16_t port_id,
3449                            uint16_t queue_id)
3450 {
3451         struct rte_eth_dev *dev;
3452
3453         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3454
3455         dev = &rte_eth_devices[port_id];
3456
3457         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
3458         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_enable)(dev,
3459                                                                 queue_id));
3460 }
3461
3462 int
3463 rte_eth_dev_rx_intr_disable(uint16_t port_id,
3464                             uint16_t queue_id)
3465 {
3466         struct rte_eth_dev *dev;
3467
3468         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3469
3470         dev = &rte_eth_devices[port_id];
3471
3472         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
3473         return eth_err(port_id, (*dev->dev_ops->rx_queue_intr_disable)(dev,
3474                                                                 queue_id));
3475 }
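/*
 * Usage sketch (illustrative only, modelled on the l3fwd-power sample):
 * adding one Rx queue to the per-thread epoll instance, arming its
 * interrupt, sleeping until traffic arrives, then disarming before
 * returning to polling.
 *
 *	struct rte_epoll_event ev;
 *
 *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
 *				  RTE_INTR_EVENT_ADD, NULL);
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */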
3476
3477
3478 int
3479 rte_eth_dev_filter_supported(uint16_t port_id,
3480                              enum rte_filter_type filter_type)
3481 {
3482         struct rte_eth_dev *dev;
3483
3484         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3485
3486         dev = &rte_eth_devices[port_id];
3487         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3488         return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3489                                 RTE_ETH_FILTER_NOP, NULL);
3490 }
3491
3492 int
3493 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3494                             enum rte_filter_type filter_type,
3495                             enum rte_filter_op filter_op, void *arg);
3496
3497 int
3498 rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
3499                             enum rte_filter_type filter_type,
3500                             enum rte_filter_op filter_op, void *arg)
3501 {
3502         struct rte_eth_fdir_info_v22 {
3503                 enum rte_fdir_mode mode;
3504                 struct rte_eth_fdir_masks mask;
3505                 struct rte_eth_fdir_flex_conf flex_conf;
3506                 uint32_t guarant_spc;
3507                 uint32_t best_spc;
3508                 uint32_t flow_types_mask[1];
3509                 uint32_t max_flexpayload;
3510                 uint32_t flex_payload_unit;
3511                 uint32_t max_flex_payload_segment_num;
3512                 uint16_t flex_payload_limit;
3513                 uint32_t flex_bitmask_unit;
3514                 uint32_t max_flex_bitmask_num;
3515         };
3516
3517         struct rte_eth_hash_global_conf_v22 {
3518                 enum rte_eth_hash_function hash_func;
3519                 uint32_t sym_hash_enable_mask[1];
3520                 uint32_t valid_bit_mask[1];
3521         };
3522
3523         struct rte_eth_hash_filter_info_v22 {
3524                 enum rte_eth_hash_filter_info_type info_type;
3525                 union {
3526                         uint8_t enable;
3527                         struct rte_eth_hash_global_conf_v22 global_conf;
3528                         struct rte_eth_input_set_conf input_set_conf;
3529                 } info;
3530         };
3531
3532         struct rte_eth_dev *dev;
3533
3534         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3535
3536         dev = &rte_eth_devices[port_id];
3537         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3538         if (filter_op == RTE_ETH_FILTER_INFO) {
3539                 int retval;
3540                 struct rte_eth_fdir_info_v22 *fdir_info_v22;
3541                 struct rte_eth_fdir_info fdir_info;
3542
3543                 fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
3544
3545                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3546                           filter_op, (void *)&fdir_info);
3547                 fdir_info_v22->mode = fdir_info.mode;
3548                 fdir_info_v22->mask = fdir_info.mask;
3549                 fdir_info_v22->flex_conf = fdir_info.flex_conf;
3550                 fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
3551                 fdir_info_v22->best_spc = fdir_info.best_spc;
3552                 fdir_info_v22->flow_types_mask[0] =
3553                         (uint32_t)fdir_info.flow_types_mask[0];
3554                 fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
3555                 fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
3556                 fdir_info_v22->max_flex_payload_segment_num =
3557                         fdir_info.max_flex_payload_segment_num;
3558                 fdir_info_v22->flex_payload_limit =
3559                         fdir_info.flex_payload_limit;
3560                 fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
3561                 fdir_info_v22->max_flex_bitmask_num =
3562                         fdir_info.max_flex_bitmask_num;
3563                 return retval;
3564         } else if (filter_op == RTE_ETH_FILTER_GET) {
3565                 int retval;
3566                 struct rte_eth_hash_filter_info f_info;
3567                 struct rte_eth_hash_filter_info_v22 *f_info_v22 =
3568                         (struct rte_eth_hash_filter_info_v22 *)arg;
3569
3570                 f_info.info_type = f_info_v22->info_type;
3571                 retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3572                           filter_op, (void *)&f_info);
3573
3574                 switch (f_info_v22->info_type) {
3575                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3576                         f_info_v22->info.enable = f_info.info.enable;
3577                         break;
3578                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3579                         f_info_v22->info.global_conf.hash_func =
3580                                 f_info.info.global_conf.hash_func;
3581                         f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
3582                                 (uint32_t)
3583                                 f_info.info.global_conf.sym_hash_enable_mask[0];
3584                         f_info_v22->info.global_conf.valid_bit_mask[0] =
3585                                 (uint32_t)
3586                                 f_info.info.global_conf.valid_bit_mask[0];
3587                         break;
3588                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3589                         f_info_v22->info.input_set_conf =
3590                                 f_info.info.input_set_conf;
3591                         break;
3592                 default:
3593                         break;
3594                 }
3595                 return retval;
3596         } else if (filter_op == RTE_ETH_FILTER_SET) {
3597                 struct rte_eth_hash_filter_info f_info;
3598                 struct rte_eth_hash_filter_info_v22 *f_v22 =
3599                         (struct rte_eth_hash_filter_info_v22 *)arg;
3600
3601                 f_info.info_type = f_v22->info_type;
3602                 switch (f_v22->info_type) {
3603                 case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
3604                         f_info.info.enable = f_v22->info.enable;
3605                         break;
3606                 case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
3607                         f_info.info.global_conf.hash_func =
3608                                 f_v22->info.global_conf.hash_func;
3609                         f_info.info.global_conf.sym_hash_enable_mask[0] =
3610                                 (uint32_t)
3611                                 f_v22->info.global_conf.sym_hash_enable_mask[0];
3612                         f_info.info.global_conf.valid_bit_mask[0] =
3613                                 (uint32_t)
3614                                 f_v22->info.global_conf.valid_bit_mask[0];
3615                         break;
3616                 case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
3617                         f_info.info.input_set_conf =
3618                                 f_v22->info.input_set_conf;
3619                         break;
3620                 default:
3621                         break;
3622                 }
3623                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3624                                                     (void *)&f_info);
3625         } else
3626                 return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
3627                                                     arg);
3628 }
3629 VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
3630
3631 int
3632 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3633                               enum rte_filter_type filter_type,
3634                               enum rte_filter_op filter_op, void *arg);
3635
3636 int
3637 rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
3638                               enum rte_filter_type filter_type,
3639                               enum rte_filter_op filter_op, void *arg)
3640 {
3641         struct rte_eth_dev *dev;
3642
3643         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3644
3645         dev = &rte_eth_devices[port_id];
3646         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
3647         return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
3648                                                              filter_op, arg));
3649 }
3650 BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
3651 MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
3652                   enum rte_filter_type filter_type,
3653                   enum rte_filter_op filter_op, void *arg),
3654                   rte_eth_dev_filter_ctrl_v1802);
3655
3656 void *
3657 rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
3658                 rte_rx_callback_fn fn, void *user_param)
3659 {
3660 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3661         rte_errno = ENOTSUP;
3662         return NULL;
3663 #endif
3664         /* check input parameters */
3665         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3666                     queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3667                 rte_errno = EINVAL;
3668                 return NULL;
3669         }
3670         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3671
3672         if (cb == NULL) {
3673                 rte_errno = ENOMEM;
3674                 return NULL;
3675         }
3676
3677         cb->fn.rx = fn;
3678         cb->param = user_param;
3679
3680         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3681         /* Add the callbacks in fifo order. */
3682         struct rte_eth_rxtx_callback *tail =
3683                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3684
3685         if (!tail) {
3686                 rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3687
3688         } else {
3689                 while (tail->next)
3690                         tail = tail->next;
3691                 tail->next = cb;
3692         }
3693         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3694
3695         return cb;
3696 }
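/*
 * Usage sketch (illustrative only): installing a post-Rx callback that
 * counts packets received on queue 0.  The callback and the counter are
 * examples, not part of this library.
 *
 *	static uint64_t rx_count;
 *
 *	static uint16_t
 *	count_rx_cb(uint16_t port_id, uint16_t queue_id,
 *		    struct rte_mbuf *pkts[], uint16_t nb_pkts,
 *		    uint16_t max_pkts, void *user_param)
 *	{
 *		RTE_SET_USED(port_id);
 *		RTE_SET_USED(queue_id);
 *		RTE_SET_USED(max_pkts);
 *		RTE_SET_USED(user_param);
 *		rx_count += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	void *cb = rte_eth_add_rx_callback(port_id, 0, count_rx_cb, NULL);
 */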
3697
3698 void *
3699 rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
3700                 rte_rx_callback_fn fn, void *user_param)
3701 {
3702 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3703         rte_errno = ENOTSUP;
3704         return NULL;
3705 #endif
3706         /* check input parameters */
3707         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3708                 queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
3709                 rte_errno = EINVAL;
3710                 return NULL;
3711         }
3712
3713         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3714
3715         if (cb == NULL) {
3716                 rte_errno = ENOMEM;
3717                 return NULL;
3718         }
3719
3720         cb->fn.rx = fn;
3721         cb->param = user_param;
3722
3723         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3724         /* Add the callback at the first position. */
3725         cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
3726         rte_smp_wmb();
3727         rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
3728         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3729
3730         return cb;
3731 }
3732
3733 void *
3734 rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
3735                 rte_tx_callback_fn fn, void *user_param)
3736 {
3737 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3738         rte_errno = ENOTSUP;
3739         return NULL;
3740 #endif
3741         /* check input parameters */
3742         if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
3743                     queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
3744                 rte_errno = EINVAL;
3745                 return NULL;
3746         }
3747
3748         struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
3749
3750         if (cb == NULL) {
3751                 rte_errno = ENOMEM;
3752                 return NULL;
3753         }
3754
3755         cb->fn.tx = fn;
3756         cb->param = user_param;
3757
3758         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3759         /* Add the callbacks in fifo order. */
3760         struct rte_eth_rxtx_callback *tail =
3761                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
3762
3763         if (!tail) {
3764                 rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
3765
3766         } else {
3767                 while (tail->next)
3768                         tail = tail->next;
3769                 tail->next = cb;
3770         }
3771         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3772
3773         return cb;
3774 }
3775
3776 int
3777 rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
3778                 struct rte_eth_rxtx_callback *user_cb)
3779 {
3780 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3781         return -ENOTSUP;
3782 #endif
3783         /* Check input parameters. */
3784         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3785         if (user_cb == NULL ||
3786                         queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
3787                 return -EINVAL;
3788
3789         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3790         struct rte_eth_rxtx_callback *cb;
3791         struct rte_eth_rxtx_callback **prev_cb;
3792         int ret = -EINVAL;
3793
3794         rte_spinlock_lock(&rte_eth_rx_cb_lock);
3795         prev_cb = &dev->post_rx_burst_cbs[queue_id];
3796         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3797                 cb = *prev_cb;
3798                 if (cb == user_cb) {
3799                         /* Remove the user cb from the callback list. */
3800                         *prev_cb = cb->next;
3801                         ret = 0;
3802                         break;
3803                 }
3804         }
3805         rte_spinlock_unlock(&rte_eth_rx_cb_lock);
3806
3807         return ret;
3808 }
3809
3810 int
3811 rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
3812                 struct rte_eth_rxtx_callback *user_cb)
3813 {
3814 #ifndef RTE_ETHDEV_RXTX_CALLBACKS
3815         return -ENOTSUP;
3816 #endif
3817         /* Check input parameters. */
3818         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
3819         if (user_cb == NULL ||
3820                         queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
3821                 return -EINVAL;
3822
3823         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
3824         int ret = -EINVAL;
3825         struct rte_eth_rxtx_callback *cb;
3826         struct rte_eth_rxtx_callback **prev_cb;
3827
3828         rte_spinlock_lock(&rte_eth_tx_cb_lock);
3829         prev_cb = &dev->pre_tx_burst_cbs[queue_id];
3830         for (; *prev_cb != NULL; prev_cb = &cb->next) {
3831                 cb = *prev_cb;
3832                 if (cb == user_cb) {
3833                         /* Remove the user cb from the callback list. */
3834                         *prev_cb = cb->next;
3835                         ret = 0;
3836                         break;
3837                 }
3838         }
3839         rte_spinlock_unlock(&rte_eth_tx_cb_lock);
3840
3841         return ret;
3842 }
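/*
 * Note: removing an Rx/Tx callback only unlinks it from the per-queue list;
 * neither remove function frees the callback memory.  In application code
 * the pointer returned by the matching add function can be released with
 * rte_free(), but only after making sure no data-plane thread can still be
 * executing the callback (for example after stopping the lcores that poll
 * the queue).
 */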
3843
3844 int
3845 rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3846         struct rte_eth_rxq_info *qinfo)
3847 {
3848         struct rte_eth_dev *dev;
3849
3850         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3851
3852         if (qinfo == NULL)
3853                 return -EINVAL;
3854
3855         dev = &rte_eth_devices[port_id];
3856         if (queue_id >= dev->data->nb_rx_queues) {
3857                 RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3858                 return -EINVAL;
3859         }
3860
3861         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3862
3863         memset(qinfo, 0, sizeof(*qinfo));
3864         dev->dev_ops->rxq_info_get(dev, queue_id, qinfo);
3865         return 0;
3866 }
3867
3868 int
3869 rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
3870         struct rte_eth_txq_info *qinfo)
3871 {
3872         struct rte_eth_dev *dev;
3873
3874         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3875
3876         if (qinfo == NULL)
3877                 return -EINVAL;
3878
3879         dev = &rte_eth_devices[port_id];
3880         if (queue_id >= dev->data->nb_tx_queues) {
3881                 RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3882                 return -EINVAL;
3883         }
3884
3885         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3886
3887         memset(qinfo, 0, sizeof(*qinfo));
3888         dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
3889         return 0;
3890 }
3891
3892 int
3893 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
3894                              struct ether_addr *mc_addr_set,
3895                              uint32_t nb_mc_addr)
3896 {
3897         struct rte_eth_dev *dev;
3898
3899         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3900
3901         dev = &rte_eth_devices[port_id];
3902         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3903         return eth_err(port_id, dev->dev_ops->set_mc_addr_list(dev,
3904                                                 mc_addr_set, nb_mc_addr));
3905 }
3906
3907 int
3908 rte_eth_timesync_enable(uint16_t port_id)
3909 {
3910         struct rte_eth_dev *dev;
3911
3912         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3913         dev = &rte_eth_devices[port_id];
3914
3915         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3916         return eth_err(port_id, (*dev->dev_ops->timesync_enable)(dev));
3917 }
3918
3919 int
3920 rte_eth_timesync_disable(uint16_t port_id)
3921 {
3922         struct rte_eth_dev *dev;
3923
3924         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3925         dev = &rte_eth_devices[port_id];
3926
3927         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3928         return eth_err(port_id, (*dev->dev_ops->timesync_disable)(dev));
3929 }
3930
3931 int
3932 rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
3933                                    uint32_t flags)
3934 {
3935         struct rte_eth_dev *dev;
3936
3937         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3938         dev = &rte_eth_devices[port_id];
3939
3940         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3941         return eth_err(port_id, (*dev->dev_ops->timesync_read_rx_timestamp)
3942                                 (dev, timestamp, flags));
3943 }
3944
3945 int
3946 rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
3947                                    struct timespec *timestamp)
3948 {
3949         struct rte_eth_dev *dev;
3950
3951         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3952         dev = &rte_eth_devices[port_id];
3953
3954         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3955         return eth_err(port_id, (*dev->dev_ops->timesync_read_tx_timestamp)
3956                                 (dev, timestamp));
3957 }
3958
3959 int
3960 rte_eth_timesync_adjust_time(uint16_t port_id, int64_t delta)
3961 {
3962         struct rte_eth_dev *dev;
3963
3964         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3965         dev = &rte_eth_devices[port_id];
3966
3967         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3968         return eth_err(port_id, (*dev->dev_ops->timesync_adjust_time)(dev,
3969                                                                       delta));
3970 }
3971
3972 int
3973 rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
3974 {
3975         struct rte_eth_dev *dev;
3976
3977         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3978         dev = &rte_eth_devices[port_id];
3979
3980         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3981         return eth_err(port_id, (*dev->dev_ops->timesync_read_time)(dev,
3982                                                                 timestamp));
3983 }
3984
3985 int
3986 rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
3987 {
3988         struct rte_eth_dev *dev;
3989
3990         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3991         dev = &rte_eth_devices[port_id];
3992
3993         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3994         return eth_err(port_id, (*dev->dev_ops->timesync_write_time)(dev,
3995                                                                 timestamp));
3996 }
3997
3998 int
3999 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
4000 {
4001         struct rte_eth_dev *dev;
4002
4003         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4004
4005         dev = &rte_eth_devices[port_id];
4006         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
4007         return eth_err(port_id, (*dev->dev_ops->get_reg)(dev, info));
4008 }
4009
4010 int
4011 rte_eth_dev_get_eeprom_length(uint16_t port_id)
4012 {
4013         struct rte_eth_dev *dev;
4014
4015         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4016
4017         dev = &rte_eth_devices[port_id];
4018         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
4019         return eth_err(port_id, (*dev->dev_ops->get_eeprom_length)(dev));
4020 }
4021
4022 int
4023 rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4024 {
4025         struct rte_eth_dev *dev;
4026
4027         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4028
4029         dev = &rte_eth_devices[port_id];
4030         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
4031         return eth_err(port_id, (*dev->dev_ops->get_eeprom)(dev, info));
4032 }
4033
4034 int
4035 rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
4036 {
4037         struct rte_eth_dev *dev;
4038
4039         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4040
4041         dev = &rte_eth_devices[port_id];
4042         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
4043         return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
4044 }
4045
4046 int
4047 rte_eth_dev_get_dcb_info(uint16_t port_id,
4048                              struct rte_eth_dcb_info *dcb_info)
4049 {
4050         struct rte_eth_dev *dev;
4051
4052         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4053
4054         dev = &rte_eth_devices[port_id];
4055         memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
4056
4057         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
4058         return eth_err(port_id, (*dev->dev_ops->get_dcb_info)(dev, dcb_info));
4059 }
4060
4061 int
4062 rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
4063                                     struct rte_eth_l2_tunnel_conf *l2_tunnel)
4064 {
4065         struct rte_eth_dev *dev;
4066
4067         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4068         if (l2_tunnel == NULL) {
4069                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4070                 return -EINVAL;
4071         }
4072
4073         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4074                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
4075                 return -EINVAL;
4076         }
4077
4078         dev = &rte_eth_devices[port_id];
4079         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
4080                                 -ENOTSUP);
4081         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev,
4082                                                                 l2_tunnel));
4083 }
4084
4085 int
4086 rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
4087                                   struct rte_eth_l2_tunnel_conf *l2_tunnel,
4088                                   uint32_t mask,
4089                                   uint8_t en)
4090 {
4091         struct rte_eth_dev *dev;
4092
4093         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4094
4095         if (l2_tunnel == NULL) {
4096                 RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
4097                 return -EINVAL;
4098         }
4099
4100         if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
4101                 RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
4102                 return -EINVAL;
4103         }
4104
4105         if (mask == 0) {
4106                 RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
4107                 return -EINVAL;
4108         }
4109
4110         dev = &rte_eth_devices[port_id];
4111         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
4112                                 -ENOTSUP);
4113         return eth_err(port_id, (*dev->dev_ops->l2_tunnel_offload_set)(dev,
4114                                                         l2_tunnel, mask, en));
4115 }
4116
4117 static void
4118 rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
4119                            const struct rte_eth_desc_lim *desc_lim)
4120 {
4121         if (desc_lim->nb_align != 0)
4122                 *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
4123
4124         if (desc_lim->nb_max != 0)
4125                 *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
4126
4127         *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
4128 }
4129
4130 int
4131 rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
4132                                  uint16_t *nb_rx_desc,
4133                                  uint16_t *nb_tx_desc)
4134 {
4135         struct rte_eth_dev *dev;
4136         struct rte_eth_dev_info dev_info;
4137
4138         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4139
4140         dev = &rte_eth_devices[port_id];
4141         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
4142
4143         rte_eth_dev_info_get(port_id, &dev_info);
4144
4145         if (nb_rx_desc != NULL)
4146                 rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
4147
4148         if (nb_tx_desc != NULL)
4149                 rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
4150
4151         return 0;
4152 }
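/*
 * Usage sketch (illustrative only): clamping application-chosen descriptor
 * counts to the device limits before queue setup.  The initial counts and
 * the socket_id/mb_pool locals are placeholders.
 *
 *	uint16_t nb_rxd = 1024, nb_txd = 1024;
 *
 *	if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) == 0) {
 *		rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id,
 *				       NULL, mb_pool);
 *		rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, NULL);
 *	}
 */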
4153
4154 int
4155 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
4156 {
4157         struct rte_eth_dev *dev;
4158
4159         RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
4160
4161         if (pool == NULL)
4162                 return -EINVAL;
4163
4164         dev = &rte_eth_devices[port_id];
4165
4166         if (*dev->dev_ops->pool_ops_supported == NULL)
4167                 return 1; /* all pools are supported */
4168
4169         return (*dev->dev_ops->pool_ops_supported)(dev, pool);
4170 }
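/*
 * Usage sketch (illustrative only): asking whether the port works with the
 * default "ring_mp_mc" mempool ops before creating the packet pool.  In this
 * sketch any negative return is treated as "not usable".
 *
 *	if (rte_eth_dev_pool_ops_supported(port_id, "ring_mp_mc") < 0)
 *		printf("port %u: ring_mp_mc pool ops not usable\n", port_id);
 */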