New upstream version 18.02
[deb_dpdk.git] / drivers / net / ring / rte_eth_ring.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2015 Intel Corporation
3  */
4
5 #include "rte_eth_ring.h"
6 #include <rte_mbuf.h>
7 #include <rte_ethdev_driver.h>
8 #include <rte_malloc.h>
9 #include <rte_memcpy.h>
10 #include <rte_string_fns.h>
11 #include <rte_bus_vdev.h>
12 #include <rte_kvargs.h>
13 #include <rte_errno.h>
14
/* devargs keys understood by this PMD */
#define ETH_RING_NUMA_NODE_ACTION_ARG   "nodeaction"
#define ETH_RING_ACTION_CREATE          "CREATE"
#define ETH_RING_ACTION_ATTACH          "ATTACH"
#define ETH_RING_INTERNAL_ARG           "internal"

/* NULL-terminated key list handed to rte_kvargs_parse() for validation */
static const char *valid_arguments[] = {
	ETH_RING_NUMA_NODE_ACTION_ARG,
	ETH_RING_INTERNAL_ARG,
	NULL
};
25
/*
 * Argument bundle built by rte_eth_from_rings() and smuggled to the probe
 * function as a "%p"-formatted devargs value (ETH_RING_INTERNAL_ARG).
 */
struct ring_internal_args {
	struct rte_ring * const *rx_queues;	/* array of RX rings */
	const unsigned int nb_rx_queues;	/* entries in rx_queues */
	struct rte_ring * const *tx_queues;	/* array of TX rings */
	const unsigned int nb_tx_queues;	/* entries in tx_queues */
	const unsigned int numa_node;		/* socket for allocations */
	void *addr; /* self addr for sanity check */
};
34
/* whether this device created its rings or attached to existing ones */
enum dev_action {
	DEV_CREATE,	/* rings created here; freed again on remove */
	DEV_ATTACH	/* rings found via rte_ring_lookup(); not owned */
};
39
/* per-queue state: backing ring plus packet/error counters */
struct ring_queue {
	struct rte_ring *rng;		/* ring backing this queue */
	rte_atomic64_t rx_pkts;		/* packets dequeued on RX */
	rte_atomic64_t tx_pkts;		/* packets enqueued on TX */
	rte_atomic64_t err_pkts;	/* TX packets that did not fit in the ring */
};
46
/* per-device private data, reachable via eth_dev->data->dev_private */
struct pmd_internals {
	unsigned max_rx_queues;		/* capacity reported via dev_infos_get */
	unsigned max_tx_queues;

	struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
	struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

	struct ether_addr address;	/* single MAC address exposed to apps */
	enum dev_action action;		/* whether the rings are owned (DEV_CREATE) */
};
57
58
/* template link state copied into every new device: 10G FD, down until start */
static struct rte_eth_link pmd_link = {
		.link_speed = ETH_SPEED_NUM_10G,
		.link_duplex = ETH_LINK_FULL_DUPLEX,
		.link_status = ETH_LINK_DOWN,
		.link_autoneg = ETH_LINK_AUTONEG
};
65
66 static uint16_t
67 eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
68 {
69         void **ptrs = (void *)&bufs[0];
70         struct ring_queue *r = q;
71         const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
72                         ptrs, nb_bufs, NULL);
73         if (r->rng->flags & RING_F_SC_DEQ)
74                 r->rx_pkts.cnt += nb_rx;
75         else
76                 rte_atomic64_add(&(r->rx_pkts), nb_rx);
77         return nb_rx;
78 }
79
80 static uint16_t
81 eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
82 {
83         void **ptrs = (void *)&bufs[0];
84         struct ring_queue *r = q;
85         const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
86                         ptrs, nb_bufs, NULL);
87         if (r->rng->flags & RING_F_SP_ENQ) {
88                 r->tx_pkts.cnt += nb_tx;
89                 r->err_pkts.cnt += nb_bufs - nb_tx;
90         } else {
91                 rte_atomic64_add(&(r->tx_pkts), nb_tx);
92                 rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
93         }
94         return nb_tx;
95 }
96
97 static int
98 eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
99
100 static int
101 eth_dev_start(struct rte_eth_dev *dev)
102 {
103         dev->data->dev_link.link_status = ETH_LINK_UP;
104         return 0;
105 }
106
107 static void
108 eth_dev_stop(struct rte_eth_dev *dev)
109 {
110         dev->data->dev_link.link_status = ETH_LINK_DOWN;
111 }
112
113 static int
114 eth_dev_set_link_down(struct rte_eth_dev *dev)
115 {
116         dev->data->dev_link.link_status = ETH_LINK_DOWN;
117         return 0;
118 }
119
120 static int
121 eth_dev_set_link_up(struct rte_eth_dev *dev)
122 {
123         dev->data->dev_link.link_status = ETH_LINK_UP;
124         return 0;
125 }
126
127 static int
128 eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
129                                     uint16_t nb_rx_desc __rte_unused,
130                                     unsigned int socket_id __rte_unused,
131                                     const struct rte_eth_rxconf *rx_conf __rte_unused,
132                                     struct rte_mempool *mb_pool __rte_unused)
133 {
134         struct pmd_internals *internals = dev->data->dev_private;
135         dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
136         return 0;
137 }
138
139 static int
140 eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
141                                     uint16_t nb_tx_desc __rte_unused,
142                                     unsigned int socket_id __rte_unused,
143                                     const struct rte_eth_txconf *tx_conf __rte_unused)
144 {
145         struct pmd_internals *internals = dev->data->dev_private;
146         dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
147         return 0;
148 }
149
150
151 static void
152 eth_dev_info(struct rte_eth_dev *dev,
153                 struct rte_eth_dev_info *dev_info)
154 {
155         struct pmd_internals *internals = dev->data->dev_private;
156         dev_info->max_mac_addrs = 1;
157         dev_info->max_rx_pktlen = (uint32_t)-1;
158         dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
159         dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
160         dev_info->min_rx_bufsize = 0;
161 }
162
163 static int
164 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
165 {
166         unsigned i;
167         unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
168         const struct pmd_internals *internal = dev->data->dev_private;
169
170         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
171                         i < dev->data->nb_rx_queues; i++) {
172                 stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
173                 rx_total += stats->q_ipackets[i];
174         }
175
176         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
177                         i < dev->data->nb_tx_queues; i++) {
178                 stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
179                 stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
180                 tx_total += stats->q_opackets[i];
181                 tx_err_total += stats->q_errors[i];
182         }
183
184         stats->ipackets = rx_total;
185         stats->opackets = tx_total;
186         stats->oerrors = tx_err_total;
187
188         return 0;
189 }
190
191 static void
192 eth_stats_reset(struct rte_eth_dev *dev)
193 {
194         unsigned i;
195         struct pmd_internals *internal = dev->data->dev_private;
196         for (i = 0; i < dev->data->nb_rx_queues; i++)
197                 internal->rx_ring_queues[i].rx_pkts.cnt = 0;
198         for (i = 0; i < dev->data->nb_tx_queues; i++) {
199                 internal->tx_ring_queues[i].tx_pkts.cnt = 0;
200                 internal->tx_ring_queues[i].err_pkts.cnt = 0;
201         }
202 }
203
/* no per-MAC state is kept, so MAC removal is a no-op */
static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
	uint32_t index __rte_unused)
{
}
209
/* no per-MAC state is kept, so MAC addition is a no-op that reports success */
static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
	struct ether_addr *mac_addr __rte_unused,
	uint32_t index __rte_unused,
	uint32_t vmdq __rte_unused)
{
	return 0;
}
218
219 static void
220 eth_queue_release(void *q __rte_unused) { ; }
221 static int
222 eth_link_update(struct rte_eth_dev *dev __rte_unused,
223                 int wait_to_complete __rte_unused) { return 0; }
224
/* ethdev operation table implemented by the ring PMD */
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_remove = eth_mac_addr_remove,
	.mac_addr_add = eth_mac_addr_add,
};
242
243 static struct rte_vdev_driver pmd_ring_drv;
244
/*
 * Allocate an ethdev backed by the given RX/TX rings and wire it up.
 *
 * The ethdev's original data struct is replaced with a locally allocated
 * copy so the queue arrays are per-process; statement order below matters
 * (the copy must happen before any field is overwritten).
 *
 * Returns the new port id on success, or -1 with rte_errno set
 * (ENOMEM on allocation failure, ENOSPC when no ethdev slot is free).
 * Caller limits nb_rx_queues/nb_tx_queues to the RTE_PMD_RING_MAX_*
 * bounds -- the fixed-size arrays in pmd_internals rely on that.
 */
static int
do_eth_dev_ring_create(const char *name,
		struct rte_ring * const rx_queues[], const unsigned nb_rx_queues,
		struct rte_ring *const tx_queues[], const unsigned nb_tx_queues,
		const unsigned int numa_node, enum dev_action action,
		struct rte_eth_dev **eth_dev_p)
{
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	void **rx_queues_local = NULL;
	void **tx_queues_local = NULL;
	unsigned i;

	RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	rx_queues_local = rte_zmalloc_socket(name,
			sizeof(void *) * nb_rx_queues, 0, numa_node);
	if (rx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	tx_queues_local = rte_zmalloc_socket(name,
			sizeof(void *) * nb_tx_queues, 0, numa_node);
	if (tx_queues_local == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL) {
		rte_errno = ENOMEM;
		goto error;
	}

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL) {
		rte_errno = ENOSPC;
		goto error;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in eth_dev_data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element, of originally allocated eth_dev
	 * so the rings are local per-process */

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->rx_queues = rx_queues_local;
	data->tx_queues = tx_queues_local;

	internals->action = action;
	internals->max_rx_queues = nb_rx_queues;
	internals->max_tx_queues = nb_tx_queues;
	for (i = 0; i < nb_rx_queues; i++) {
		internals->rx_ring_queues[i].rng = rx_queues[i];
		data->rx_queues[i] = &internals->rx_ring_queues[i];
	}
	for (i = 0; i < nb_tx_queues; i++) {
		internals->tx_ring_queues[i].rng = tx_queues[i];
		data->tx_queues[i] = &internals->tx_ring_queues[i];
	}

	data->dev_private = internals;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	/* MAC storage lives inside internals; rte_pmd_ring_remove must not
	 * free it separately */
	data->mac_addrs = &internals->address;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;
	data->kdrv = RTE_KDRV_NONE;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	eth_dev->rx_pkt_burst = eth_ring_rx;
	eth_dev->tx_pkt_burst = eth_ring_tx;

	*eth_dev_p = eth_dev;

	return data->port_id;

error:
	rte_free(rx_queues_local);
	rte_free(tx_queues_local);
	rte_free(data);
	rte_free(internals);

	return -1;
}
350
351 int
352 rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
353                 const unsigned nb_rx_queues,
354                 struct rte_ring *const tx_queues[],
355                 const unsigned nb_tx_queues,
356                 const unsigned numa_node)
357 {
358         struct ring_internal_args args = {
359                 .rx_queues = rx_queues,
360                 .nb_rx_queues = nb_rx_queues,
361                 .tx_queues = tx_queues,
362                 .nb_tx_queues = nb_tx_queues,
363                 .numa_node = numa_node,
364                 .addr = &args,
365         };
366         char args_str[32] = { 0 };
367         char ring_name[32] = { 0 };
368         uint16_t port_id = RTE_MAX_ETHPORTS;
369         int ret;
370
371         /* do some parameter checking */
372         if (rx_queues == NULL && nb_rx_queues > 0) {
373                 rte_errno = EINVAL;
374                 return -1;
375         }
376         if (tx_queues == NULL && nb_tx_queues > 0) {
377                 rte_errno = EINVAL;
378                 return -1;
379         }
380         if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
381                 rte_errno = EINVAL;
382                 return -1;
383         }
384
385         snprintf(args_str, 32, "%s=%p", ETH_RING_INTERNAL_ARG, &args);
386         snprintf(ring_name, 32, "net_ring_%s", name);
387
388         ret = rte_vdev_init(ring_name, args_str);
389         if (ret) {
390                 rte_errno = EINVAL;
391                 return -1;
392         }
393
394         rte_eth_dev_get_port_by_name(ring_name, &port_id);
395
396         return port_id;
397 }
398
/*
 * Convenience wrapper: expose a single ring as an ethdev that uses it
 * for both RX and TX, allocating on the ring's socket when known.
 */
int
rte_eth_from_ring(struct rte_ring *r)
{
	return rte_eth_from_rings(r->name, &r, 1, &r, 1,
			r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}
405
406 static int
407 eth_dev_ring_create(const char *name, const unsigned numa_node,
408                 enum dev_action action, struct rte_eth_dev **eth_dev)
409 {
410         /* rx and tx are so-called from point of view of first port.
411          * They are inverted from the point of view of second port
412          */
413         struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
414         unsigned i;
415         char rng_name[RTE_RING_NAMESIZE];
416         unsigned num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
417                         RTE_PMD_RING_MAX_TX_RINGS);
418
419         for (i = 0; i < num_rings; i++) {
420                 snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
421                 rxtx[i] = (action == DEV_CREATE) ?
422                                 rte_ring_create(rng_name, 1024, numa_node,
423                                                 RING_F_SP_ENQ|RING_F_SC_DEQ) :
424                                 rte_ring_lookup(rng_name);
425                 if (rxtx[i] == NULL)
426                         return -1;
427         }
428
429         if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings,
430                 numa_node, action, eth_dev) < 0)
431                 return -1;
432
433         return 0;
434 }
435
/* one parsed "name:node:action" triple from the nodeaction devarg */
struct node_action_pair {
	char name[PATH_MAX];		/* device name */
	unsigned node;			/* NUMA node to allocate on */
	enum dev_action action;		/* CREATE or ATTACH */
};

/* growable-by-count list of parsed triples (storage follows the header) */
struct node_action_list {
	unsigned total;			/* capacity of list[] */
	unsigned count;			/* entries filled so far */
	struct node_action_pair *list;	/* points just past this struct */
};
447
448 static int parse_kvlist (const char *key __rte_unused, const char *value, void *data)
449 {
450         struct node_action_list *info = data;
451         int ret;
452         char *name;
453         char *action;
454         char *node;
455         char *end;
456
457         name = strdup(value);
458
459         ret = -EINVAL;
460
461         if (!name) {
462                 RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n");
463                 goto out;
464         }
465
466         node = strchr(name, ':');
467         if (!node) {
468                 RTE_LOG(WARNING, PMD, "could not parse node value from %s\n",
469                         name);
470                 goto out;
471         }
472
473         *node = '\0';
474         node++;
475
476         action = strchr(node, ':');
477         if (!action) {
478                 RTE_LOG(WARNING, PMD, "could not parse action value from %s\n",
479                         node);
480                 goto out;
481         }
482
483         *action = '\0';
484         action++;
485
486         /*
487          * Need to do some sanity checking here
488          */
489
490         if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
491                 info->list[info->count].action = DEV_ATTACH;
492         else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
493                 info->list[info->count].action = DEV_CREATE;
494         else
495                 goto out;
496
497         errno = 0;
498         info->list[info->count].node = strtol(node, &end, 10);
499
500         if ((errno != 0) || (*end != '\0')) {
501                 RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node);
502                 goto out;
503         }
504
505         snprintf(info->list[info->count].name, sizeof(info->list[info->count].name), "%s", name);
506
507         info->count++;
508
509         ret = 0;
510 out:
511         free(name);
512         return ret;
513 }
514
515 static int
516 parse_internal_args(const char *key __rte_unused, const char *value,
517                 void *data)
518 {
519         struct ring_internal_args **internal_args = data;
520         void *args;
521
522         sscanf(value, "%p", &args);
523
524         *internal_args = args;
525
526         if ((*internal_args)->addr != args)
527                 return -1;
528
529         return 0;
530 }
531
532 static int
533 rte_pmd_ring_probe(struct rte_vdev_device *dev)
534 {
535         const char *name, *params;
536         struct rte_kvargs *kvlist = NULL;
537         int ret = 0;
538         struct node_action_list *info = NULL;
539         struct rte_eth_dev *eth_dev = NULL;
540         struct ring_internal_args *internal_args;
541
542         name = rte_vdev_device_name(dev);
543         params = rte_vdev_device_args(dev);
544
545         RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);
546
547         if (params == NULL || params[0] == '\0') {
548                 ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE,
549                                 &eth_dev);
550                 if (ret == -1) {
551                         RTE_LOG(INFO, PMD,
552                                 "Attach to pmd_ring for %s\n", name);
553                         ret = eth_dev_ring_create(name, rte_socket_id(),
554                                                   DEV_ATTACH, &eth_dev);
555                 }
556         } else {
557                 kvlist = rte_kvargs_parse(params, valid_arguments);
558
559                 if (!kvlist) {
560                         RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
561                                         " rings-backed ethernet device\n");
562                         ret = eth_dev_ring_create(name, rte_socket_id(),
563                                                   DEV_CREATE, &eth_dev);
564                         if (ret == -1) {
565                                 RTE_LOG(INFO, PMD,
566                                         "Attach to pmd_ring for %s\n",
567                                         name);
568                                 ret = eth_dev_ring_create(name, rte_socket_id(),
569                                                           DEV_ATTACH, &eth_dev);
570                         }
571
572                         if (eth_dev)
573                                 eth_dev->device = &dev->device;
574
575                         return ret;
576                 }
577
578                 if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
579                         ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
580                                                  parse_internal_args,
581                                                  &internal_args);
582                         if (ret < 0)
583                                 goto out_free;
584
585                         ret = do_eth_dev_ring_create(name,
586                                 internal_args->rx_queues,
587                                 internal_args->nb_rx_queues,
588                                 internal_args->tx_queues,
589                                 internal_args->nb_tx_queues,
590                                 internal_args->numa_node,
591                                 DEV_ATTACH,
592                                 &eth_dev);
593                         if (ret >= 0)
594                                 ret = 0;
595                 } else {
596                         ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
597                         info = rte_zmalloc("struct node_action_list",
598                                            sizeof(struct node_action_list) +
599                                            (sizeof(struct node_action_pair) * ret),
600                                            0);
601                         if (!info)
602                                 goto out_free;
603
604                         info->total = ret;
605                         info->list = (struct node_action_pair*)(info + 1);
606
607                         ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
608                                                  parse_kvlist, info);
609
610                         if (ret < 0)
611                                 goto out_free;
612
613                         for (info->count = 0; info->count < info->total; info->count++) {
614                                 ret = eth_dev_ring_create(info->list[info->count].name,
615                                                           info->list[info->count].node,
616                                                           info->list[info->count].action,
617                                                           &eth_dev);
618                                 if ((ret == -1) &&
619                                     (info->list[info->count].action == DEV_CREATE)) {
620                                         RTE_LOG(INFO, PMD,
621                                                 "Attach to pmd_ring for %s\n",
622                                                 name);
623                                         ret = eth_dev_ring_create(name,
624                                                         info->list[info->count].node,
625                                                         DEV_ATTACH,
626                                                         &eth_dev);
627                                 }
628                         }
629                 }
630         }
631
632         if (eth_dev)
633                 eth_dev->device = &dev->device;
634
635 out_free:
636         rte_kvargs_free(kvlist);
637         rte_free(info);
638         return ret;
639 }
640
/*
 * vdev remove entry point: stop the port, free rings it owns, then free
 * the per-process allocations made in do_eth_dev_ring_create().
 *
 * Returns 0 on success, -EINVAL for a NULL name, -ENODEV when no ethdev
 * with that name exists.
 */
static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
	const char *name = rte_vdev_device_name(dev);
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals = NULL;
	struct ring_queue *r = NULL;
	uint16_t i;

	RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name);

	if (name == NULL)
		return -EINVAL;

	/* find an ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_dev_stop(eth_dev);

	internals = eth_dev->data->dev_private;
	if (internals->action == DEV_CREATE) {
		/*
		 * it is only necessary to delete the rings in rx_queues because
		 * they are the same used in tx_queues
		 */
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
			r = eth_dev->data->rx_queues[i];
			rte_ring_free(r->rng);
		}
	}

	/* these three were allocated per-process in do_eth_dev_ring_create();
	 * mac_addrs points inside dev_private and must not be freed separately */
	rte_free(eth_dev->data->rx_queues);
	rte_free(eth_dev->data->tx_queues);
	rte_free(eth_dev->data->dev_private);

	/* NOTE(review): eth_dev->data is the local replacement copy, freed
	 * here before rte_eth_dev_release_port(); this assumes release_port
	 * does not dereference eth_dev->data afterwards -- confirm against
	 * the ethdev library version in use */
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);
	return 0;
}
683
/* vdev driver hooks for the ring PMD */
static struct rte_vdev_driver pmd_ring_drv = {
	.probe = rte_pmd_ring_probe,
	.remove = rte_pmd_ring_remove,
};

RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
/* legacy driver name kept for backward compatibility */
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
	ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");