/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "rte_eth_ring.h"
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>

#define ETH_RING_NUMA_NODE_ACTION_ARG   "nodeaction"
#define ETH_RING_ACTION_CREATE          "CREATE"
#define ETH_RING_ACTION_ATTACH          "ATTACH"
#define ETH_RING_INTERNAL_ARG           "internal"

static const char *valid_arguments[] = {
        ETH_RING_NUMA_NODE_ACTION_ARG,
        ETH_RING_INTERNAL_ARG,
        NULL
};

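/*
 * Arguments handed from rte_eth_from_rings() to the probe callback through
 * the "internal" devarg: the caller's rx/tx ring arrays plus the NUMA node
 * to allocate on. 'addr' holds the struct's own address so the receiver can
 * verify the pointer round-tripped through the devargs string intact.
 */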
struct ring_internal_args {
        struct rte_ring * const *rx_queues;
        const unsigned int nb_rx_queues;
        struct rte_ring * const *tx_queues;
        const unsigned int nb_tx_queues;
        const unsigned int numa_node;
        void *addr; /* self addr for sanity check */
};

enum dev_action {
        DEV_CREATE,
        DEV_ATTACH
};

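/* Per-queue state: the backing rte_ring plus packet and error counters. */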
struct ring_queue {
        struct rte_ring *rng;
        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned int max_rx_queues;
        unsigned int max_tx_queues;

        struct ring_queue rx_ring_queues[RTE_PMD_RING_MAX_RX_RINGS];
        struct ring_queue tx_ring_queues[RTE_PMD_RING_MAX_TX_RINGS];

        struct ether_addr address;
        enum dev_action action;
};


static struct rte_eth_link pmd_link = {
                .link_speed = ETH_SPEED_NUM_10G,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN,
                .link_autoneg = ETH_LINK_AUTONEG
};

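/*
 * RX burst: dequeue up to nb_bufs mbuf pointers from the queue's backing
 * ring. The counter update is non-atomic when the ring is single-consumer,
 * atomic otherwise.
 */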
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        void **ptrs = (void *)&bufs[0];
        struct ring_queue *r = q;
        const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
                        ptrs, nb_bufs, NULL);
        if (r->rng->flags & RING_F_SC_DEQ)
                r->rx_pkts.cnt += nb_rx;
        else
                rte_atomic64_add(&(r->rx_pkts), nb_rx);
        return nb_rx;
}

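/*
 * TX burst: enqueue up to nb_bufs mbuf pointers onto the backing ring,
 * counting packets that did not fit as errors. As on the RX side, counters
 * are updated atomically only for multi-producer rings.
 */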
static uint16_t
eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        void **ptrs = (void *)&bufs[0];
        struct ring_queue *r = q;
        const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
                        ptrs, nb_bufs, NULL);
        if (r->rng->flags & RING_F_SP_ENQ) {
                r->tx_pkts.cnt += nb_tx;
                r->err_pkts.cnt += nb_bufs - nb_tx;
        } else {
                rte_atomic64_add(&(r->tx_pkts), nb_tx);
                rte_atomic64_add(&(r->err_pkts), nb_bufs - nb_tx);
        }
        return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_DOWN;
        return 0;
}

static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                                    uint16_t nb_rx_desc __rte_unused,
                                    unsigned int socket_id __rte_unused,
                                    const struct rte_eth_rxconf *rx_conf __rte_unused,
                                    struct rte_mempool *mb_pool __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                                    uint16_t nb_tx_desc __rte_unused,
                                    unsigned int socket_id __rte_unused,
                                    const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
        return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
        dev_info->min_rx_bufsize = 0;
}

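/* Aggregate the per-queue software counters into the device-level stats. */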
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned int i;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal = dev->data->dev_private;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                stats->q_ipackets[i] = internal->rx_ring_queues[i].rx_pkts.cnt;
                rx_total += stats->q_ipackets[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                stats->q_opackets[i] = internal->tx_ring_queues[i].tx_pkts.cnt;
                stats->q_errors[i] = internal->tx_ring_queues[i].err_pkts.cnt;
                tx_total += stats->q_opackets[i];
                tx_err_total += stats->q_errors[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->oerrors = tx_err_total;

        return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned int i;
        struct pmd_internals *internal = dev->data->dev_private;
        for (i = 0; i < dev->data->nb_rx_queues; i++)
                internal->rx_ring_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                internal->tx_ring_queues[i].tx_pkts.cnt = 0;
                internal->tx_ring_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
        uint32_t index __rte_unused)
{
}

static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
        struct ether_addr *mac_addr __rte_unused,
        uint32_t index __rte_unused,
        uint32_t vmdq __rte_unused)
{
        return 0;
}

static void
eth_queue_release(void *q __rte_unused) { ; }
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_set_link_up = eth_dev_set_link_up,
        .dev_set_link_down = eth_dev_set_link_down,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .mac_addr_remove = eth_mac_addr_remove,
        .mac_addr_add = eth_mac_addr_add,
};

static struct rte_vdev_driver pmd_ring_drv;

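/*
 * Common back end for device creation: allocate the ethdev, its private
 * data and per-process queue arrays, wire the supplied rings into the RX/TX
 * queues and install the burst functions. Returns the port id on success,
 * -1 (with rte_errno set) on failure.
 */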
static int
do_eth_dev_ring_create(const char *name,
                struct rte_ring * const rx_queues[],
                const unsigned int nb_rx_queues,
                struct rte_ring * const tx_queues[],
                const unsigned int nb_tx_queues,
                const unsigned int numa_node, enum dev_action action,
                struct rte_eth_dev **eth_dev_p)
{
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        void **rx_queues_local = NULL;
        void **tx_queues_local = NULL;
        unsigned int i;

        RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
                        numa_node);

        /* now do all data allocation - for the eth_dev data structure,
         * the queue arrays and the internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL) {
                rte_errno = ENOMEM;
                goto error;
        }

        rx_queues_local = rte_zmalloc_socket(name,
                        sizeof(void *) * nb_rx_queues, 0, numa_node);
        if (rx_queues_local == NULL) {
                rte_errno = ENOMEM;
                goto error;
        }

        tx_queues_local = rte_zmalloc_socket(name,
                        sizeof(void *) * nb_tx_queues, 0, numa_node);
        if (tx_queues_local == NULL) {
                rte_errno = ENOMEM;
                goto error;
        }

        internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
        if (internals == NULL) {
                rte_errno = ENOMEM;
                goto error;
        }

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL) {
                rte_errno = ENOSPC;
                goto error;
        }

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in eth_dev_data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        /* NOTE: we'll replace the data element of the originally allocated
         * eth_dev so the rings are local per-process
         */
        rte_memcpy(data, eth_dev->data, sizeof(*data));
        data->rx_queues = rx_queues_local;
        data->tx_queues = tx_queues_local;

        internals->action = action;
        internals->max_rx_queues = nb_rx_queues;
        internals->max_tx_queues = nb_tx_queues;
        for (i = 0; i < nb_rx_queues; i++) {
                internals->rx_ring_queues[i].rng = rx_queues[i];
                data->rx_queues[i] = &internals->rx_ring_queues[i];
        }
        for (i = 0; i < nb_tx_queues; i++) {
                internals->tx_ring_queues[i].rng = tx_queues[i];
                data->tx_queues[i] = &internals->tx_ring_queues[i];
        }

        data->dev_private = internals;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &internals->address;

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;
        data->kdrv = RTE_KDRV_NONE;
        data->numa_node = numa_node;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_ring_rx;
        eth_dev->tx_pkt_burst = eth_ring_tx;

        *eth_dev_p = eth_dev;

        return data->port_id;

error:
        rte_free(rx_queues_local);
        rte_free(tx_queues_local);
        rte_free(data);
        rte_free(internals);

        return -1;
}

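/*
 * Create an ethdev backed by the caller-supplied rings. The ring pointers
 * are passed to the probe callback in-process by formatting the address of
 * the stack-allocated args struct into the "internal=<ptr>" devarg string.
 */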
int
rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
                const unsigned int nb_rx_queues,
                struct rte_ring *const tx_queues[],
                const unsigned int nb_tx_queues,
                const unsigned int numa_node)
{
        struct ring_internal_args args = {
                .rx_queues = rx_queues,
                .nb_rx_queues = nb_rx_queues,
                .tx_queues = tx_queues,
                .nb_tx_queues = nb_tx_queues,
                .numa_node = numa_node,
                .addr = &args,
        };
        char args_str[32] = { 0 };
        char ring_name[32] = { 0 };
        uint16_t port_id = RTE_MAX_ETHPORTS;
        int ret;

        /* do some parameter checking */
        if (rx_queues == NULL && nb_rx_queues > 0) {
                rte_errno = EINVAL;
                return -1;
        }
        if (tx_queues == NULL && nb_tx_queues > 0) {
                rte_errno = EINVAL;
                return -1;
        }
        if (nb_rx_queues > RTE_PMD_RING_MAX_RX_RINGS) {
                rte_errno = EINVAL;
                return -1;
        }

        snprintf(args_str, sizeof(args_str), "%s=%p",
                        ETH_RING_INTERNAL_ARG, &args);
        snprintf(ring_name, sizeof(ring_name), "net_ring_%s", name);

        ret = rte_vdev_init(ring_name, args_str);
        if (ret) {
                rte_errno = EINVAL;
                return -1;
        }

        rte_eth_dev_get_port_by_name(ring_name, &port_id);

        return port_id;
}

int
rte_eth_from_ring(struct rte_ring *r)
{
        return rte_eth_from_rings(r->name, &r, 1, &r, 1,
                        r->memzone ? r->memzone->socket_id : SOCKET_ID_ANY);
}

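/*
 * Create (or, for DEV_ATTACH, look up) a set of "ETH_RXTX<i>_<name>" rings
 * and build an ethdev that uses the same rings for both RX and TX.
 */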
static int
eth_dev_ring_create(const char *name, const unsigned int numa_node,
                enum dev_action action, struct rte_eth_dev **eth_dev)
{
        /* rx and tx are so-called from point of view of first port.
         * They are inverted from the point of view of second port
         */
        struct rte_ring *rxtx[RTE_PMD_RING_MAX_RX_RINGS];
        unsigned int i;
        char rng_name[RTE_RING_NAMESIZE];
        unsigned int num_rings = RTE_MIN(RTE_PMD_RING_MAX_RX_RINGS,
                        RTE_PMD_RING_MAX_TX_RINGS);

        for (i = 0; i < num_rings; i++) {
                snprintf(rng_name, sizeof(rng_name), "ETH_RXTX%u_%s", i, name);
                rxtx[i] = (action == DEV_CREATE) ?
                                rte_ring_create(rng_name, 1024, numa_node,
                                                RING_F_SP_ENQ|RING_F_SC_DEQ) :
                                rte_ring_lookup(rng_name);
                if (rxtx[i] == NULL)
                        return -1;
        }

        if (do_eth_dev_ring_create(name, rxtx, num_rings, rxtx, num_rings,
                numa_node, action, eth_dev) < 0)
                return -1;

        return 0;
}

struct node_action_pair {
        char name[PATH_MAX];
        unsigned int node;
        enum dev_action action;
};

struct node_action_list {
        unsigned int total;
        unsigned int count;
        struct node_action_pair *list;
};

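/*
 * Parse one "nodeaction" value of the form name:node:action, where action
 * is ATTACH or CREATE, and append it to the node_action_list.
 */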
static int
parse_kvlist(const char *key __rte_unused, const char *value, void *data)
{
        struct node_action_list *info = data;
        int ret;
        char *name;
        char *action;
        char *node;
        char *end;

        name = strdup(value);

        ret = -EINVAL;

        if (!name) {
                RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n");
                goto out;
        }

        node = strchr(name, ':');
        if (!node) {
                RTE_LOG(WARNING, PMD, "could not parse node value from %s\n",
                        name);
                goto out;
        }

        *node = '\0';
        node++;

        action = strchr(node, ':');
        if (!action) {
                RTE_LOG(WARNING, PMD, "could not parse action value from %s\n",
                        node);
                goto out;
        }

        *action = '\0';
        action++;

        /*
         * Need to do some sanity checking here
         */

        if (strcmp(action, ETH_RING_ACTION_ATTACH) == 0)
                info->list[info->count].action = DEV_ATTACH;
        else if (strcmp(action, ETH_RING_ACTION_CREATE) == 0)
                info->list[info->count].action = DEV_CREATE;
        else
                goto out;

        errno = 0;
        info->list[info->count].node = strtol(node, &end, 10);

        if ((errno != 0) || (*end != '\0')) {
                RTE_LOG(WARNING, PMD,
                        "node value %s is unparseable as a number\n", node);
                goto out;
        }

        snprintf(info->list[info->count].name,
                 sizeof(info->list[info->count].name), "%s", name);

        info->count++;

        ret = 0;
out:
        free(name);
        return ret;
}

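/*
 * Recover the ring_internal_args pointer that rte_eth_from_rings() encoded
 * into the "internal" devarg and sanity-check it against the embedded self
 * address.
 */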
static int
parse_internal_args(const char *key __rte_unused, const char *value,
                void *data)
{
        struct ring_internal_args **internal_args = data;
        void *args;

        sscanf(value, "%p", &args);

        *internal_args = args;

        if ((*internal_args)->addr != args)
                return -1;

        return 0;
}

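/*
 * Probe a net_ring vdev. With no devargs a device is created (or attached
 * to) on the caller's socket; otherwise the "internal" or "nodeaction"
 * arguments select the rings and NUMA placement to use.
 */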
static int
rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        struct node_action_list *info = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct ring_internal_args *internal_args;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);

        RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);

        if (params == NULL || params[0] == '\0') {
                ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE,
                                &eth_dev);
                if (ret == -1) {
                        RTE_LOG(INFO, PMD,
                                "Attach to pmd_ring for %s\n", name);
                        ret = eth_dev_ring_create(name, rte_socket_id(),
                                                  DEV_ATTACH, &eth_dev);
                }
        } else {
                kvlist = rte_kvargs_parse(params, valid_arguments);

                if (!kvlist) {
                        RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
                                        " rings-backed ethernet device\n");
                        ret = eth_dev_ring_create(name, rte_socket_id(),
                                                  DEV_CREATE, &eth_dev);
                        if (ret == -1) {
                                RTE_LOG(INFO, PMD,
                                        "Attach to pmd_ring for %s\n",
                                        name);
                                ret = eth_dev_ring_create(name, rte_socket_id(),
                                                          DEV_ATTACH, &eth_dev);
                        }

                        if (eth_dev)
                                eth_dev->device = &dev->device;

                        return ret;
                }

                if (rte_kvargs_count(kvlist, ETH_RING_INTERNAL_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist, ETH_RING_INTERNAL_ARG,
                                                 parse_internal_args,
                                                 &internal_args);
                        if (ret < 0)
                                goto out_free;

                        ret = do_eth_dev_ring_create(name,
                                internal_args->rx_queues,
                                internal_args->nb_rx_queues,
                                internal_args->tx_queues,
                                internal_args->nb_tx_queues,
                                internal_args->numa_node,
                                DEV_ATTACH,
                                &eth_dev);
                        if (ret >= 0)
                                ret = 0;
                } else {
                        ret = rte_kvargs_count(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG);
                        info = rte_zmalloc("struct node_action_list",
                                           sizeof(struct node_action_list) +
                                           (sizeof(struct node_action_pair) * ret),
                                           0);
                        if (!info) {
                                ret = -ENOMEM;
                                goto out_free;
                        }

                        info->total = ret;
                        info->list = (struct node_action_pair *)(info + 1);

                        ret = rte_kvargs_process(kvlist, ETH_RING_NUMA_NODE_ACTION_ARG,
                                                 parse_kvlist, info);

                        if (ret < 0)
                                goto out_free;

                        for (info->count = 0; info->count < info->total; info->count++) {
                                ret = eth_dev_ring_create(info->list[info->count].name,
                                                          info->list[info->count].node,
                                                          info->list[info->count].action,
                                                          &eth_dev);
                                if ((ret == -1) &&
                                    (info->list[info->count].action == DEV_CREATE)) {
                                        RTE_LOG(INFO, PMD,
                                                "Attach to pmd_ring for %s\n",
                                                name);
                                        ret = eth_dev_ring_create(name,
                                                        info->list[info->count].node,
                                                        DEV_ATTACH,
                                                        &eth_dev);
                                }
                        }
                }
        }

        if (eth_dev)
                eth_dev->device = &dev->device;

out_free:
        rte_kvargs_free(kvlist);
        rte_free(info);
        return ret;
}

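/*
 * Tear down a net_ring vdev: free the rings (only if this process created
 * them), the queue arrays and the private data, then release the port.
 */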
static int
rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev *eth_dev = NULL;
        struct pmd_internals *internals = NULL;
        struct ring_queue *r = NULL;
        uint16_t i;

        RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name);

        if (name == NULL)
                return -EINVAL;

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_dev_stop(eth_dev);

        internals = eth_dev->data->dev_private;
        if (internals->action == DEV_CREATE) {
                /*
                 * it is only necessary to delete the rings in rx_queues
                 * because they are the same ones used in tx_queues
                 */
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        r = eth_dev->data->rx_queues[i];
                        rte_ring_free(r->rng);
                }
        }

        rte_free(eth_dev->data->rx_queues);
        rte_free(eth_dev->data->tx_queues);
        rte_free(eth_dev->data->dev_private);

        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);
        return 0;
}

static struct rte_vdev_driver pmd_ring_drv = {
        .probe = rte_pmd_ring_probe,
        .remove = rte_pmd_ring_remove,
};

RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
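/*
 * Usage example (illustrative vdev name), creating a rings-backed port on
 * NUMA node 0 from the EAL command line:
 *   --vdev=net_ring0,nodeaction=r0:0:CREATE
 */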
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
        ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");