drivers/net/null/rte_eth_null.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

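/*
 * Illustrative note (not part of the original source): the two devargs above
 * are what rte_pmd_null_probe() parses further down. A typical EAL invocation
 * is assumed to look like --vdev=net_null0,size=128,copy=1, where "size" sets
 * the synthetic packet length and a non-zero "copy" selects the memcpy-based
 * rx/tx paths.
 */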
static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	"driver",
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];		/**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG,
};

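/*
 * Added note: the port advertises a fixed 10G full-duplex link; only the
 * link_status field ever changes, toggled between ETH_LINK_UP and
 * ETH_LINK_DOWN by eth_dev_start()/eth_dev_stop() below.
 */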
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}
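/*
 * Added note: eth_null_rx() above only allocates mbufs and stamps their
 * lengths; the payload is whatever the mempool hands back. The copy variant
 * below additionally memcpy()s the per-queue dummy_packet into each mbuf so
 * the receive path actually touches packet memory.
 */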

static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}
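/*
 * Added note: both transmit paths simply free the mbufs they are given.
 * eth_null_copy_tx() first copies each payload into dummy_packet, so a
 * benchmark run with copy=1 pays for one memcpy per packet in each
 * direction.
 */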

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}
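/*
 * Added note: each queue's dummy_packet is a packet_size-byte scratch buffer
 * (despite the struct rte_mbuf * type it is used as raw memory) that backs
 * the copy rx/tx paths; it is released per queue in eth_queue_release().
 */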


static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

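/*
 * Added note: the RSS callbacks above only store and report software state;
 * no hashing is performed on the synthetic traffic. With ETH_RSS_RETA_SIZE_128
 * entries and RTE_RETA_GROUP_SIZE of 64, reta_conf holds 128 / 64 = 2 groups,
 * which is also the reta_size advertised through eth_dev_info().
 */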
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

static int
eth_dev_null_create(struct rte_vdev_device *dev,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
		dev->device.numa_node);
	/* now do all data allocation - for eth_dev structure
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
		dev->device.numa_node);
	if (!data)
		return -ENOMEM;

	eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
	if (!eth_dev) {
		rte_free(data);
		return -ENOMEM;
	}

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals = eth_dev->data->dev_private;
	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	rte_memcpy(data, eth_dev->data, sizeof(*data));
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	data->dev_flags = RTE_ETH_DEV_DETACHABLE;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;
}
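/*
 * Illustrative sketch (not from the original source) of how an application
 * would drive a port created above; port_id and the mempool mp are assumed
 * to have been obtained elsewhere:
 *
 *	struct rte_eth_conf conf = {0};
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb;
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 *	nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 */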

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

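/*
 * Added note: the two handlers above are rte_kvargs_process() callbacks;
 * for a devargs string such as "size=128,copy=1" (example values) each is
 * invoked with the matching value string and writes the parsed number into
 * the unsigned passed via extra_args.
 */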
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
	const char *name, *params;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);
	params = rte_vdev_device_args(dev);
	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (!dev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");
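/*
 * Illustrative invocation (assumed, not part of the original source):
 *
 *	testpmd -l 0-1 -n 4 --vdev=net_null0,size=64,copy=0 -- -i
 *
 * creates one null port whose rx burst produces 64-byte placeholder packets
 * and whose tx burst discards everything it is given.
 */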