/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

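/*
 * Device arguments: "size" is the length in bytes of the synthetic packets
 * produced on rx (default 64) and "copy" enables copying the per-queue dummy
 * packet to/from each mbuf (default 0, i.e. disabled).
 */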
static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        NULL
};

struct pmd_internals;

struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned packet_size;
        unsigned packet_copy;
        uint16_t port_id;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        /** Bit mask of RSS offloads, the bit offset also means flow type */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_AUTONEG,
};

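/*
 * Burst handlers. In the default (no-copy) mode rx just allocates mbufs of
 * the configured packet size without touching their payload, and tx simply
 * frees the mbufs, so the PMD sources and sinks traffic with minimal work.
 * The "copy" variants additionally memcpy the per-queue dummy packet.
 */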
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
                if (!bufs[i])
                        break;
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}

static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return;

        internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i, num_stats;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return -EINVAL;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] =
                        internal->tx_null_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;

        return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
                internal->tx_null_queues[i].tx_pkts.cnt = 0;
                internal->tx_null_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }

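/*
 * RSS callbacks: the null PMD has no real hardware, so these only store the
 * RETA and hash configuration under a spinlock and report it back on query;
 * the stored values do not influence the generated traffic.
 */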
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static void
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused struct ether_addr *addr)
{
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .mtu_set = eth_mtu_set,
        .link_update = eth_link_update,
        .mac_addr_set = eth_mac_address_set,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

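/*
 * Allocate and wire up one null ethdev: a private copy of rte_eth_dev_data
 * keeps the device state local to this process, the default RSS key and
 * RETA size are initialised, and the rx/tx burst handlers are selected
 * according to the "copy" argument.
 */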
static int
eth_dev_null_create(struct rte_vdev_device *dev,
                unsigned packet_size,
                unsigned packet_copy)
{
        const unsigned nb_rx_queues = 1;
        const unsigned nb_tx_queues = 1;
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
                dev->device.numa_node);

        /* now do all data allocation - for the eth_dev structure and the
         * internal (private) data
         */
        data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
                dev->device.numa_node);
        if (!data)
                return -ENOMEM;

        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
        if (!eth_dev) {
                rte_free(data);
                return -ENOMEM;
        }

        /* now put it all together
         * - store queue data in internals,
         * - store numa_node info in ethdev data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        /* NOTE: we'll replace the data element of the originally allocated
         * eth_dev so the nulls are local per-process */

        internals = eth_dev->data->dev_private;
        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;
        internals->port_id = eth_dev->data->port_id;

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key, 40);

        rte_memcpy(data, eth_dev->data, sizeof(*data));
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        return 0;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}

static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        unsigned packet_size = default_packet_size;
        unsigned packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        int ret;

        if (!dev)
                return -EINVAL;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);
        RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s\n", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (!dev)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
                return -1;

        rte_free(eth_dev->data->dev_private);
        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
        .probe = rte_pmd_null_probe,
        .remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
        "size=<int> "
        "copy=<int>");
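
/*
 * Example usage (a sketch; the exact EAL and application flags depend on the
 * build and the application). The device is created from the command line,
 * e.g.:
 *
 *   testpmd -l 0-1 --vdev 'net_null0,size=64,copy=1' -- -i
 *
 * which instantiates one null port whose rx bursts deliver 64-byte synthetic
 * packets and whose tx bursts copy and then drop every mbuf.
 */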