/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
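
/*
 * Null PMD: a software ethdev that generates dummy packets on Rx and
 * discards packets on Tx, typically used to measure application and
 * framework overhead without real hardware. Illustrative invocation
 * (an assumption for documentation, not taken from this file):
 *
 *   testpmd -l 0-1 --vdev=net_null0,size=64,copy=1 -- -i
 *
 * "size" sets the dummy packet length; "copy" selects the memcpy Rx/Tx
 * paths below so packet buffers are actually touched.
 */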
#define ETH_NULL_PACKET_SIZE_ARG        "size"
#define ETH_NULL_PACKET_COPY_ARG        "copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
        ETH_NULL_PACKET_SIZE_ARG,
        ETH_NULL_PACKET_COPY_ARG,
        NULL
};

struct pmd_internals;

struct null_queue {
        struct pmd_internals *internals;

        struct rte_mempool *mb_pool;
        struct rte_mbuf *dummy_packet;

        rte_atomic64_t rx_pkts;
        rte_atomic64_t tx_pkts;
        rte_atomic64_t err_pkts;
};

struct pmd_internals {
        unsigned packet_size;
        unsigned packet_copy;
        uint16_t port_id;

        struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
        struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

        struct ether_addr eth_addr;
        /** Bit mask of RSS offloads, the bit offset also means flow type */
        uint64_t flow_type_rss_offloads;

        rte_spinlock_t rss_lock;

        uint16_t reta_size;
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                        RTE_RETA_GROUP_SIZE];

        uint8_t rss_key[40];                /**< 40-byte hash key. */
};

static struct rte_eth_link pmd_link = {
        .link_speed = ETH_SPEED_NUM_10G,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN,
        .link_autoneg = ETH_LINK_FIXED,
};

static int eth_null_logtype;

#define PMD_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, eth_null_logtype, \
                "%s(): " fmt "\n", __func__, ##args)
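
/*
 * No-copy Rx burst: allocate nb_bufs mbufs from the queue's mempool and
 * return them with only length/port metadata set. The payload is whatever
 * happens to be in the buffers.
 */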
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}
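
/*
 * Copying Rx burst: same as above, but memcpy the per-queue dummy packet
 * into each mbuf so the data path actually reads and writes packet memory.
 */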
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
                return 0;

        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
                                        packet_size);
                bufs[i]->data_len = (uint16_t)packet_size;
                bufs[i]->pkt_len = packet_size;
                bufs[i]->port = h->internals->port_id;
        }

        rte_atomic64_add(&(h->rx_pkts), i);

        return i;
}
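
/* No-copy Tx burst: every packet is silently dropped by freeing its mbuf. */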
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        for (i = 0; i < nb_bufs; i++)
                rte_pktmbuf_free(bufs[i]);

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}
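
/*
 * Copying Tx burst: read packet_size bytes of each mbuf into the per-queue
 * scratch buffer before freeing it, emulating a device that actually
 * consumes the payload.
 */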
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        int i;
        struct null_queue *h = q;
        unsigned packet_size;

        if ((q == NULL) || (bufs == NULL))
                return 0;

        packet_size = h->internals->packet_size;
        for (i = 0; i < nb_bufs; i++) {
                rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
                                        packet_size);
                rte_pktmbuf_free(bufs[i]);
        }

        rte_atomic64_add(&(h->tx_pkts), i);

        return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return -EINVAL;

        dev->data->dev_link.link_status = ETH_LINK_UP;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        if (dev == NULL)
                return;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
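
/*
 * Queue setup allocates a zeroed packet_size-byte scratch buffer per queue;
 * the copy Rx/Tx paths above use it as the dummy packet source/sink.
 */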
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if ((dev == NULL) || (mb_pool == NULL))
                return -EINVAL;

        internals = dev->data->dev_private;

        if (rx_queue_id >= dev->data->nb_rx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->rx_null_queues[rx_queue_id].internals = internals;
        internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct rte_mbuf *dummy_packet;
        struct pmd_internals *internals;
        unsigned packet_size;

        if (dev == NULL)
                return -EINVAL;

        internals = dev->data->dev_private;

        if (tx_queue_id >= dev->data->nb_tx_queues)
                return -ENODEV;

        packet_size = internals->packet_size;

        dev->data->tx_queues[tx_queue_id] =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, dev->data->numa_node);
        if (dummy_packet == NULL)
                return -ENOMEM;

        internals->tx_null_queues[tx_queue_id].internals = internals;
        internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

        return 0;
}

static int
eth_mtu_set(struct rte_eth_dev *dev __rte_unused, uint16_t mtu __rte_unused)
{
        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals;

        if ((dev == NULL) || (dev_info == NULL))
                return;

        internals = dev->data->dev_private;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
        dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
        dev_info->min_rx_bufsize = 0;
        dev_info->reta_size = internals->reta_size;
        dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
        dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
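
/*
 * Stats are kept as per-queue atomic counters; only the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues are reported individually.
 */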
static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
        unsigned i, num_stats;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
        const struct pmd_internals *internal;

        if ((dev == NULL) || (igb_stats == NULL))
                return -EINVAL;

        internal = dev->data->dev_private;
        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_rx_queues,
                                RTE_DIM(internal->rx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_ipackets[i] =
                        internal->rx_null_queues[i].rx_pkts.cnt;
                rx_total += igb_stats->q_ipackets[i];
        }

        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                        RTE_MIN(dev->data->nb_tx_queues,
                                RTE_DIM(internal->tx_null_queues)));
        for (i = 0; i < num_stats; i++) {
                igb_stats->q_opackets[i] =
                        internal->tx_null_queues[i].tx_pkts.cnt;
                igb_stats->q_errors[i] =
                        internal->tx_null_queues[i].err_pkts.cnt;
                tx_total += igb_stats->q_opackets[i];
                tx_err_total += igb_stats->q_errors[i];
        }

        igb_stats->ipackets = rx_total;
        igb_stats->opackets = tx_total;
        igb_stats->oerrors = tx_err_total;

        return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal;

        if (dev == NULL)
                return;

        internal = dev->data->dev_private;
        for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
                internal->tx_null_queues[i].tx_pkts.cnt = 0;
                internal->tx_null_queues[i].err_pkts.cnt = 0;
        }
}

static void
eth_queue_release(void *q)
{
        struct null_queue *nq;

        if (q == NULL)
                return;

        nq = q;
        rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused) { return 0; }
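
/*
 * The RSS ops below only store and return state under rss_lock; no actual
 * hashing or packet distribution happens, but applications can exercise
 * the RETA/RSS API against this PMD.
 */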
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                internal->reta_conf[i].mask = reta_conf[i].mask;
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
                struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
        int i, j;
        struct pmd_internals *internal = dev->data->dev_private;

        if (reta_size != internal->reta_size)
                return -EINVAL;

        rte_spinlock_lock(&internal->rss_lock);

        /* Copy RETA table */
        for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
                for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                        if ((reta_conf[i].mask >> j) & 0x01)
                                reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
        }

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
                dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
                                rss_conf->rss_hf & internal->flow_type_rss_offloads;

        if (rss_conf->rss_key)
                rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_rss_conf *rss_conf)
{
        struct pmd_internals *internal = dev->data->dev_private;

        rte_spinlock_lock(&internal->rss_lock);

        rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        if (rss_conf->rss_key)
                rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

        rte_spinlock_unlock(&internal->rss_lock);

        return 0;
}

static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused struct ether_addr *addr)
{
        return 0;
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .mtu_set = eth_mtu_set,
        .link_update = eth_link_update,
        .mac_addr_set = eth_mac_address_set,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .reta_update = eth_rss_reta_update,
        .reta_query = eth_rss_reta_query,
        .rss_hash_update = eth_rss_hash_update,
        .rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;
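
/*
 * Allocate the ethdev, fill in pmd_internals (random MAC, default RSS key,
 * full-size RETA) and select the copy or no-copy burst functions.
 */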
static int
eth_dev_null_create(struct rte_vdev_device *dev,
                unsigned packet_size,
                unsigned packet_copy)
{
        const unsigned nb_rx_queues = 1;
        const unsigned nb_tx_queues = 1;
        struct rte_eth_dev_data *data;
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;

        static const uint8_t default_rss_key[40] = {
                0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
                0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
                0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
                0xBE, 0xAC, 0x01, 0xFA
        };

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
                dev->device.numa_node);

        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
        if (!eth_dev)
                return -ENOMEM;

        /* Now put it all together:
         * - store queue data in internals
         * - store numa_node info in the ethdev data
         * - point eth_dev_data at internals
         * - point the eth_dev structure at the new eth_dev_data structure
         */

        internals = eth_dev->data->dev_private;
        internals->packet_size = packet_size;
        internals->packet_copy = packet_copy;
        internals->port_id = eth_dev->data->port_id;
        eth_random_addr(internals->eth_addr.addr_bytes);

        internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
        internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

        rte_memcpy(internals->rss_key, default_rss_key, 40);

        data = eth_dev->data;
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &internals->eth_addr;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        if (packet_copy) {
                eth_dev->rx_pkt_burst = eth_null_copy_rx;
                eth_dev->tx_pkt_burst = eth_null_copy_tx;
        } else {
                eth_dev->rx_pkt_burst = eth_null_rx;
                eth_dev->tx_pkt_burst = eth_null_tx;
        }

        rte_eth_dev_probing_finish(eth_dev);
        return 0;
}
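
/*
 * kvargs callbacks: parse the "size" and "copy" device arguments as
 * unsigned integers (base 0, so hex and octal forms are accepted too).
 */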
static inline int
get_packet_size_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_size = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_size = (unsigned)strtoul(a, NULL, 0);
        if (*packet_size == UINT_MAX)
                return -1;

        return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        const char *a = value;
        unsigned *packet_copy = extra_args;

        if ((value == NULL) || (extra_args == NULL))
                return -EINVAL;

        *packet_copy = (unsigned)strtoul(a, NULL, 0);
        if (*packet_copy == UINT_MAX)
                return -1;

        return 0;
}
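
/*
 * Probe: parse the vdev arguments and create the device. A secondary
 * process with no arguments simply attaches to the port created by the
 * primary process.
 */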
static int
rte_pmd_null_probe(struct rte_vdev_device *dev)
{
        const char *name, *params;
        unsigned packet_size = default_packet_size;
        unsigned packet_copy = default_packet_copy;
        struct rte_kvargs *kvlist = NULL;
        struct rte_eth_dev *eth_dev;
        int ret;

        if (!dev)
                return -EINVAL;

        name = rte_vdev_device_name(dev);
        params = rte_vdev_device_args(dev);
        PMD_LOG(INFO, "Initializing pmd_null for %s", name);

        if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
            strlen(params) == 0) {
                eth_dev = rte_eth_dev_attach_secondary(name);
                if (!eth_dev) {
                        PMD_LOG(ERR, "Failed to probe %s", name);
                        return -1;
                }
                /* TODO: request info from primary to set up Rx and Tx */
                eth_dev->dev_ops = &ops;
                eth_dev->device = &dev->device;
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }

        if (params != NULL) {
                kvlist = rte_kvargs_parse(params, valid_arguments);
                if (kvlist == NULL)
                        return -1;

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_SIZE_ARG,
                                        &get_packet_size_arg, &packet_size);
                        if (ret < 0)
                                goto free_kvlist;
                }

                if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {
                        ret = rte_kvargs_process(kvlist,
                                        ETH_NULL_PACKET_COPY_ARG,
                                        &get_packet_copy_arg, &packet_copy);
                        if (ret < 0)
                                goto free_kvlist;
                }
        }

        PMD_LOG(INFO, "Configure pmd_null: packet size is %u, "
                        "packet copy is %s", packet_size,
                        packet_copy ? "enabled" : "disabled");

        ret = eth_dev_null_create(dev, packet_size, packet_copy);

free_kvlist:
        if (kvlist)
                rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_null_remove(struct rte_vdev_device *dev)
{
        struct rte_eth_dev *eth_dev = NULL;

        if (!dev)
                return -EINVAL;

        PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
                        rte_socket_id());

        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
                return -1;

        rte_free(eth_dev->data->dev_private);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
        .probe = rte_pmd_null_probe,
        .remove = rte_pmd_null_remove,
};

RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
        "size=<int> "
        "copy=<int>");

RTE_INIT(eth_null_init_log)
{
        eth_null_logtype = rte_log_register("pmd.net.null");
        if (eth_null_logtype >= 0)
                rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
}