/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev_driver.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_sched.h>
#include <rte_tm_driver.h>

#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"

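/* Resolve the hard (underlying physical) ethdev paired with this soft device */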
#define DEV_HARD(p)                                     \
        (&rte_eth_devices[p->hard.port_id])

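/*
 * Device arguments:
 * - soft_tm_* parameters configure the optional traffic management (TM)
 *   layer; specifying any of them implicitly enables TM (see pmd_parse_args).
 * - hard_name (mandatory) and hard_tx_queue_id select the hard ethdev and
 *   the hard TX queue that this soft device drives.
 */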
#define PMD_PARAM_SOFT_TM                               "soft_tm"
#define PMD_PARAM_SOFT_TM_RATE                          "soft_tm_rate"
#define PMD_PARAM_SOFT_TM_NB_QUEUES                     "soft_tm_nb_queues"
#define PMD_PARAM_SOFT_TM_QSIZE0                        "soft_tm_qsize0"
#define PMD_PARAM_SOFT_TM_QSIZE1                        "soft_tm_qsize1"
#define PMD_PARAM_SOFT_TM_QSIZE2                        "soft_tm_qsize2"
#define PMD_PARAM_SOFT_TM_QSIZE3                        "soft_tm_qsize3"
#define PMD_PARAM_SOFT_TM_ENQ_BSZ                       "soft_tm_enq_bsz"
#define PMD_PARAM_SOFT_TM_DEQ_BSZ                       "soft_tm_deq_bsz"

#define PMD_PARAM_HARD_NAME                             "hard_name"
#define PMD_PARAM_HARD_TX_QUEUE_ID                      "hard_tx_queue_id"

static const char *pmd_valid_args[] = {
        PMD_PARAM_SOFT_TM,
        PMD_PARAM_SOFT_TM_RATE,
        PMD_PARAM_SOFT_TM_NB_QUEUES,
        PMD_PARAM_SOFT_TM_QSIZE0,
        PMD_PARAM_SOFT_TM_QSIZE1,
        PMD_PARAM_SOFT_TM_QSIZE2,
        PMD_PARAM_SOFT_TM_QSIZE3,
        PMD_PARAM_SOFT_TM_ENQ_BSZ,
        PMD_PARAM_SOFT_TM_DEQ_BSZ,
        PMD_PARAM_HARD_NAME,
        PMD_PARAM_HARD_TX_QUEUE_ID,
        NULL
};

static const struct rte_eth_dev_info pmd_dev_info = {
        .min_rx_bufsize = 0,
        .max_rx_pktlen = UINT32_MAX,
        .max_rx_queues = UINT16_MAX,
        .max_tx_queues = UINT16_MAX,
        .rx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
        },
        .tx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
        },
};

static void
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
        struct rte_eth_dev_info *dev_info)
{
        memcpy(dev_info, &pmd_dev_info, sizeof(*dev_info));
}

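/*
 * The soft device cannot expose more RX queues than the hard device has, and
 * the hard TX queue it feeds must actually exist on the hard device.
 */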
static int
pmd_dev_configure(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct rte_eth_dev *hard_dev = DEV_HARD(p);

        if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
                return -1;

        if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
                return -1;

        return 0;
}

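/*
 * RX queue setup: in non-intrusive mode, allocate a small proxy object that
 * records which hard RX queue to poll; in intrusive mode, share the hard
 * device's queue object directly so its RX burst function can be reused.
 */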
static int
pmd_rx_queue_setup(struct rte_eth_dev *dev,
        uint16_t rx_queue_id,
        uint16_t nb_rx_desc __rte_unused,
        unsigned int socket_id,
        const struct rte_eth_rxconf *rx_conf __rte_unused,
        struct rte_mempool *mb_pool __rte_unused)
{
        struct pmd_internals *p = dev->data->dev_private;

        if (p->params.soft.intrusive == 0) {
                struct pmd_rx_queue *rxq;

                rxq = rte_zmalloc_socket(p->params.soft.name,
                        sizeof(struct pmd_rx_queue), 0, socket_id);
                if (rxq == NULL)
                        return -ENOMEM;

                rxq->hard.port_id = p->hard.port_id;
                rxq->hard.rx_queue_id = rx_queue_id;
                dev->data->rx_queues[rx_queue_id] = rxq;
        } else {
                struct rte_eth_dev *hard_dev = DEV_HARD(p);
                void *rxq = hard_dev->data->rx_queues[rx_queue_id];

                if (rxq == NULL)
                        return -1;

                dev->data->rx_queues[rx_queue_id] = rxq;
        }
        return 0;
}

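/*
 * Each soft TX queue is a single-producer/single-consumer ring; packets sit
 * here until rte_pmd_softnic_run() drains them towards the hard device.
 */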
static int
pmd_tx_queue_setup(struct rte_eth_dev *dev,
        uint16_t tx_queue_id,
        uint16_t nb_tx_desc,
        unsigned int socket_id,
        const struct rte_eth_txconf *tx_conf __rte_unused)
{
        uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4;
        char name[size];
        struct rte_ring *r;

        snprintf(name, sizeof(name), "%s_txq%04x",
                dev->data->name, tx_queue_id);
        r = rte_ring_create(name, nb_tx_desc, socket_id,
                RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (r == NULL)
                return -1;

        dev->data->tx_queues[tx_queue_id] = r;
        return 0;
}

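/*
 * Start the TM layer when enabled; in intrusive mode, also adopt the hard
 * device's RX burst function so soft RX becomes a direct pass-through.
 */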
static int
pmd_dev_start(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        if (tm_used(dev)) {
                int status = tm_start(p);

                if (status)
                        return status;
        }

        dev->data->dev_link.link_status = ETH_LINK_UP;

        if (p->params.soft.intrusive) {
                struct rte_eth_dev *hard_dev = DEV_HARD(p);

                /* The hard_dev->rx_pkt_burst should be stable by now */
                dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
        }

        return 0;
}

static void
pmd_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;

        if (tm_used(dev))
                tm_stop(p);
}

static void
pmd_dev_close(struct rte_eth_dev *dev)
{
        uint32_t i;

        /* TX queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++)
                rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
}

static int
pmd_link_update(struct rte_eth_dev *dev __rte_unused,
        int wait_to_complete __rte_unused)
{
        return 0;
}

static int
pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
{
        *(const struct rte_tm_ops **)arg =
                (tm_enabled(dev)) ? &pmd_tm_ops : NULL;

        return 0;
}

static const struct eth_dev_ops pmd_ops = {
        .dev_configure = pmd_dev_configure,
        .dev_start = pmd_dev_start,
        .dev_stop = pmd_dev_stop,
        .dev_close = pmd_dev_close,
        .link_update = pmd_link_update,
        .dev_infos_get = pmd_dev_infos_get,
        .rx_queue_setup = pmd_rx_queue_setup,
        .tx_queue_setup = pmd_tx_queue_setup,
        .tm_ops_get = pmd_tm_ops_get,
};

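/*
 * Datapath entry points: RX polls the paired hard RX queue directly (used in
 * non-intrusive mode only); TX merely enqueues to the soft queue's ring, and
 * the actual transmit to the hard device happens in rte_pmd_softnic_run().
 */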
static uint16_t
pmd_rx_pkt_burst(void *rxq,
        struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts)
{
        struct pmd_rx_queue *rx_queue = rxq;

        return rte_eth_rx_burst(rx_queue->hard.port_id,
                rx_queue->hard.rx_queue_id,
                rx_pkts,
                nb_pkts);
}

static uint16_t
pmd_tx_pkt_burst(void *txq,
        struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
        return (uint16_t)rte_ring_enqueue_burst(txq,
                (void **)tx_pkts,
                nb_pkts,
                NULL);
}

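/*
 * Default (no TM) run loop: round-robin over the soft TX queue rings,
 * accumulate packets into the enqueue buffer, and write a full burst to the
 * hard TX queue. A flush counter bounds how many iterations a partial burst
 * can wait before it is pushed out anyway.
 */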
static __rte_always_inline int
run_default(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        /* Persistent context: Read Only (update not required) */
        struct rte_mbuf **pkts = p->soft.def.pkts;
        uint16_t nb_tx_queues = dev->data->nb_tx_queues;

        /* Persistent context: Read - Write (update required) */
        uint32_t txq_pos = p->soft.def.txq_pos;
        uint32_t pkts_len = p->soft.def.pkts_len;
        uint32_t flush_count = p->soft.def.flush_count;

        /* Not part of the persistent context */
        uint32_t pos;
        uint16_t i;

        /* Soft device TXQ read, Hard device TXQ write */
        for (i = 0; i < nb_tx_queues; i++) {
                struct rte_ring *txq = dev->data->tx_queues[txq_pos];

                /* Read soft device TXQ burst to packet enqueue buffer */
                pkts_len += rte_ring_sc_dequeue_burst(txq,
                        (void **)&pkts[pkts_len],
                        DEFAULT_BURST_SIZE,
                        NULL);

                /* Increment soft device TXQ */
                txq_pos++;
                if (txq_pos >= nb_tx_queues)
                        txq_pos = 0;

                /* Hard device TXQ write when complete burst is available */
                if (pkts_len >= DEFAULT_BURST_SIZE) {
                        for (pos = 0; pos < pkts_len; )
                                pos += rte_eth_tx_burst(p->hard.port_id,
                                        p->params.hard.tx_queue_id,
                                        &pkts[pos],
                                        (uint16_t)(pkts_len - pos));

                        pkts_len = 0;
                        flush_count = 0;
                        break;
                }
        }

        if (flush_count >= FLUSH_COUNT_THRESHOLD) {
                for (pos = 0; pos < pkts_len; )
                        pos += rte_eth_tx_burst(p->hard.port_id,
                                p->params.hard.tx_queue_id,
                                &pkts[pos],
                                (uint16_t)(pkts_len - pos));

                pkts_len = 0;
                flush_count = 0;
        }

        p->soft.def.txq_pos = txq_pos;
        p->soft.def.pkts_len = pkts_len;
        p->soft.def.flush_count = flush_count + 1;

        return 0;
}

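/*
 * TM run loop: same round-robin read of the soft TX queue rings, but packets
 * are pushed through the rte_sched hierarchical scheduler first; whatever the
 * scheduler releases on dequeue is then written to the hard TX queue.
 */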
static __rte_always_inline int
run_tm(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        /* Persistent context: Read Only (update not required) */
        struct rte_sched_port *sched = p->soft.tm.sched;
        struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
        struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
        uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
        uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
        uint16_t nb_tx_queues = dev->data->nb_tx_queues;

        /* Persistent context: Read - Write (update required) */
        uint32_t txq_pos = p->soft.tm.txq_pos;
        uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
        uint32_t flush_count = p->soft.tm.flush_count;

        /* Not part of the persistent context */
        uint32_t pkts_deq_len, pos;
        uint16_t i;

        /* Soft device TXQ read, TM enqueue */
        for (i = 0; i < nb_tx_queues; i++) {
                struct rte_ring *txq = dev->data->tx_queues[txq_pos];

                /* Read TXQ burst to packet enqueue buffer */
                pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
                        (void **)&pkts_enq[pkts_enq_len],
                        enq_bsz,
                        NULL);

                /* Increment TXQ */
                txq_pos++;
                if (txq_pos >= nb_tx_queues)
                        txq_pos = 0;

                /* TM enqueue when complete burst is available */
                if (pkts_enq_len >= enq_bsz) {
                        rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

                        pkts_enq_len = 0;
                        flush_count = 0;
                        break;
                }
        }

        if (flush_count >= FLUSH_COUNT_THRESHOLD) {
                if (pkts_enq_len)
                        rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

                pkts_enq_len = 0;
                flush_count = 0;
        }

        p->soft.tm.txq_pos = txq_pos;
        p->soft.tm.pkts_enq_len = pkts_enq_len;
        p->soft.tm.flush_count = flush_count + 1;

        /* TM dequeue, Hard device TXQ write */
        pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);

        for (pos = 0; pos < pkts_deq_len; )
                pos += rte_eth_tx_burst(p->hard.port_id,
                        p->params.hard.tx_queue_id,
                        &pkts_deq[pos],
                        (uint16_t)(pkts_deq_len - pos));

        return 0;
}

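/*
 * Run one iteration of the soft device datapath. The application is expected
 * to call this periodically (typically from its data plane loop) so that the
 * soft TX queues and, when enabled, the TM scheduler get serviced.
 */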
int
rte_pmd_softnic_run(uint16_t port_id)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
#endif

        return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
}

static struct ether_addr eth_addr = { .addr_bytes = {0} };

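/*
 * Map the highest set bit of the hard device's speed capability mask to a
 * rate in Mbps: index 31 - __builtin_clz(speed_capa) selects the table entry
 * for the fastest advertised ETH_LINK_SPEED_* flag.
 */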
static uint32_t
eth_dev_speed_max_mbps(uint32_t speed_capa)
{
        uint32_t rate_mbps[32] = {
                ETH_SPEED_NUM_NONE,
                ETH_SPEED_NUM_10M,
                ETH_SPEED_NUM_10M,
                ETH_SPEED_NUM_100M,
                ETH_SPEED_NUM_100M,
                ETH_SPEED_NUM_1G,
                ETH_SPEED_NUM_2_5G,
                ETH_SPEED_NUM_5G,
                ETH_SPEED_NUM_10G,
                ETH_SPEED_NUM_20G,
                ETH_SPEED_NUM_25G,
                ETH_SPEED_NUM_40G,
                ETH_SPEED_NUM_50G,
                ETH_SPEED_NUM_56G,
                ETH_SPEED_NUM_100G,
        };

        uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;
        return rate_mbps[pos];
}

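/*
 * The default packet buffer holds 2 * DEFAULT_BURST_SIZE mbuf pointers: up to
 * DEFAULT_BURST_SIZE - 1 packets may already be buffered when a ring dequeue
 * adds up to DEFAULT_BURST_SIZE more, so the occupancy can briefly approach
 * twice the burst size before run_default() flushes it.
 */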
static int
default_init(struct pmd_internals *p,
        struct pmd_params *params,
        int numa_node)
{
        p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
                2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
                0,
                numa_node);

        if (p->soft.def.pkts == NULL)
                return -ENOMEM;

        return 0;
}

static void
default_free(struct pmd_internals *p)
{
        rte_free(p->soft.def.pkts);
}

static void *
pmd_init(struct pmd_params *params, int numa_node)
{
        struct pmd_internals *p;
        int status;

        p = rte_zmalloc_socket(params->soft.name,
                sizeof(struct pmd_internals),
                0,
                numa_node);
        if (p == NULL)
                return NULL;

        memcpy(&p->params, params, sizeof(p->params));
        rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);

        /* Default */
        status = default_init(p, params, numa_node);
        if (status) {
                free(p->params.hard.name);
                rte_free(p);
                return NULL;
        }

        /* Traffic Management (TM) */
        if (params->soft.flags & PMD_FEATURE_TM) {
                status = tm_init(p, params, numa_node);
                if (status) {
                        default_free(p);
                        free(p->params.hard.name);
                        rte_free(p);
                        return NULL;
                }
        }

        return p;
}

static void
pmd_free(struct pmd_internals *p)
{
        if (p->params.soft.flags & PMD_FEATURE_TM)
                tm_free(p);

        default_free(p);

        free(p->params.hard.name);
        rte_free(p);
}

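/*
 * Create the soft ethdev entry: link state mirrors the hard device's maximum
 * speed, and the RX burst function is left NULL in intrusive mode because it
 * is stitched in later by pmd_dev_start().
 */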
static int
pmd_ethdev_register(struct rte_vdev_device *vdev,
        struct pmd_params *params,
        void *dev_private)
{
        struct rte_eth_dev_info hard_info;
        struct rte_eth_dev *soft_dev;
        uint32_t hard_speed;
        int numa_node;
        uint16_t hard_port_id;

        rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
        rte_eth_dev_info_get(hard_port_id, &hard_info);
        hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
        numa_node = rte_eth_dev_socket_id(hard_port_id);

        /* Ethdev entry allocation */
        soft_dev = rte_eth_dev_allocate(params->soft.name);
        if (!soft_dev)
                return -ENOMEM;

        /* dev */
        soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
                NULL : /* set up later */
                pmd_rx_pkt_burst;
        soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
        soft_dev->tx_pkt_prepare = NULL;
        soft_dev->dev_ops = &pmd_ops;
        soft_dev->device = &vdev->device;

        /* dev->data */
        soft_dev->data->dev_private = dev_private;
        soft_dev->data->dev_link.link_speed = hard_speed;
        soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        soft_dev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
        soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
        soft_dev->data->mac_addrs = &eth_addr;
        soft_dev->data->promiscuous = 1;
        soft_dev->data->kdrv = RTE_KDRV_NONE;
        soft_dev->data->numa_node = numa_node;

        return 0;
}

static int
get_string(const char *key __rte_unused, const char *value, void *extra_args)
{
        if (!value || !extra_args)
                return -EINVAL;

        *(char **)extra_args = strdup(value);

        if (!*(char **)extra_args)
                return -ENOMEM;

        return 0;
}

static int
get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
{
        if (!value || !extra_args)
                return -EINVAL;

        *(uint32_t *)extra_args = strtoull(value, NULL, 0);

        return 0;
}

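/*
 * Parse the device arguments into pmd_params. Defaults are applied first,
 * hard_name is the only mandatory argument, and any soft_tm_* argument (or
 * soft_tm=on) sets the PMD_FEATURE_TM flag.
 */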
static int
pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
{
        struct rte_kvargs *kvlist;
        int i, ret;

        kvlist = rte_kvargs_parse(params, pmd_valid_args);
        if (kvlist == NULL)
                return -EINVAL;

        /* Set default values */
        memset(p, 0, sizeof(*p));
        p->soft.name = name;
        p->soft.intrusive = INTRUSIVE;
        p->soft.tm.rate = 0;
        p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
        p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
        p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
        p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;

        /* SOFT: TM (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
                char *s;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
                        &get_string, &s);
                if (ret < 0)
                        goto out_free;

                if (strcmp(s, "on") == 0)
                        p->soft.flags |= PMD_FEATURE_TM;
                else if (strcmp(s, "off") == 0)
                        p->soft.flags &= ~PMD_FEATURE_TM;
                else
                        ret = -EINVAL;

                free(s);
                if (ret)
                        goto out_free;
        }

        /* SOFT: TM rate (measured in bytes/second) (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
                        &get_uint32, &p->soft.tm.rate);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM number of queues (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
                        &get_uint32, &p->soft.tm.nb_queues);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM queue size 0 .. 3 (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[0] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[1] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[2] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[3] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM enqueue burst size (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
                        &get_uint32, &p->soft.tm.enq_bsz);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM dequeue burst size (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
                        &get_uint32, &p->soft.tm.deq_bsz);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* HARD: name (mandatory) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
                        &get_string, &p->hard.name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -EINVAL;
                goto out_free;
        }

        /* HARD: tx_queue_id (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
                        &get_uint32, &p->hard.tx_queue_id);
                if (ret < 0)
                        goto out_free;
        }

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

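/*
 * Probe: validate the arguments against the hard device, then allocate the
 * private data and register the soft ethdev. Ownership of the strdup'd
 * hard_name string passes to pmd_init(), so the argument-check error paths
 * before that call free the string themselves to avoid leaking it.
 */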
static int
pmd_probe(struct rte_vdev_device *vdev)
{
        struct pmd_params p;
        const char *params;
        int status;

        struct rte_eth_dev_info hard_info;
        uint32_t hard_speed;
        uint16_t hard_port_id;
        int numa_node;
        void *dev_private;

        RTE_LOG(INFO, PMD,
                "Probing device \"%s\"\n",
                rte_vdev_device_name(vdev));

        /* Parse input arguments */
        params = rte_vdev_device_args(vdev);
        if (!params)
                return -EINVAL;

        status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
        if (status)
                return status;

        /* Check input arguments */
        if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id)) {
                free(p.hard.name);
                return -EINVAL;
        }

        rte_eth_dev_info_get(hard_port_id, &hard_info);
        hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
        numa_node = rte_eth_dev_socket_id(hard_port_id);

        if (p.hard.tx_queue_id >= hard_info.max_tx_queues) {
                free(p.hard.name);
                return -EINVAL;
        }

        if (p.soft.flags & PMD_FEATURE_TM) {
                status = tm_params_check(&p, hard_speed);

                if (status) {
                        free(p.hard.name);
                        return status;
                }
        }

        /* Allocate and initialize soft ethdev private data */
        dev_private = pmd_init(&p, numa_node);
        if (dev_private == NULL)
                return -ENOMEM;

        /* Register soft ethdev */
        RTE_LOG(INFO, PMD,
                "Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n",
                p.soft.name, p.hard.name);

        status = pmd_ethdev_register(vdev, &p, dev_private);
        if (status) {
                pmd_free(dev_private);
                return status;
        }

        return 0;
}

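/* Tear down the soft ethdev created by pmd_probe() and free its resources */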
static int
pmd_remove(struct rte_vdev_device *vdev)
{
        struct rte_eth_dev *dev = NULL;
        struct pmd_internals *p;

        if (!vdev)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Removing device \"%s\"\n",
                rte_vdev_device_name(vdev));

        /* Find the ethdev entry */
        dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
        if (dev == NULL)
                return -ENODEV;
        p = dev->data->dev_private;

        /* Free device data structures */
        pmd_free(p);
        rte_free(dev->data);
        rte_eth_dev_release_port(dev);

        return 0;
}

static struct rte_vdev_driver pmd_softnic_drv = {
        .probe = pmd_probe,
        .remove = pmd_remove,
};

RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
        PMD_PARAM_SOFT_TM "=on|off "
        PMD_PARAM_SOFT_TM_RATE "=<int> "
        PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
        PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
        PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
        PMD_PARAM_HARD_NAME "=<string> "
        PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
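
/*
 * Illustrative usage (device and port names below are examples, not mandated
 * by this driver): the soft device is created with an EAL vdev argument along
 * the lines of
 *   --vdev 'net_softnic0,hard_name=0000:02:00.0,soft_tm=on'
 * and the application then calls rte_pmd_softnic_run(port_id) periodically
 * from its data plane loop to service the soft device.
 */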