/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_sched.h>
#include <rte_tm_driver.h>

#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"

#define DEV_HARD(p)                                     \
        (&rte_eth_devices[p->hard.port_id])

#define PMD_PARAM_SOFT_TM               "soft_tm"
#define PMD_PARAM_SOFT_TM_RATE          "soft_tm_rate"
#define PMD_PARAM_SOFT_TM_NB_QUEUES     "soft_tm_nb_queues"
#define PMD_PARAM_SOFT_TM_QSIZE0        "soft_tm_qsize0"
#define PMD_PARAM_SOFT_TM_QSIZE1        "soft_tm_qsize1"
#define PMD_PARAM_SOFT_TM_QSIZE2        "soft_tm_qsize2"
#define PMD_PARAM_SOFT_TM_QSIZE3        "soft_tm_qsize3"
#define PMD_PARAM_SOFT_TM_ENQ_BSZ       "soft_tm_enq_bsz"
#define PMD_PARAM_SOFT_TM_DEQ_BSZ       "soft_tm_deq_bsz"

#define PMD_PARAM_HARD_NAME             "hard_name"
#define PMD_PARAM_HARD_TX_QUEUE_ID      "hard_tx_queue_id"

static const char *pmd_valid_args[] = {
        PMD_PARAM_SOFT_TM,
        PMD_PARAM_SOFT_TM_RATE,
        PMD_PARAM_SOFT_TM_NB_QUEUES,
        PMD_PARAM_SOFT_TM_QSIZE0,
        PMD_PARAM_SOFT_TM_QSIZE1,
        PMD_PARAM_SOFT_TM_QSIZE2,
        PMD_PARAM_SOFT_TM_QSIZE3,
        PMD_PARAM_SOFT_TM_ENQ_BSZ,
        PMD_PARAM_SOFT_TM_DEQ_BSZ,
        PMD_PARAM_HARD_NAME,
        PMD_PARAM_HARD_TX_QUEUE_ID,
        NULL
};
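
/*
 * A softnic port is created through the EAL vdev interface using the
 * parameters above. An illustrative invocation (argument values are
 * examples, not defaults):
 *
 *   --vdev 'net_softnic0,hard_name=<hard port name>,soft_tm=on'
 *
 * Only hard_name is mandatory; all other parameters fall back to the
 * SOFTNIC_* defaults applied in pmd_parse_args() below.
 */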

static const struct rte_eth_dev_info pmd_dev_info = {
        .min_rx_bufsize = 0,
        .max_rx_pktlen = UINT32_MAX,
        .max_rx_queues = UINT16_MAX,
        .max_tx_queues = UINT16_MAX,
        .rx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
        },
        .tx_desc_lim = {
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
        },
};

static void
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
        struct rte_eth_dev_info *dev_info)
{
        memcpy(dev_info, &pmd_dev_info, sizeof(*dev_info));
}

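/*
 * Device configuration is valid only if the soft device does not need more
 * RX queues than its hard device provides (each soft RX queue is backed by
 * a hard one) and the selected hard TX queue actually exists.
 */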
static int
pmd_dev_configure(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;
        struct rte_eth_dev *hard_dev = DEV_HARD(p);

        if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
                return -1;

        if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
                return -1;

        return 0;
}

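/*
 * RX queue setup. Non-intrusive mode allocates a small proxy object
 * recording which hard (port, queue) pair to poll; intrusive mode reuses
 * the hard device's queue pointer directly, so the hard RX burst function
 * can later be plugged in unchanged (see pmd_dev_start()).
 */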
static int
pmd_rx_queue_setup(struct rte_eth_dev *dev,
        uint16_t rx_queue_id,
        uint16_t nb_rx_desc __rte_unused,
        unsigned int socket_id,
        const struct rte_eth_rxconf *rx_conf __rte_unused,
        struct rte_mempool *mb_pool __rte_unused)
{
        struct pmd_internals *p = dev->data->dev_private;

        if (p->params.soft.intrusive == 0) {
                struct pmd_rx_queue *rxq;

                rxq = rte_zmalloc_socket(p->params.soft.name,
                        sizeof(struct pmd_rx_queue), 0, socket_id);
                if (rxq == NULL)
                        return -ENOMEM;

                rxq->hard.port_id = p->hard.port_id;
                rxq->hard.rx_queue_id = rx_queue_id;
                dev->data->rx_queues[rx_queue_id] = rxq;
        } else {
                struct rte_eth_dev *hard_dev = DEV_HARD(p);
                void *rxq = hard_dev->data->rx_queues[rx_queue_id];

                if (rxq == NULL)
                        return -1;

                dev->data->rx_queues[rx_queue_id] = rxq;
        }
        return 0;
}

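/*
 * TX queue setup. Each soft TX queue is a single-producer/single-consumer
 * rte_ring; packets parked here are drained towards the hard device by
 * rte_pmd_softnic_run() below.
 */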
static int
pmd_tx_queue_setup(struct rte_eth_dev *dev,
        uint16_t tx_queue_id,
        uint16_t nb_tx_desc,
        unsigned int socket_id,
        const struct rte_eth_txconf *tx_conf __rte_unused)
{
        uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4;
        char name[size];
        struct rte_ring *r;

        snprintf(name, sizeof(name), "%s_txq%04x",
                dev->data->name, tx_queue_id);
        r = rte_ring_create(name, nb_tx_desc, socket_id,
                RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (r == NULL)
                return -1;

        dev->data->tx_queues[tx_queue_id] = r;
        return 0;
}

static int
pmd_dev_start(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        if (tm_used(dev)) {
                int status = tm_start(p);

                if (status)
                        return status;
        }

        dev->data->dev_link.link_status = ETH_LINK_UP;

        if (p->params.soft.intrusive) {
                struct rte_eth_dev *hard_dev = DEV_HARD(p);

                /* The hard_dev->rx_pkt_burst should be stable by now */
                dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
        }

        return 0;
}

static void
pmd_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;

        if (tm_used(dev))
                tm_stop(p);
}

static void
pmd_dev_close(struct rte_eth_dev *dev)
{
        uint32_t i;

        /* TX queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++)
                rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
}

static int
pmd_link_update(struct rte_eth_dev *dev __rte_unused,
        int wait_to_complete __rte_unused)
{
        return 0;
}

static int
pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
{
        *(const struct rte_tm_ops **)arg =
                (tm_enabled(dev)) ? &pmd_tm_ops : NULL;

        return 0;
}

static const struct eth_dev_ops pmd_ops = {
        .dev_configure = pmd_dev_configure,
        .dev_start = pmd_dev_start,
        .dev_stop = pmd_dev_stop,
        .dev_close = pmd_dev_close,
        .link_update = pmd_link_update,
        .dev_infos_get = pmd_dev_infos_get,
        .rx_queue_setup = pmd_rx_queue_setup,
        .tx_queue_setup = pmd_tx_queue_setup,
        .tm_ops_get = pmd_tm_ops_get,
};

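/*
 * Datapath burst callbacks. RX (non-intrusive mode only) forwards the poll
 * to the underlying hard RX queue; TX merely enqueues the burst onto the
 * per-queue ring created in pmd_tx_queue_setup().
 */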
static uint16_t
pmd_rx_pkt_burst(void *rxq,
        struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts)
{
        struct pmd_rx_queue *rx_queue = rxq;

        return rte_eth_rx_burst(rx_queue->hard.port_id,
                rx_queue->hard.rx_queue_id,
                rx_pkts,
                nb_pkts);
}

static uint16_t
pmd_tx_pkt_burst(void *txq,
        struct rte_mbuf **tx_pkts,
        uint16_t nb_pkts)
{
        return (uint16_t)rte_ring_enqueue_burst(txq,
                (void **)tx_pkts,
                nb_pkts,
                NULL);
}

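/*
 * Default (non-TM) run loop: poll the soft TX queues round-robin,
 * accumulate packets into the enqueue buffer, and write them to the
 * configured hard TX queue once a full DEFAULT_BURST_SIZE burst is
 * available, or earlier when the flush threshold expires.
 */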
static __rte_always_inline int
run_default(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        /* Persistent context: Read Only (update not required) */
        struct rte_mbuf **pkts = p->soft.def.pkts;
        uint16_t nb_tx_queues = dev->data->nb_tx_queues;

        /* Persistent context: Read - Write (update required) */
        uint32_t txq_pos = p->soft.def.txq_pos;
        uint32_t pkts_len = p->soft.def.pkts_len;
        uint32_t flush_count = p->soft.def.flush_count;

        /* Not part of the persistent context */
        uint32_t pos;
        uint16_t i;

        /* Soft device TXQ read, Hard device TXQ write */
        for (i = 0; i < nb_tx_queues; i++) {
                struct rte_ring *txq = dev->data->tx_queues[txq_pos];

                /* Read soft device TXQ burst to packet enqueue buffer */
                pkts_len += rte_ring_sc_dequeue_burst(txq,
                        (void **)&pkts[pkts_len],
                        DEFAULT_BURST_SIZE,
                        NULL);

                /* Increment soft device TXQ */
                txq_pos++;
                if (txq_pos >= nb_tx_queues)
                        txq_pos = 0;

                /* Hard device TXQ write when complete burst is available */
                if (pkts_len >= DEFAULT_BURST_SIZE) {
                        for (pos = 0; pos < pkts_len; )
                                pos += rte_eth_tx_burst(p->hard.port_id,
                                        p->params.hard.tx_queue_id,
                                        &pkts[pos],
                                        (uint16_t)(pkts_len - pos));

                        pkts_len = 0;
                        flush_count = 0;
                        break;
                }
        }

        if (flush_count >= FLUSH_COUNT_THRESHOLD) {
                for (pos = 0; pos < pkts_len; )
                        pos += rte_eth_tx_burst(p->hard.port_id,
                                p->params.hard.tx_queue_id,
                                &pkts[pos],
                                (uint16_t)(pkts_len - pos));

                pkts_len = 0;
                flush_count = 0;
        }

        p->soft.def.txq_pos = txq_pos;
        p->soft.def.pkts_len = pkts_len;
        p->soft.def.flush_count = flush_count + 1;

        return 0;
}

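/*
 * TM run loop: same round-robin poll of the soft TX queues, but the packets
 * first pass through the rte_sched hierarchical scheduler; whatever the
 * scheduler releases on dequeue is then written to the hard TX queue.
 */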
static __rte_always_inline int
run_tm(struct rte_eth_dev *dev)
{
        struct pmd_internals *p = dev->data->dev_private;

        /* Persistent context: Read Only (update not required) */
        struct rte_sched_port *sched = p->soft.tm.sched;
        struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
        struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
        uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
        uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
        uint16_t nb_tx_queues = dev->data->nb_tx_queues;

        /* Persistent context: Read - Write (update required) */
        uint32_t txq_pos = p->soft.tm.txq_pos;
        uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
        uint32_t flush_count = p->soft.tm.flush_count;

        /* Not part of the persistent context */
        uint32_t pkts_deq_len, pos;
        uint16_t i;

        /* Soft device TXQ read, TM enqueue */
        for (i = 0; i < nb_tx_queues; i++) {
                struct rte_ring *txq = dev->data->tx_queues[txq_pos];

                /* Read TXQ burst to packet enqueue buffer */
                pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
                        (void **)&pkts_enq[pkts_enq_len],
                        enq_bsz,
                        NULL);

                /* Increment TXQ */
                txq_pos++;
                if (txq_pos >= nb_tx_queues)
                        txq_pos = 0;

                /* TM enqueue when complete burst is available */
                if (pkts_enq_len >= enq_bsz) {
                        rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

                        pkts_enq_len = 0;
                        flush_count = 0;
                        break;
                }
        }

        if (flush_count >= FLUSH_COUNT_THRESHOLD) {
                if (pkts_enq_len)
                        rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

                pkts_enq_len = 0;
                flush_count = 0;
        }

        p->soft.tm.txq_pos = txq_pos;
        p->soft.tm.pkts_enq_len = pkts_enq_len;
        p->soft.tm.flush_count = flush_count + 1;

        /* TM dequeue, Hard device TXQ write */
        pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);

        for (pos = 0; pos < pkts_deq_len; )
                pos += rte_eth_tx_burst(p->hard.port_id,
                        p->params.hard.tx_queue_id,
                        &pkts_deq[pos],
                        (uint16_t)(pkts_deq_len - pos));

        return 0;
}

int
rte_pmd_softnic_run(uint16_t port_id)
{
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
#endif

        return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
}
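
/*
 * rte_pmd_softnic_run() is not driven by an interrupt or a service core in
 * this file: the application is expected to invoke it periodically for each
 * softnic port so that the soft TX queues get drained (and, with TM enabled,
 * the scheduler gets serviced). A minimal polling loop might look like this
 * (sketch; port_id and the quit flag are application-provided):
 *
 *         while (!quit)
 *                 rte_pmd_softnic_run(port_id);
 */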

static struct ether_addr eth_addr = { .addr_bytes = {0} };

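/*
 * Translate an ethdev speed capability bitmask into the rate (Mbps) of its
 * fastest advertised speed: the table is indexed by the bit position of the
 * ETH_LINK_SPEED_* flags, and __builtin_clz() finds the most significant
 * set bit. Unlisted positions read as 0 (ETH_SPEED_NUM_NONE).
 */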
static uint32_t
eth_dev_speed_max_mbps(uint32_t speed_capa)
{
        uint32_t rate_mbps[32] = {
                ETH_SPEED_NUM_NONE,
                ETH_SPEED_NUM_10M,
                ETH_SPEED_NUM_10M,
                ETH_SPEED_NUM_100M,
                ETH_SPEED_NUM_100M,
                ETH_SPEED_NUM_1G,
                ETH_SPEED_NUM_2_5G,
                ETH_SPEED_NUM_5G,
                ETH_SPEED_NUM_10G,
                ETH_SPEED_NUM_20G,
                ETH_SPEED_NUM_25G,
                ETH_SPEED_NUM_40G,
                ETH_SPEED_NUM_50G,
                ETH_SPEED_NUM_56G,
                ETH_SPEED_NUM_100G,
        };

        uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;
        return rate_mbps[pos];
}

static int
default_init(struct pmd_internals *p,
        struct pmd_params *params,
        int numa_node)
{
        p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
                2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
                0,
                numa_node);

        if (p->soft.def.pkts == NULL)
                return -ENOMEM;

        return 0;
}

static void
default_free(struct pmd_internals *p)
{
        rte_free(p->soft.def.pkts);
}

static void *
pmd_init(struct pmd_params *params, int numa_node)
{
        struct pmd_internals *p;
        int status;

        p = rte_zmalloc_socket(params->soft.name,
                sizeof(struct pmd_internals),
                0,
                numa_node);
        if (p == NULL)
                return NULL;

        memcpy(&p->params, params, sizeof(p->params));
        rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);

        /* Default */
        status = default_init(p, params, numa_node);
        if (status) {
                free(p->params.hard.name);
                rte_free(p);
                return NULL;
        }

        /* Traffic Management (TM) */
        if (params->soft.flags & PMD_FEATURE_TM) {
                status = tm_init(p, params, numa_node);
                if (status) {
                        default_free(p);
                        free(p->params.hard.name);
                        rte_free(p);
                        return NULL;
                }
        }

        return p;
}

static void
pmd_free(struct pmd_internals *p)
{
        if (p->params.soft.flags & PMD_FEATURE_TM)
                tm_free(p);

        default_free(p);

        free(p->params.hard.name);
        rte_free(p);
}

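/*
 * Allocate and populate the soft ethdev entry: burst callbacks (RX is left
 * NULL in intrusive mode and filled in at dev_start time), the ops table,
 * and link parameters inherited from the hard device (max speed, NUMA node).
 */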
static int
pmd_ethdev_register(struct rte_vdev_device *vdev,
        struct pmd_params *params,
        void *dev_private)
{
        struct rte_eth_dev_info hard_info;
        struct rte_eth_dev *soft_dev;
        uint32_t hard_speed;
        int numa_node;
        uint16_t hard_port_id;

        rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
        rte_eth_dev_info_get(hard_port_id, &hard_info);
        hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
        numa_node = rte_eth_dev_socket_id(hard_port_id);

        /* Ethdev entry allocation */
        soft_dev = rte_eth_dev_allocate(params->soft.name);
        if (!soft_dev)
                return -ENOMEM;

        /* dev */
        soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
                NULL : /* set up later */
                pmd_rx_pkt_burst;
        soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
        soft_dev->tx_pkt_prepare = NULL;
        soft_dev->dev_ops = &pmd_ops;
        soft_dev->device = &vdev->device;

        /* dev->data */
        soft_dev->data->dev_private = dev_private;
        soft_dev->data->dev_link.link_speed = hard_speed;
        soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        soft_dev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
        soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
        soft_dev->data->mac_addrs = &eth_addr;
        soft_dev->data->promiscuous = 1;
        soft_dev->data->kdrv = RTE_KDRV_NONE;
        soft_dev->data->numa_node = numa_node;

        return 0;
}

static int
get_string(const char *key __rte_unused, const char *value, void *extra_args)
{
        if (!value || !extra_args)
                return -EINVAL;

        *(char **)extra_args = strdup(value);

        if (!*(char **)extra_args)
                return -ENOMEM;

        return 0;
}

static int
get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
{
        if (!value || !extra_args)
                return -EINVAL;

        *(uint32_t *)extra_args = strtoull(value, NULL, 0);

        return 0;
}

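/*
 * Parse the vdev argument string into pmd_params. Note that supplying any
 * soft_tm_* parameter implicitly enables the TM feature (PMD_FEATURE_TM);
 * only soft_tm=off clears it explicitly.
 */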
static int
pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
{
        struct rte_kvargs *kvlist;
        int i, ret;

        kvlist = rte_kvargs_parse(params, pmd_valid_args);
        if (kvlist == NULL)
                return -EINVAL;

        /* Set default values */
        memset(p, 0, sizeof(*p));
        p->soft.name = name;
        p->soft.intrusive = INTRUSIVE;
        p->soft.tm.rate = 0;
        p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
        p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
        p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
        p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;

        /* SOFT: TM (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
                char *s;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
                        &get_string, &s);
                if (ret < 0)
                        goto out_free;

                if (strcmp(s, "on") == 0)
                        p->soft.flags |= PMD_FEATURE_TM;
                else if (strcmp(s, "off") == 0)
                        p->soft.flags &= ~PMD_FEATURE_TM;
                else
                        ret = -EINVAL;

                free(s);
                if (ret)
                        goto out_free;
        }

        /* SOFT: TM rate (measured in bytes/second) (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
                        &get_uint32, &p->soft.tm.rate);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM number of queues (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
                        &get_uint32, &p->soft.tm.nb_queues);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM queue size 0 .. 3 (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[0] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[1] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[2] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
                uint32_t qsize;

                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
                        &get_uint32, &qsize);
                if (ret < 0)
                        goto out_free;

                p->soft.tm.qsize[3] = (uint16_t)qsize;
                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM enqueue burst size (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
                        &get_uint32, &p->soft.tm.enq_bsz);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* SOFT: TM dequeue burst size (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
                        &get_uint32, &p->soft.tm.deq_bsz);
                if (ret < 0)
                        goto out_free;

                p->soft.flags |= PMD_FEATURE_TM;
        }

        /* HARD: name (mandatory) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
                        &get_string, &p->hard.name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -EINVAL;
                goto out_free;
        }

        /* HARD: tx_queue_id (optional) */
        if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
                ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
                        &get_uint32, &p->hard.tx_queue_id);
                if (ret < 0)
                        goto out_free;
        }

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

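/*
 * Probe: parse and validate the vdev arguments against the hard device's
 * capabilities, allocate the soft device's private data, then register the
 * soft ethdev entry.
 */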
static int
pmd_probe(struct rte_vdev_device *vdev)
{
        struct pmd_params p;
        const char *params;
        int status;

        struct rte_eth_dev_info hard_info;
        uint32_t hard_speed;
        uint16_t hard_port_id;
        int numa_node;
        void *dev_private;

        RTE_LOG(INFO, PMD,
                "Probing device \"%s\"\n",
                rte_vdev_device_name(vdev));

        /* Parse input arguments */
        params = rte_vdev_device_args(vdev);
        if (!params)
                return -EINVAL;

        status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
        if (status)
                return status;

        /* Check input arguments */
        if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id))
                return -EINVAL;

        rte_eth_dev_info_get(hard_port_id, &hard_info);
        hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
        numa_node = rte_eth_dev_socket_id(hard_port_id);

        if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
                return -EINVAL;

        if (p.soft.flags & PMD_FEATURE_TM) {
                status = tm_params_check(&p, hard_speed);

                if (status)
                        return status;
        }

        /* Allocate and initialize soft ethdev private data */
        dev_private = pmd_init(&p, numa_node);
        if (dev_private == NULL)
                return -ENOMEM;

        /* Register soft ethdev */
        RTE_LOG(INFO, PMD,
                "Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n",
                p.soft.name, p.hard.name);

        status = pmd_ethdev_register(vdev, &p, dev_private);
        if (status) {
                pmd_free(dev_private);
                return status;
        }

        return 0;
}

static int
pmd_remove(struct rte_vdev_device *vdev)
{
        struct rte_eth_dev *dev = NULL;
        struct pmd_internals *p;

        if (!vdev)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Removing device \"%s\"\n",
                rte_vdev_device_name(vdev));

        /* Find the ethdev entry */
        dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
        if (dev == NULL)
                return -ENODEV;
        p = dev->data->dev_private;

        /* Free device data structures */
        pmd_free(p);
        rte_free(dev->data);
        rte_eth_dev_release_port(dev);

        return 0;
}

static struct rte_vdev_driver pmd_softnic_drv = {
        .probe = pmd_probe,
        .remove = pmd_remove,
};

RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
        PMD_PARAM_SOFT_TM "=on|off "
        PMD_PARAM_SOFT_TM_RATE "=<int> "
        PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
        PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
        PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
        PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
        PMD_PARAM_HARD_NAME "=<string> "
        PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");