/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_sched.h>
#include <rte_tm_driver.h>

#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"
#define DEV_HARD(p)					\
	(&rte_eth_devices[p->hard.port_id])
#define PMD_PARAM_SOFT_TM		"soft_tm"
#define PMD_PARAM_SOFT_TM_RATE		"soft_tm_rate"
#define PMD_PARAM_SOFT_TM_NB_QUEUES	"soft_tm_nb_queues"
#define PMD_PARAM_SOFT_TM_QSIZE0	"soft_tm_qsize0"
#define PMD_PARAM_SOFT_TM_QSIZE1	"soft_tm_qsize1"
#define PMD_PARAM_SOFT_TM_QSIZE2	"soft_tm_qsize2"
#define PMD_PARAM_SOFT_TM_QSIZE3	"soft_tm_qsize3"
#define PMD_PARAM_SOFT_TM_ENQ_BSZ	"soft_tm_enq_bsz"
#define PMD_PARAM_SOFT_TM_DEQ_BSZ	"soft_tm_deq_bsz"

#define PMD_PARAM_HARD_NAME		"hard_name"
#define PMD_PARAM_HARD_TX_QUEUE_ID	"hard_tx_queue_id"
static const char *pmd_valid_args[] = {
	PMD_PARAM_SOFT_TM,
	PMD_PARAM_SOFT_TM_RATE,
	PMD_PARAM_SOFT_TM_NB_QUEUES,
	PMD_PARAM_SOFT_TM_QSIZE0,
	PMD_PARAM_SOFT_TM_QSIZE1,
	PMD_PARAM_SOFT_TM_QSIZE2,
	PMD_PARAM_SOFT_TM_QSIZE3,
	PMD_PARAM_SOFT_TM_ENQ_BSZ,
	PMD_PARAM_SOFT_TM_DEQ_BSZ,
	PMD_PARAM_HARD_NAME,
	PMD_PARAM_HARD_TX_QUEUE_ID,
	NULL
};
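
/*
 * Illustrative example (not part of the original file): a softnic port is
 * created on top of an existing "hard" ethdev via the EAL --vdev option,
 * using the parameters above, e.g.:
 *
 *   --vdev 'net_softnic0,hard_name=0000:02:00.0,soft_tm=on'
 *
 * "hard_name" is whatever name the underlying ethdev was probed with; the
 * PCI address shown is a made-up placeholder.
 */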
static void
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
	struct rte_eth_dev_info *dev_info)
{
	dev_info->max_rx_pktlen = UINT32_MAX;
	dev_info->max_rx_queues = UINT16_MAX;
	dev_info->max_tx_queues = UINT16_MAX;
}
static int
pmd_dev_configure(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_eth_dev *hard_dev = DEV_HARD(p);

	if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
		return -1;

	if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
		return -1;

	return 0;
}
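
/*
 * RX queue setup. In intrusive mode, the soft device shares the hard device
 * RX queues and (at dev_start time) borrows its rx_pkt_burst function, so RX
 * runs at native speed. In non-intrusive mode a small per-queue proxy
 * structure is allocated instead and pmd_rx_pkt_burst() forwards each poll
 * to the hard device.
 */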
static int
pmd_rx_queue_setup(struct rte_eth_dev *dev,
	uint16_t rx_queue_id,
	uint16_t nb_rx_desc __rte_unused,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf __rte_unused,
	struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (p->params.soft.intrusive == 0) {
		struct pmd_rx_queue *rxq;

		rxq = rte_zmalloc_socket(p->params.soft.name,
			sizeof(struct pmd_rx_queue), 0, socket_id);
		if (rxq == NULL)
			return -ENOMEM;

		rxq->hard.port_id = p->hard.port_id;
		rxq->hard.rx_queue_id = rx_queue_id;
		dev->data->rx_queues[rx_queue_id] = rxq;
	} else {
		struct rte_eth_dev *hard_dev = DEV_HARD(p);
		void *rxq = hard_dev->data->rx_queues[rx_queue_id];

		if (rxq == NULL)
			return -1;

		dev->data->rx_queues[rx_queue_id] = rxq;
	}
	return 0;
}
static int
pmd_tx_queue_setup(struct rte_eth_dev *dev,
	uint16_t tx_queue_id,
	uint16_t nb_tx_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf __rte_unused)
{
	uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4;
	char name[size];
	struct rte_ring *r;

	snprintf(name, sizeof(name), "%s_txq%04x",
		dev->data->name, tx_queue_id);
	r = rte_ring_create(name, nb_tx_desc, socket_id,
		RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		return -1;

	dev->data->tx_queues[tx_queue_id] = r;
	return 0;
}
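
/*
 * Design note: each soft TX queue is a single-producer/single-consumer
 * rte_ring. pmd_tx_pkt_burst() only stages packets in these rings; the
 * packets are drained toward the hard device by rte_pmd_softnic_run()
 * through run_default()/run_tm() below.
 */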
static int
pmd_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (tm_used(dev)) {
		int status = tm_start(p);

		if (status)
			return status;
	}

	dev->data->dev_link.link_status = ETH_LINK_UP;

	if (p->params.soft.intrusive) {
		struct rte_eth_dev *hard_dev = DEV_HARD(p);

		/* The hard_dev->rx_pkt_burst should be stable by now */
		dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
	}

	return 0;
}
static void
pmd_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (tm_used(dev))
		tm_stop(p);
}
static void
pmd_dev_close(struct rte_eth_dev *dev)
{
	uint32_t i;

	/* TX queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
}
static int
pmd_link_update(struct rte_eth_dev *dev __rte_unused,
	int wait_to_complete __rte_unused)
{
	return 0;
}
static int
pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
{
	*(const struct rte_tm_ops **)arg =
		(tm_enabled(dev)) ? &pmd_tm_ops : NULL;

	return 0;
}
static const struct eth_dev_ops pmd_ops = {
	.dev_configure = pmd_dev_configure,
	.dev_start = pmd_dev_start,
	.dev_stop = pmd_dev_stop,
	.dev_close = pmd_dev_close,
	.link_update = pmd_link_update,
	.dev_infos_get = pmd_dev_infos_get,
	.rx_queue_setup = pmd_rx_queue_setup,
	.tx_queue_setup = pmd_tx_queue_setup,
	.tm_ops_get = pmd_tm_ops_get,
};
static uint16_t
pmd_rx_pkt_burst(void *rxq,
	struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct pmd_rx_queue *rx_queue = rxq;

	return rte_eth_rx_burst(rx_queue->hard.port_id,
		rx_queue->hard.rx_queue_id,
		rx_pkts,
		nb_pkts);
}
static uint16_t
pmd_tx_pkt_burst(void *txq,
	struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	return (uint16_t)rte_ring_enqueue_burst(txq,
		(void **)tx_pkts,
		nb_pkts,
		NULL);
}
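
/*
 * Datapath run functions. TX packets staged in the per-queue soft TX rings
 * are drained here and pushed to the hard device, either directly
 * (run_default) or through the librte_sched traffic manager (run_tm). Both
 * accumulate packets until a full burst is available, and use a flush
 * counter so that a trickle of packets still gets sent out after
 * FLUSH_COUNT_THRESHOLD invocations without a complete burst.
 */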
static __rte_always_inline int
run_default(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* Persistent context: Read Only (update not required) */
	struct rte_mbuf **pkts = p->soft.def.pkts;
	uint16_t nb_tx_queues = dev->data->nb_tx_queues;

	/* Persistent context: Read - Write (update required) */
	uint32_t txq_pos = p->soft.def.txq_pos;
	uint32_t pkts_len = p->soft.def.pkts_len;
	uint32_t flush_count = p->soft.def.flush_count;

	/* Not part of the persistent context */
	uint32_t pos;
	uint16_t i;

	/* Soft device TXQ read, Hard device TXQ write */
	for (i = 0; i < nb_tx_queues; i++) {
		struct rte_ring *txq = dev->data->tx_queues[txq_pos];

		/* Read soft device TXQ burst to packet enqueue buffer */
		pkts_len += rte_ring_sc_dequeue_burst(txq,
			(void **)&pkts[pkts_len],
			DEFAULT_BURST_SIZE,
			NULL);

		/* Increment soft device TXQ */
		txq_pos++;
		if (txq_pos >= nb_tx_queues)
			txq_pos = 0;

		/* Hard device TXQ write when complete burst is available */
		if (pkts_len >= DEFAULT_BURST_SIZE) {
			for (pos = 0; pos < pkts_len; )
				pos += rte_eth_tx_burst(p->hard.port_id,
					p->params.hard.tx_queue_id,
					&pkts[pos],
					(uint16_t)(pkts_len - pos));

			pkts_len = 0;
			flush_count = 0;
			break;
		}
	}

	if (flush_count >= FLUSH_COUNT_THRESHOLD) {
		for (pos = 0; pos < pkts_len; )
			pos += rte_eth_tx_burst(p->hard.port_id,
				p->params.hard.tx_queue_id,
				&pkts[pos],
				(uint16_t)(pkts_len - pos));

		pkts_len = 0;
		flush_count = 0;
	}

	p->soft.def.txq_pos = txq_pos;
	p->soft.def.pkts_len = pkts_len;
	p->soft.def.flush_count = flush_count + 1;

	return 0;
}
static __rte_always_inline int
run_tm(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* Persistent context: Read Only (update not required) */
	struct rte_sched_port *sched = p->soft.tm.sched;
	struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
	struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
	uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
	uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
	uint16_t nb_tx_queues = dev->data->nb_tx_queues;

	/* Persistent context: Read - Write (update required) */
	uint32_t txq_pos = p->soft.tm.txq_pos;
	uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
	uint32_t flush_count = p->soft.tm.flush_count;

	/* Not part of the persistent context */
	uint32_t pkts_deq_len, pos;
	uint16_t i;

	/* Soft device TXQ read, TM enqueue */
	for (i = 0; i < nb_tx_queues; i++) {
		struct rte_ring *txq = dev->data->tx_queues[txq_pos];

		/* Read TXQ burst to packet enqueue buffer */
		pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
			(void **)&pkts_enq[pkts_enq_len],
			enq_bsz,
			NULL);

		/* Increment TXQ */
		txq_pos++;
		if (txq_pos >= nb_tx_queues)
			txq_pos = 0;

		/* TM enqueue when complete burst is available */
		if (pkts_enq_len >= enq_bsz) {
			rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

			pkts_enq_len = 0;
			flush_count = 0;
			break;
		}
	}

	if (flush_count >= FLUSH_COUNT_THRESHOLD) {
		if (pkts_enq_len)
			rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

		pkts_enq_len = 0;
		flush_count = 0;
	}

	p->soft.tm.txq_pos = txq_pos;
	p->soft.tm.pkts_enq_len = pkts_enq_len;
	p->soft.tm.flush_count = flush_count + 1;

	/* TM dequeue, Hard device TXQ write */
	pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);

	for (pos = 0; pos < pkts_deq_len; )
		pos += rte_eth_tx_burst(p->hard.port_id,
			p->params.hard.tx_queue_id,
			&pkts_deq[pos],
			(uint16_t)(pkts_deq_len - pos));

	return 0;
}
int
rte_pmd_softnic_run(uint16_t port_id)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
#endif

	return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
}
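
/*
 * Usage sketch (illustrative only): the application is expected to call
 * rte_pmd_softnic_run() periodically from the core servicing the soft port,
 * e.g. from its main packet loop. Port id 0 below is a placeholder:
 *
 *	while (!quit)
 *		rte_pmd_softnic_run(0);
 */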
static struct ether_addr eth_addr = { .addr_bytes = {0} };
static uint32_t
eth_dev_speed_max_mbps(uint32_t speed_capa)
{
	/* Index i holds the rate for ETH_LINK_SPEED_* capability bit i */
	uint32_t rate_mbps[32] = {
		ETH_SPEED_NUM_NONE,
		ETH_SPEED_NUM_10M,
		ETH_SPEED_NUM_10M,
		ETH_SPEED_NUM_100M,
		ETH_SPEED_NUM_100M,
		ETH_SPEED_NUM_1G,
		ETH_SPEED_NUM_2_5G,
		ETH_SPEED_NUM_5G,
		ETH_SPEED_NUM_10G,
		ETH_SPEED_NUM_20G,
		ETH_SPEED_NUM_25G,
		ETH_SPEED_NUM_40G,
		ETH_SPEED_NUM_50G,
		ETH_SPEED_NUM_56G,
		ETH_SPEED_NUM_100G,
	};

	/* Highest set bit of speed_capa selects the maximum supported rate */
	uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;
	return rate_mbps[pos];
}
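
/*
 * Worked example (illustrative): for a 10G NIC, the highest set bit of
 * speed_capa is typically ETH_LINK_SPEED_10G (bit 8), so
 * 31 - __builtin_clz(speed_capa) == 8 and rate_mbps[8] == ETH_SPEED_NUM_10G,
 * i.e. 10000 Mbps.
 */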
static int
default_init(struct pmd_internals *p,
	struct pmd_params *params,
	int numa_node)
{
	p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
		2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
		0,
		numa_node);
	if (p->soft.def.pkts == NULL)
		return -ENOMEM;

	return 0;
}
static void
default_free(struct pmd_internals *p)
{
	rte_free(p->soft.def.pkts);
}
static void *
pmd_init(struct pmd_params *params, int numa_node)
{
	struct pmd_internals *p;
	int status;

	p = rte_zmalloc_socket(params->soft.name,
		sizeof(struct pmd_internals),
		0,
		numa_node);
	if (p == NULL)
		return NULL;

	/* Params */
	memcpy(&p->params, params, sizeof(p->params));
	rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);

	/* Default */
	status = default_init(p, params, numa_node);
	if (status) {
		free(p->params.hard.name);
		rte_free(p);
		return NULL;
	}

	/* Traffic Management (TM) */
	if (params->soft.flags & PMD_FEATURE_TM) {
		status = tm_init(p, params, numa_node);
		if (status) {
			default_free(p);
			free(p->params.hard.name);
			rte_free(p);
			return NULL;
		}
	}

	return p;
}
static void
pmd_free(struct pmd_internals *p)
{
	if (p->params.soft.flags & PMD_FEATURE_TM)
		tm_free(p);

	default_free(p);

	free(p->params.hard.name);
	rte_free(p);
}
static int
pmd_ethdev_register(struct rte_vdev_device *vdev,
	struct pmd_params *params,
	void *dev_private)
{
	struct rte_eth_dev_info hard_info;
	struct rte_eth_dev *soft_dev;
	uint32_t hard_speed;
	int numa_node;
	uint16_t hard_port_id;

	rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
	rte_eth_dev_info_get(hard_port_id, &hard_info);
	hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
	numa_node = rte_eth_dev_socket_id(hard_port_id);

	/* Ethdev entry allocation */
	soft_dev = rte_eth_dev_allocate(params->soft.name);
	if (!soft_dev)
		return -ENOMEM;

	/* dev */
	soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
		NULL : /* set up later */
		pmd_rx_pkt_burst;
	soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
	soft_dev->tx_pkt_prepare = NULL;
	soft_dev->dev_ops = &pmd_ops;
	soft_dev->device = &vdev->device;

	/* dev->data */
	soft_dev->data->dev_private = dev_private;
	soft_dev->data->dev_link.link_speed = hard_speed;
	soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	soft_dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
	soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	soft_dev->data->mac_addrs = &eth_addr;
	soft_dev->data->promiscuous = 1;
	soft_dev->data->kdrv = RTE_KDRV_NONE;
	soft_dev->data->numa_node = numa_node;

	return 0;
}
static int
get_string(const char *key __rte_unused, const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}
static int
get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(uint32_t *)extra_args = strtoull(value, NULL, 0);

	return 0;
}
static int
pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
{
	struct rte_kvargs *kvlist;
	int i, ret;

	kvlist = rte_kvargs_parse(params, pmd_valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Set default values */
	memset(p, 0, sizeof(*p));
	p->soft.name = name;
	p->soft.intrusive = INTRUSIVE;
	p->soft.tm.rate = 0;
	p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
	p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
	p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
	p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;
	/* SOFT: TM (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
		char *s;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
			&get_string, &s);
		if (ret < 0)
			goto out_free;

		if (strcmp(s, "on") == 0)
			p->soft.flags |= PMD_FEATURE_TM;
		else if (strcmp(s, "off") == 0)
			p->soft.flags &= ~PMD_FEATURE_TM;
		else
			ret = -EINVAL;

		free(s);
		if (ret)
			goto out_free;
	}
	/* SOFT: TM rate (measured in bytes/second) (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
			&get_uint32, &p->soft.tm.rate);
		if (ret < 0)
			goto out_free;

		p->soft.flags |= PMD_FEATURE_TM;
	}
	/* SOFT: TM number of queues (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
			&get_uint32, &p->soft.tm.nb_queues);
		if (ret < 0)
			goto out_free;

		p->soft.flags |= PMD_FEATURE_TM;
	}
	/* SOFT: TM queue size 0 .. 3 (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;

		p->soft.tm.qsize[0] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;

		p->soft.tm.qsize[1] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;

		p->soft.tm.qsize[2] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;

		p->soft.tm.qsize[3] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}
	/* SOFT: TM enqueue burst size (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
			&get_uint32, &p->soft.tm.enq_bsz);
		if (ret < 0)
			goto out_free;

		p->soft.flags |= PMD_FEATURE_TM;
	}
	/* SOFT: TM dequeue burst size (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
			&get_uint32, &p->soft.tm.deq_bsz);
		if (ret < 0)
			goto out_free;

		p->soft.flags |= PMD_FEATURE_TM;
	}
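
	/* Note: specifying any soft_tm_* parameter implies soft_tm=on, since
	 * every soft_tm_* block above also sets PMD_FEATURE_TM.
	 */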
	/* HARD: name (mandatory) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
			&get_string, &p->hard.name);
		if (ret < 0)
			goto out_free;
	} else {
		ret = -EINVAL;
		goto out_free;
	}
	/* HARD: tx_queue_id (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
			&get_uint32, &p->hard.tx_queue_id);
		if (ret < 0)
			goto out_free;
	}

out_free:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
pmd_probe(struct rte_vdev_device *vdev)
{
	struct pmd_params p;
	const char *params;
	int status;
	struct rte_eth_dev_info hard_info;
	uint32_t hard_speed;
	uint16_t hard_port_id;
	int numa_node;
	void *dev_private;

	RTE_LOG(INFO, PMD,
		"Probing device \"%s\"\n",
		rte_vdev_device_name(vdev));

	/* Parse input arguments */
	params = rte_vdev_device_args(vdev);
	if (!params)
		return -EINVAL;
	status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
	if (status)
		return status;

	/* Check input arguments */
	if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id))
		return -EINVAL;
	rte_eth_dev_info_get(hard_port_id, &hard_info);
	hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
	numa_node = rte_eth_dev_socket_id(hard_port_id);
	if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
		return -EINVAL;

	if (p.soft.flags & PMD_FEATURE_TM) {
		status = tm_params_check(&p, hard_speed);
		if (status)
			return status;
	}

	/* Allocate and initialize soft ethdev private data */
	dev_private = pmd_init(&p, numa_node);
	if (dev_private == NULL)
		return -ENOMEM;

	/* Register soft ethdev */
	RTE_LOG(INFO, PMD,
		"Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n",
		p.soft.name, p.hard.name);
	status = pmd_ethdev_register(vdev, &p, dev_private);
	if (status) {
		pmd_free(dev_private);
		return status;
	}

	return 0;
}
static int
pmd_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev = NULL;
	struct pmd_internals *p;

	if (!vdev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Removing device \"%s\"\n",
		rte_vdev_device_name(vdev));

	/* Find the ethdev entry */
	dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (dev == NULL)
		return -ENODEV;
	p = dev->data->dev_private;

	/* Free device data structures */
	pmd_free(p);
	rte_free(dev->data);
	rte_eth_dev_release_port(dev);

	return 0;
}
static struct rte_vdev_driver pmd_softnic_drv = {
	.probe = pmd_probe,
	.remove = pmd_remove,
};

RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
	PMD_PARAM_SOFT_TM "=on|off "
	PMD_PARAM_SOFT_TM_RATE "=<int> "
	PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
	PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
	PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
	PMD_PARAM_HARD_NAME "=<string> "
	PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");