X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fenic%2Fenic_ethdev.c;h=31614f3fd59852a90fe304e85f7fb80df274eccc;hb=f7a9461e29147c47ce2bb81bd157ac1833cf5eb1;hp=3c87b49eadd228ca7570d17b47c773d862616997;hpb=a41e6ff15809d40e0f9bbc9576bf8f7f80fbec1d;p=deb_dpdk.git

diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 3c87b49e..31614f3f 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -38,6 +38,7 @@
 #include 
 #include 
 #include 
+#include <rte_kvargs.h>
 #include 
 
 #include "vnic_intr.h"
@@ -64,6 +65,8 @@ static const struct rte_pci_id pci_id_enic_map[] = {
 	{.vendor_id = 0, /* sentinel */},
 };
 
+#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
+
 static int
 enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
 			enum rte_filter_op filter_op, void *arg)
@@ -95,10 +98,12 @@ enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
 		break;
 
 	case RTE_ETH_FILTER_FLUSH:
-	case RTE_ETH_FILTER_INFO:
 		dev_warning(enic, "unsupported operation %u", filter_op);
 		ret = -ENOTSUP;
 		break;
+	case RTE_ETH_FILTER_INFO:
+		enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
+		break;
 	default:
 		dev_err(enic, "unknown operation %u", filter_op);
 		ret = -EINVAL;
@@ -142,9 +147,21 @@ static int enicpmd_dev_setup_intr(struct enic *enic)
 		if (!enic->cq[index].ctrl)
 			break;
 	}
 	if (enic->cq_count != index)
 		return 0;
+	for (index = 0; index < enic->wq_count; index++) {
+		if (!enic->wq[index].ctrl)
+			break;
+	}
+	if (enic->wq_count != index)
+		return 0;
+	/* check start of packet (SOP) RQs only in case scatter is disabled. */
+	for (index = 0; index < enic->rq_count; index++) {
+		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
+			break;
+	}
+	if (enic->rq_count != index)
+		return 0;
 
 	ret = enic_alloc_intr_resources(enic);
 	if (ret) {
@@ -170,13 +187,7 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	struct enic *enic = pmd_priv(eth_dev);
 
 	ENICPMD_FUNC_TRACE();
-	if (queue_idx >= ENIC_WQ_MAX) {
-		dev_err(enic,
-			"Max number of TX queues exceeded. Max is %d\n",
Max is %d\n", - ENIC_WQ_MAX); - return -EINVAL; - } - + RTE_ASSERT(queue_idx < enic->conf_wq_count); eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx]; ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc); @@ -196,7 +207,6 @@ static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev, ENICPMD_FUNC_TRACE(); enic_start_wq(enic, queue_idx); - eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED; return 0; } @@ -212,8 +222,6 @@ static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, ret = enic_stop_wq(enic, queue_idx); if (ret) dev_err(enic, "error in stopping wq %d\n", queue_idx); - else - eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED; return ret; } @@ -226,7 +234,6 @@ static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev, ENICPMD_FUNC_TRACE(); enic_start_rq(enic, queue_idx); - eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED; return 0; } @@ -242,8 +249,6 @@ static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, ret = enic_stop_rq(enic, queue_idx); if (ret) dev_err(enic, "error in stopping rq %d\n", queue_idx); - else - eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED; return ret; } @@ -254,6 +259,35 @@ static void enicpmd_dev_rx_queue_release(void *rxq) enic_free_rq(rxq); } +static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev, + uint16_t rx_queue_id) +{ + struct enic *enic = pmd_priv(dev); + uint32_t queue_count = 0; + struct vnic_cq *cq; + uint32_t cq_tail; + uint16_t cq_idx; + int rq_num; + + if (rx_queue_id >= dev->data->nb_rx_queues) { + dev_err(enic, "Invalid RX queue id=%d", rx_queue_id); + return 0; + } + + rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id); + cq = &enic->cq[enic_cq_rq(enic, rq_num)]; + cq_idx = cq->to_clean; + + cq_tail = ioread32(&cq->ctrl->cq_tail); + + if (cq_tail < cq_idx) + cq_tail += cq->ring.desc_count; + + queue_count = cq_tail - cq_idx; + + return queue_count; +} + static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx, uint16_t nb_desc, @@ -265,29 +299,18 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, struct enic *enic = pmd_priv(eth_dev); ENICPMD_FUNC_TRACE(); - /* With Rx scatter support, two RQs are now used on VIC per RQ used - * by the application. - */ - if (queue_idx * 2 >= ENIC_RQ_MAX) { - dev_err(enic, - "Max number of RX queues exceeded. Max is %d. 
-			ENIC_RQ_MAX);
-		return -EINVAL;
-	}
+	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
 
 	eth_dev->data->rx_queues[queue_idx] =
-		(void *)&enic->rq[enic_sop_rq(queue_idx)];
+		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
 
-	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
+	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
+		rx_conf->rx_free_thresh);
 	if (ret) {
 		dev_err(enic, "error in allocating rq\n");
 		return ret;
 	}
 
-	enic->rq[queue_idx].rx_free_thresh = rx_conf->rx_free_thresh;
-	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
-		enic->rq[queue_idx].rx_free_thresh);
-
 	return enicpmd_dev_setup_intr(enic);
 }
@@ -350,6 +373,7 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
 			eth_dev->data->dev_conf.rxmode.split_hdr_size);
 	}
 
+	enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
 	enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
 	return 0;
 }
@@ -396,17 +420,9 @@ static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
 	__rte_unused int wait_to_complete)
 {
 	struct enic *enic = pmd_priv(eth_dev);
-	int ret;
-	int link_status = 0;
 
 	ENICPMD_FUNC_TRACE();
-	link_status = enic_get_link_status(enic);
-	ret = (link_status == enic->link_status);
-	enic->link_status = link_status;
-	eth_dev->data->dev_link.link_status = link_status;
-	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-	return ret;
+	return enic_link_update(enic);
 }
 
 static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
@@ -436,8 +452,7 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
 	device_info->max_rx_queues = enic->conf_rq_count / 2;
 	device_info->max_tx_queues = enic->conf_wq_count;
 	device_info->min_rx_bufsize = ENIC_MIN_MTU;
-	device_info->max_rx_pktlen = enic->rte_dev->data->mtu
-			+ ETHER_HDR_LEN + 4;
+	device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
 	device_info->max_mac_addrs = 1;
 	device_info->rx_offload_capa =
 		DEV_RX_OFFLOAD_VLAN_STRIP |
@@ -457,6 +472,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
 static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
 	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L2_ETHER_VLAN,
 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
 		RTE_PTYPE_L4_TCP,
@@ -561,7 +578,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
 	.tx_queue_stop = enicpmd_dev_tx_queue_stop,
 	.rx_queue_setup = enicpmd_dev_rx_queue_setup,
 	.rx_queue_release = enicpmd_dev_rx_queue_release,
-	.rx_queue_count = NULL,
+	.rx_queue_count = enicpmd_dev_rx_queue_count,
 	.rx_descriptor_done = NULL,
 	.tx_queue_setup = enicpmd_dev_tx_queue_setup,
 	.tx_queue_release = enicpmd_dev_tx_queue_release,
@@ -575,7 +592,64 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
 	.filter_ctrl = enicpmd_dev_filter_ctrl,
 };
 
-struct enic *enicpmd_list_head = NULL;
+static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
+				      const char *value,
+				      void *opaque)
+{
+	struct enic *enic;
+
+	enic = (struct enic *)opaque;
+	if (strcmp(value, "trunk") == 0) {
+		/* Trunk mode: always tag */
+		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
+	} else if (strcmp(value, "untag") == 0) {
+		/* Untag default VLAN mode: untag if VLAN = default VLAN */
+		enic->ig_vlan_rewrite_mode =
+			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
+	} else if (strcmp(value, "priority") == 0) {
+		/*
+		 * Priority-tag default VLAN mode: priority tag (VLAN header
+		 * with ID=0) if VLAN = default
+		 */
+		enic->ig_vlan_rewrite_mode =
+			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
+	} else if (strcmp(value, "pass") == 0) {
+		/* Pass through mode: do not touch tags */
+		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
+	} else {
+		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
+			": expected=trunk|untag|priority|pass given=%s\n",
+			value);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int enic_check_devargs(struct rte_eth_dev *dev)
+{
+	static const char *valid_keys[] = {
+		ENIC_DEVARG_IG_VLAN_REWRITE,
+		NULL};
+	struct enic *enic = pmd_priv(dev);
+	struct rte_kvargs *kvlist;
+
+	ENICPMD_FUNC_TRACE();
+
+	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
+	if (!dev->pci_dev->device.devargs)
+		return 0;
+	kvlist = rte_kvargs_parse(dev->pci_dev->device.devargs->args, valid_keys);
+	if (!kvlist)
+		return -EINVAL;
+	if (rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
+			       enic_parse_ig_vlan_rewrite, enic) < 0) {
+		rte_kvargs_free(kvlist);
+		return -EINVAL;
+	}
+	rte_kvargs_free(kvlist);
+	return 0;
+}
+
 /* Initialize the driver
  * It returns 0 on success.
  */
@@ -584,6 +658,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
 	struct rte_pci_device *pdev;
 	struct rte_pci_addr *addr;
 	struct enic *enic = pmd_priv(eth_dev);
+	int err;
 
 	ENICPMD_FUNC_TRACE();
 
@@ -601,37 +676,24 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
 	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
 		addr->domain, addr->bus, addr->devid, addr->function);
 
+	err = enic_check_devargs(eth_dev);
+	if (err)
+		return err;
 	return enic_probe(enic);
 }
 
 static struct eth_driver rte_enic_pmd = {
 	.pci_drv = {
-		.name = "rte_enic_pmd",
 		.id_table = pci_id_enic_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+		.probe = rte_eth_dev_pci_probe,
+		.remove = rte_eth_dev_pci_remove,
 	},
 	.eth_dev_init = eth_enicpmd_dev_init,
 	.dev_private_size = sizeof(struct enic),
};
 
-/* Driver initialization routine.
- * Invoked once at EAL init time.
- * Register as the [Poll Mode] Driver of Cisco ENIC device.
- */
-static int
-rte_enic_pmd_init(__rte_unused const char *name,
-	__rte_unused const char *params)
-{
-	ENICPMD_FUNC_TRACE();
-
-	rte_eth_driver_register(&rte_enic_pmd);
-	return 0;
-}
-
-static struct rte_driver rte_enic_driver = {
-	.type = PMD_PDEV,
-	.init = rte_enic_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_enic_driver, enic);
-DRIVER_REGISTER_PCI_TABLE(enic, pci_id_enic_map);
+RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
+RTE_PMD_REGISTER_PARAM_STRING(net_enic,
+	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
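
Note (editor's addition, not part of the patch): the hunks above wire the new ENIC_DEVARG_IG_VLAN_REWRITE ("ig-vlan-rewrite") device argument into enic_check_devargs() using the rte_kvargs API. The stand-alone sketch below shows that same parse/process/free pattern in isolation; the handler name, the hard-coded devargs string and the main() scaffolding are illustrative assumptions, not enic code. With an EAL of this vintage the argument would typically reach the driver through a PCI whitelist option such as "-w 0000:0b:00.0,ig-vlan-rewrite=untag" (placeholder address).

/*
 * Hypothetical demo of the rte_kvargs pattern used by enic_check_devargs();
 * only rte_kvargs_parse/rte_kvargs_process/rte_kvargs_free are real DPDK APIs.
 */
#include <stdio.h>
#include <string.h>
#include <rte_kvargs.h>

static int
demo_handle_ig_vlan_rewrite(const char *key, const char *value, void *opaque)
{
	(void)key;
	(void)opaque;
	/* Accept the same four values as enic_parse_ig_vlan_rewrite(). */
	if (strcmp(value, "trunk") && strcmp(value, "untag") &&
	    strcmp(value, "priority") && strcmp(value, "pass"))
		return -1;
	printf("ig-vlan-rewrite = %s\n", value);
	return 0;
}

int
main(void)
{
	/* What the driver would receive after "-w <BDF>,ig-vlan-rewrite=untag". */
	static const char *valid_keys[] = { "ig-vlan-rewrite", NULL };
	struct rte_kvargs *kvlist;
	int ret;

	kvlist = rte_kvargs_parse("ig-vlan-rewrite=untag", valid_keys);
	if (kvlist == NULL)
		return 1;
	ret = rte_kvargs_process(kvlist, "ig-vlan-rewrite",
				 demo_handle_ig_vlan_rewrite, NULL);
	rte_kvargs_free(kvlist);
	return ret < 0 ? 1 : 0;
}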