X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Frdma%2Fdevice.c;h=c91567445bf5e0e4dc997505c9a74be2bb632627;hb=f916414b3;hp=b6609ca4bdcee30a919aff0ce339d73890b1257f;hpb=5763e47b7d0d9de7a46541e432abee39e17b5f1e;p=vpp.git

diff --git a/src/plugins/rdma/device.c b/src/plugins/rdma/device.c
index b6609ca4bdc..c91567445bf 100644
--- a/src/plugins/rdma/device.c
+++ b/src/plugins/rdma/device.c
@@ -45,20 +45,155 @@ static u8 rdma_rss_hash_key[] = {
 rdma_main_t rdma_main;
 
-#define rdma_log_debug(dev, f, ...) \
-{ \
-  vlib_log(VLIB_LOG_LEVEL_DEBUG, rdma_main.log_class, "%U: " f, \
-           format_vlib_pci_addr, &rd->pci_addr, ##__VA_ARGS__); \
-};
+#define rdma_log__(lvl, dev, f, ...) \
+  do { \
+      vlib_log((lvl), rdma_main.log_class, "%s: " f, \
+               &(dev)->name, ##__VA_ARGS__); \
+  } while (0)
+
+#define rdma_log(lvl, dev, f, ...) \
+  rdma_log__((lvl), (dev), "%s (%d): " f, strerror(errno), errno, ##__VA_ARGS__)
+
+static struct ibv_flow *
+rdma_rxq_init_flow (const rdma_device_t * rd, struct ibv_qp *qp,
+		    const mac_address_t * mac, const mac_address_t * mask,
+		    u32 flags)
+{
+  struct ibv_flow *flow;
+  struct raw_eth_flow_attr
+  {
+    struct ibv_flow_attr attr;
+    struct ibv_flow_spec_eth spec_eth;
+  } __attribute__ ((packed)) fa;
+
+  memset (&fa, 0, sizeof (fa));
+  fa.attr.num_of_specs = 1;
+  fa.attr.port = 1;
+  fa.attr.flags = flags;
+  fa.spec_eth.type = IBV_FLOW_SPEC_ETH;
+  fa.spec_eth.size = sizeof (struct ibv_flow_spec_eth);
+
+  memcpy (fa.spec_eth.val.dst_mac, mac, sizeof (fa.spec_eth.val.dst_mac));
+  memcpy (fa.spec_eth.mask.dst_mac, mask, sizeof (fa.spec_eth.mask.dst_mac));
+
+  flow = ibv_create_flow (qp, &fa.attr);
+  if (!flow)
+    rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_create_flow() failed");
+  return flow;
+}
 
 static u32
-rdma_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
+rdma_rxq_destroy_flow (const rdma_device_t * rd, struct ibv_flow **flow)
+{
+  if (!*flow)
+    return 0;
+
+  if (ibv_destroy_flow (*flow))
+    {
+      rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_destroy_flow() failed");
+      return ~0;
+    }
+
+  *flow = 0;
+  return 0;
+}
+
+static u32
+rdma_dev_set_promisc (rdma_device_t * rd)
+{
+  const mac_address_t all = {.bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0} };
+  int err;
+
+  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast);
+  if (err)
+    return ~0;
+
+  err = rdma_rxq_destroy_flow (rd, &rd->flow_ucast);
+  if (err)
+    return ~0;
+
+  rd->flow_ucast = rdma_rxq_init_flow (rd, rd->rx_qp, &all, &all, 0);
+  if (!rd->flow_ucast)
+    return ~0;
+
+  rd->flags |= RDMA_DEVICE_F_PROMISC;
+  return 0;
+}
+
+static u32
+rdma_dev_set_ucast (rdma_device_t * rd)
+{
+  const mac_address_t ucast = {.bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+  };
+  const mac_address_t mcast = {.bytes = {0x1, 0x0, 0x0, 0x0, 0x0, 0x0} };
+  int err;
+
+  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast);
+  if (err)
+    return ~0;
+
+  err = rdma_rxq_destroy_flow (rd, &rd->flow_ucast);
+  if (err)
+    return ~0;
+
+  /* receive only packets with dst = our MAC */
+  rd->flow_ucast = rdma_rxq_init_flow (rd, rd->rx_qp, &rd->hwaddr, &ucast, 0);
+  if (!rd->flow_ucast)
+    return ~0;
+
+  /* receive multicast packets */
+  rd->flow_mcast = rdma_rxq_init_flow (rd, rd->rx_qp, &mcast, &mcast,
+				       IBV_FLOW_ATTR_FLAGS_DONT_TRAP
+				       /* let others receive mcast packet too (eg. Linux) */
+    );
+  if (!rd->flow_mcast)
+    return ~0;
+
+  rd->flags &= ~RDMA_DEVICE_F_PROMISC;
+  return 0;
+}
+
+static clib_error_t *
+rdma_mac_change (vnet_hw_interface_t * hw, const u8 * old, const u8 * new)
 {
   rdma_main_t *rm = &rdma_main;
-  vlib_log_warn (rm->log_class, "TODO");
+  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);
+  mac_address_from_bytes (&rd->hwaddr, new);
+  if (!(rd->flags & RDMA_DEVICE_F_PROMISC) && rdma_dev_set_ucast (rd))
+    {
+      mac_address_from_bytes (&rd->hwaddr, old);
+      return clib_error_return_unix (0, "MAC update failed");
+    }
   return 0;
 }
 
+static u32
+rdma_dev_change_mtu (rdma_device_t * rd)
+{
+  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "MTU change not supported");
+  return ~0;
+}
+
+static u32
+rdma_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
+{
+  rdma_main_t *rm = &rdma_main;
+  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);
+
+  switch (flags)
+    {
+    case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
+      return rdma_dev_set_ucast (rd);
+    case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
+      return rdma_dev_set_promisc (rd);
+    case ETHERNET_INTERFACE_FLAG_MTU:
+      return rdma_dev_change_mtu (rd);
+    }
+
+  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unknown flag %x requested", flags);
+  return ~0;
+}
+
 static void
 rdma_update_state (vnet_main_t * vnm, rdma_device_t * rd, int port)
 {
@@ -131,8 +266,7 @@ rdma_async_event_error_ready (clib_file_t * f)
 {
   rdma_main_t *rm = &rdma_main;
   rdma_device_t *rd = vec_elt_at_index (rm->devices, f->private_data);
-  return clib_error_return (0, "RDMA async event error for device %U",
-			    format_vlib_pci_addr, &rd->pci_addr);
+  return clib_error_return (0, "RDMA: %s: async event error", rd->name);
 }
 
 static clib_error_t *
@@ -145,9 +279,7 @@ rdma_async_event_read_ready (clib_file_t * f)
   struct ibv_async_event event;
   ret = ibv_get_async_event (rd->ctx, &event);
   if (ret < 0)
-    {
-      return clib_error_return_unix (0, "ibv_get_async_event() failed");
-    }
+    return clib_error_return_unix (0, "ibv_get_async_event() failed");
 
   switch (event.event_type)
     {
@@ -160,13 +292,11 @@ rdma_async_event_read_ready (clib_file_t * f)
     case IBV_EVENT_DEVICE_FATAL:
       rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
       vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
-      vlib_log_emerg (rm->log_class, "Fatal RDMA error for device %U",
-		      format_vlib_pci_addr, &rd->pci_addr);
+      vlib_log_emerg (rm->log_class, "%s: fatal error", rd->name);
       break;
     default:
-      vlib_log_warn (rm->log_class,
-		     "Unhandeld RDMA async event %i for device %U",
-		     event.event_type, format_vlib_pci_addr, &rd->pci_addr);
+      rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unhandled RDMA async event %i",
+		  event.event_type);
       break;
     }
 
@@ -183,25 +313,20 @@ rdma_async_event_init (rdma_device_t * rd)
   /* make RDMA async event fd non-blocking */
   ret = fcntl (rd->ctx->async_fd, F_GETFL);
   if (ret < 0)
-    {
-      return clib_error_return_unix (0, "fcntl(F_GETFL) failed");
-    }
+    return clib_error_return_unix (0, "fcntl(F_GETFL) failed");
+
   ret = fcntl (rd->ctx->async_fd, F_SETFL, ret | O_NONBLOCK);
   if (ret < 0)
-    {
-      return clib_error_return_unix (0, "fcntl(F_SETFL, O_NONBLOCK) failed");
-    }
+    return clib_error_return_unix (0, "fcntl(F_SETFL, O_NONBLOCK) failed");
 
   /* register RDMA async event fd */
   t.read_function = rdma_async_event_read_ready;
   t.file_descriptor = rd->ctx->async_fd;
   t.error_function = rdma_async_event_error_ready;
   t.private_data = rd->dev_instance;
-  t.description =
-    format (0, "RMDA %U async event", format_vlib_pci_addr, &rd->pci_addr);
+  t.description = format (0, "%v async event", rd->name);
 
   rd->async_event_clib_file_index = clib_file_add (&file_main, &t);
-
   return 0;
 }
 
@@ -214,9 +339,18 @@ rdma_async_event_cleanup (rdma_device_t * rd)
 static clib_error_t *
 rdma_register_interface (vnet_main_t * vnm, rdma_device_t * rd)
 {
-  return ethernet_register_interface (vnm, rdma_device_class.index,
-				      rd->dev_instance, rd->hwaddr.bytes,
-				      &rd->hw_if_index, rdma_flag_change);
+  clib_error_t *err =
+    ethernet_register_interface (vnm, rdma_device_class.index,
+				 rd->dev_instance, rd->hwaddr.bytes,
+				 &rd->hw_if_index, rdma_flag_change);
+
+  /* Indicate ability to support L3 DMAC filtering and
+   * initialize interface to L3 non-promisc mode */
+  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rd->hw_if_index);
+  hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER;
+  ethernet_set_flags (vnm, rd->hw_if_index,
+		      ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
+  return err;
 }
 
 static void
@@ -238,7 +372,7 @@ rdma_dev_cleanup (rdma_device_t * rd)
   { \
     int rv; \
     if ((rv = fn (arg))) \
-      rdma_log_debug (rd, #fn "() failed (rv = %d)", rv); \
+      rdma_log (VLIB_LOG_LEVEL_DEBUG, rd, #fn "() failed (rv = %d)", rv); \
   }
 
   _(ibv_destroy_flow, rd->flow_mcast);
@@ -265,49 +399,41 @@ rdma_dev_cleanup (rdma_device_t * rd)
   vec_free (rd->rxqs);
   vec_free (rd->txqs);
   vec_free (rd->name);
+  vlib_pci_free_device_info (rd->pci);
   pool_put (rm->devices, rd);
 }
 
-static clib_error_t *
-rdma_rxq_init_flow (struct ibv_flow **flow, struct ibv_qp *qp,
-		    const mac_address_t * mac, const mac_address_t * mask,
-		    u32 flags)
-{
-  struct raw_eth_flow_attr
-  {
-    struct ibv_flow_attr attr;
-    struct ibv_flow_spec_eth spec_eth;
-  } __attribute__ ((packed)) fa;
-
-  memset (&fa, 0, sizeof (fa));
-  fa.attr.num_of_specs = 1;
-  fa.attr.port = 1;
-  fa.attr.flags = flags;
-  fa.spec_eth.type = IBV_FLOW_SPEC_ETH;
-  fa.spec_eth.size = sizeof (struct ibv_flow_spec_eth);
-
-  memcpy (fa.spec_eth.val.dst_mac, mac, sizeof (fa.spec_eth.val.dst_mac));
-  memcpy (fa.spec_eth.mask.dst_mac, mask, sizeof (fa.spec_eth.mask.dst_mac));
-
-  if ((*flow = ibv_create_flow (qp, &fa.attr)) == 0)
-    return clib_error_return_unix (0, "create Flow Failed");
-
-  return 0;
-}
-
 static clib_error_t *
 rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
 {
   rdma_rxq_t *rxq;
   struct ibv_wq_init_attr wqia;
+  struct ibv_cq_init_attr_ex cqa = { };
   struct ibv_wq_attr wqa;
+  struct ibv_cq_ex *cqex;
 
   vec_validate_aligned (rd->rxqs, qid, CLIB_CACHE_LINE_BYTES);
   rxq = vec_elt_at_index (rd->rxqs, qid);
   rxq->size = n_desc;
+  vec_validate_aligned (rxq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);
 
-  if ((rxq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0)
-    return clib_error_return_unix (0, "Create CQ Failed");
+  cqa.cqe = n_desc;
+  if (rd->flags & RDMA_DEVICE_F_MLX5DV)
+    {
+      struct mlx5dv_cq_init_attr dvcq = { };
+      dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+      dvcq.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+
+      if ((cqex = mlx5dv_create_cq (rd->ctx, &cqa, &dvcq)) == 0)
+	return clib_error_return_unix (0, "Create mlx5dv rx CQ Failed");
+    }
+  else
+    {
+      if ((cqex = ibv_create_cq_ex (rd->ctx, &cqa)) == 0)
+	return clib_error_return_unix (0, "Create CQ Failed");
    }
+
+  rxq->cq = ibv_cq_ex_to_cq (cqex);
 
   memset (&wqia, 0, sizeof (wqia));
   wqia.wq_type = IBV_WQT_RQ;
@@ -324,6 +450,44 @@ rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
   if (ibv_modify_wq (rxq->wq, &wqa) != 0)
     return clib_error_return_unix (0, "Modify WQ (RDY) Failed");
 
+  if (rd->flags & RDMA_DEVICE_F_MLX5DV)
+    {
+      struct mlx5dv_obj obj = { };
+      struct mlx5dv_cq dv_cq;
+      struct mlx5dv_rwq dv_rwq;
+      u64 qw0;
+
+      obj.cq.in = rxq->cq;
+      obj.cq.out = &dv_cq;
+      obj.rwq.in = rxq->wq;
+      obj.rwq.out = &dv_rwq;
+
+      if ((mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ)))
+	return clib_error_return_unix (0, "mlx5dv: failed to init rx obj");
+
+      if (dv_cq.cqe_size != sizeof (mlx5dv_cqe_t))
+	return clib_error_return_unix (0, "mlx5dv: incompatible rx CQE size");
+
+      rxq->log2_cq_size = max_log2 (dv_cq.cqe_cnt);
+      rxq->cqes = (mlx5dv_cqe_t *) dv_cq.buf;
+      rxq->cq_db = (volatile u32 *) dv_cq.dbrec;
+      rxq->cqn = dv_cq.cqn;
+
+      rxq->wqes = (mlx5dv_rwq_t *) dv_rwq.buf;
+      rxq->wq_db = (volatile u32 *) dv_rwq.dbrec;
+      rxq->wq_stride = dv_rwq.stride;
+      rxq->wqe_cnt = dv_rwq.wqe_cnt;
+
+      qw0 = clib_host_to_net_u32 (vlib_buffer_get_default_data_size (vm));
+      qw0 |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;
+
+      for (int i = 0; i < rxq->size; i++)
+	rxq->wqes[i].dsz_and_lkey = qw0;
+
+      for (int i = 0; i < (1 << rxq->log2_cq_size); i++)
+	rxq->cqes[i].opcode_cqefmt_se_owner = 0xff;
+    }
+
   return 0;
 }
 
@@ -332,11 +496,7 @@ rdma_rxq_finalize (vlib_main_t * vm, rdma_device_t * rd)
 {
   struct ibv_rwq_ind_table_init_attr rwqia;
   struct ibv_qp_init_attr_ex qpia;
-  const mac_address_t ucast = {.bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
-  };
-  const mac_address_t mcast = {.bytes = {0x1, 0x0, 0x0, 0x0, 0x0, 0x0} };
   struct ibv_wq **ind_tbl;
-  clib_error_t *err;
   u32 i;
 
   ASSERT (is_pow2 (vec_len (rd->rxqs))
@@ -368,16 +528,10 @@ rdma_rxq_finalize (vlib_main_t * vm, rdma_device_t * rd)
   if ((rd->rx_qp = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
     return clib_error_return_unix (0, "Queue Pair create failed");
 
-  /* receive only packets with src = our MAC */
-  if ((err =
-       rdma_rxq_init_flow (&rd->flow_ucast, rd->rx_qp, &rd->hwaddr, &ucast,
-			   0)) != 0)
-    return err;
-
-  /* receive multicast packets */
-  return rdma_rxq_init_flow (&rd->flow_mcast, rd->rx_qp, &mcast, &mcast,
-			     IBV_FLOW_ATTR_FLAGS_DONT_TRAP
-			     /* let others receive mcast packet too (eg. Linux) */
-    );
+  if (rdma_dev_set_ucast (rd))
+    return clib_error_return_unix (0, "Set unicast mode failed");
+
+  return 0;
 }
 
 static clib_error_t *
@@ -390,7 +544,9 @@ rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
 
   vec_validate_aligned (rd->txqs, qid, CLIB_CACHE_LINE_BYTES);
   txq = vec_elt_at_index (rd->txqs, qid);
-  txq->size = n_desc;
+  ASSERT (is_pow2 (n_desc));
+  txq->bufs_log2sz = min_log2 (n_desc);
+  vec_validate_aligned (txq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);
 
   if ((txq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0)
     return clib_error_return_unix (0, "Create CQ Failed");
@@ -401,7 +557,6 @@ rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
   qpia.cap.max_send_wr = n_desc;
   qpia.cap.max_send_sge = 1;
   qpia.qp_type = IBV_QPT_RAW_PACKET;
-  qpia.sq_sig_all = 1;
 
   if ((txq->qp = ibv_create_qp (rd->pd, &qpia)) == 0)
     return clib_error_return_unix (0, "Queue Pair create failed");
@@ -424,6 +579,57 @@ rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
   qpa.qp_state = IBV_QPS_RTS;
   if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
     return clib_error_return_unix (0, "Modify QP (send) Failed");
+
+  txq->ibv_cq = txq->cq;
+  txq->ibv_qp = txq->qp;
+
+  if (rd->flags & RDMA_DEVICE_F_MLX5DV)
+    {
+      rdma_mlx5_wqe_t *tmpl = (void *) txq->dv_wqe_tmpl;
+      struct mlx5dv_cq dv_cq;
+      struct mlx5dv_qp dv_qp;
+      struct mlx5dv_obj obj = { };
+
+      obj.cq.in = txq->cq;
+      obj.cq.out = &dv_cq;
+      obj.qp.in = txq->qp;
+      obj.qp.out = &dv_qp;
+
+      if (mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP))
+	return clib_error_return_unix (0, "DV init obj failed");
+
+      if (RDMA_TXQ_BUF_SZ (txq) > dv_qp.sq.wqe_cnt
+	  || !is_pow2 (dv_qp.sq.wqe_cnt)
+	  || sizeof (rdma_mlx5_wqe_t) != dv_qp.sq.stride
+	  || (uword) dv_qp.sq.buf % sizeof (rdma_mlx5_wqe_t))
+	return clib_error_return (0, "Unsupported DV SQ parameters");
+
+      if (RDMA_TXQ_BUF_SZ (txq) > dv_cq.cqe_cnt
+	  || !is_pow2 (dv_cq.cqe_cnt)
+	  || sizeof (struct mlx5_cqe64) != dv_cq.cqe_size
+	  || (uword) dv_cq.buf % sizeof (struct mlx5_cqe64))
+	return clib_error_return (0, "Unsupported DV CQ parameters");
+
+      /* get SQ and doorbell addresses */
+      txq->dv_sq_wqes = dv_qp.sq.buf;
+      txq->dv_sq_dbrec = dv_qp.dbrec;
+      txq->dv_sq_db = dv_qp.bf.reg;
+      txq->dv_sq_log2sz = min_log2 (dv_qp.sq.wqe_cnt);
+
+      /* get CQ and doorbell addresses */
+      txq->dv_cq_cqes = dv_cq.buf;
+      txq->dv_cq_dbrec = dv_cq.dbrec;
+      txq->dv_cq_log2sz = min_log2 (dv_cq.cqe_cnt);
+
+      /* init tx desc template */
+      STATIC_ASSERT_SIZEOF (txq->dv_wqe_tmpl, sizeof (*tmpl));
+      mlx5dv_set_ctrl_seg (&tmpl->ctrl, 0, MLX5_OPCODE_SEND, 0,
+			   txq->qp->qp_num, 0, RDMA_MLX5_WQE_DS, 0,
+			   RDMA_TXQ_DV_INVALID_ID);
+      tmpl->eseg.inline_hdr_sz = htobe16 (MLX5_ETH_L2_INLINE_HEADER_SIZE);
+      mlx5dv_set_data_seg (&tmpl->dseg, 0, rd->lkey, 0);
+    }
+
   return 0;
 }
 
@@ -442,22 +648,35 @@ rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd, u32 rxq_size,
   if ((rd->pd = ibv_alloc_pd (rd->ctx)) == 0)
     return clib_error_return_unix (0, "PD Alloc Failed");
 
-  ethernet_mac_address_generate (rd->hwaddr.bytes);
+  if ((rd->mr = ibv_reg_mr (rd->pd, (void *) bm->buffer_mem_start,
+			    bm->buffer_mem_size,
+			    IBV_ACCESS_LOCAL_WRITE)) == 0)
+    return clib_error_return_unix (0, "Register MR Failed");
 
-  for (i = 0; i < rxq_num; i++)
-    if ((err = rdma_rxq_init (vm, rd, i, rxq_size)))
-      return err;
-  if ((err = rdma_rxq_finalize (vm, rd)))
-    return err;
+  rd->lkey = rd->mr->lkey;	/* avoid indirection in datapath */
 
-  for (i = 0; i < tm->n_vlib_mains; i++)
-    if ((err = rdma_txq_init (vm, rd, i, txq_size)))
-      return err;
+  ethernet_mac_address_generate (rd->hwaddr.bytes);
 
   if ((rd->mr = ibv_reg_mr (rd->pd, (void *) bm->buffer_mem_start,
 			    bm->buffer_mem_size,
 			    IBV_ACCESS_LOCAL_WRITE)) == 0)
     return clib_error_return_unix (0, "Register MR Failed");
+  rd->lkey = rd->mr->lkey;	/* avoid indirection in datapath */
+
+  /*
+   * /!\ WARNING /!\ creation order is important
+   * We *must* create TX queues *before* RX queues, otherwise we will receive
+   * the broadcast packets we sent
+   */
+  for (i = 0; i < tm->n_vlib_mains; i++)
+    if ((err = rdma_txq_init (vm, rd, i, txq_size)))
+      return err;
+
+  for (i = 0; i < rxq_num; i++)
+    if ((err = rdma_rxq_init (vm, rd, i, rxq_size)))
+      return err;
+  if ((err = rdma_rxq_finalize (vm, rd)))
+    return err;
 
   return 0;
 }
@@ -470,6 +689,9 @@ sysfs_path_to_pci_addr (char *path, vlib_pci_addr_t * addr)
   u8 *s;
 
   s = clib_sysfs_link_to_name (path);
+  if (!s)
+    return 0;
+
   unformat_init_string (&in, (char *) s, strlen ((char *) s));
   rv = unformat (&in, "%U", unformat_vlib_pci_addr, addr);
   unformat_free (&in);
@@ -482,14 +704,16 @@ rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args)
 {
   vnet_main_t *vnm = vnet_get_main ();
   rdma_main_t *rm = &rdma_main;
-  rdma_device_t *rd = 0;
-  struct ibv_device **dev_list = 0;
+  rdma_device_t *rd;
+  vlib_pci_addr_t pci_addr;
+  struct ibv_device **dev_list;
   int n_devs;
-  u8 *s = 0, *s2 = 0;
+  u8 *s;
   u16 qid;
+  int i;
 
-  args->rxq_size = args->rxq_size ? args->rxq_size : 2 * VLIB_FRAME_SIZE;
-  args->txq_size = args->txq_size ? args->txq_size : 2 * VLIB_FRAME_SIZE;
+  args->rxq_size = args->rxq_size ? args->rxq_size : 1024;
+  args->txq_size = args->txq_size ? args->txq_size : 1024;
   args->rxq_num = args->rxq_num ? args->rxq_num : 1;
 
   if (!is_pow2 (args->rxq_num))
@@ -497,54 +721,67 @@ rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args)
       args->rv = VNET_API_ERROR_INVALID_VALUE;
       args->error =
 	clib_error_return (0, "rx queue number must be a power of two");
-      return;
+      goto err0;
     }
 
-  if (!is_pow2 (args->rxq_size) || !is_pow2 (args->txq_size))
+  if (args->rxq_size < VLIB_FRAME_SIZE || args->txq_size < VLIB_FRAME_SIZE ||
+      args->rxq_size > 65535 || args->txq_size > 65535 ||
+      !is_pow2 (args->rxq_size) || !is_pow2 (args->txq_size))
     {
       args->rv = VNET_API_ERROR_INVALID_VALUE;
-      args->error =
-	clib_error_return (0, "queue size must be a power of two");
-      return;
+      args->error = clib_error_return (0, "queue size must be a power of two "
				       "between %i and 65535",
				       VLIB_FRAME_SIZE);
+      goto err0;
     }
 
-  pool_get_zero (rm->devices, rd);
-  rd->dev_instance = rd - rm->devices;
-  rd->per_interface_next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
-  rd->name = vec_dup (args->name);
-
-  /* check if device exist and if it is bound to mlx5_core */
-  s = format (s, "/sys/class/net/%s/device/driver/module%c", args->ifname, 0);
-  s2 = clib_sysfs_link_to_name ((char *) s);
-
-  if (s2 == 0 || strncmp ((char *) s2, "mlx5_core", 9) != 0)
+  dev_list = ibv_get_device_list (&n_devs);
+  if (n_devs == 0)
     {
       args->error =
-	clib_error_return (0,
-			   "invalid interface (only mlx5 supported for now)");
+	clib_error_return_unix (0,
				"no RDMA devices available. Is the ib_uverbs module loaded?");
       goto err0;
     }
 
-  /* extract PCI address */
-  vec_reset_length (s);
-  s = format (s, "/sys/class/net/%s/device%c", args->ifname, 0);
-  if (sysfs_path_to_pci_addr ((char *) s, &rd->pci_addr) == 0)
+  /* get PCI address */
+  s = format (0, "/sys/class/net/%s/device%c", args->ifname, 0);
+  if (sysfs_path_to_pci_addr ((char *) s, &pci_addr) == 0)
     {
-      args->error = clib_error_return (0, "cannot find PCI address");
-      goto err0;
+      args->error =
+	clib_error_return (0, "cannot find PCI address for device ");
+      goto err1;
    }
 
-  dev_list = ibv_get_device_list (&n_devs);
-  if (n_devs == 0)
+  pool_get_zero (rm->devices, rd);
+  rd->dev_instance = rd - rm->devices;
+  rd->per_interface_next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+  rd->linux_ifname = format (0, "%s", args->ifname);
+
+  if (!args->name || 0 == args->name[0])
+    rd->name = format (0, "%s/%d", args->ifname, rd->dev_instance);
+  else
+    rd->name = format (0, "%s", args->name);
+
+  rd->pci = vlib_pci_get_device_info (vm, &pci_addr, &args->error);
+  if (!rd->pci)
+    goto err2;
+
+  /* if we failed to parse NUMA node, default to 0 */
+  if (-1 == rd->pci->numa_node)
+    rd->pci->numa_node = 0;
+
+  rd->pool = vlib_buffer_pool_get_default_for_numa (vm, rd->pci->numa_node);
+
+  if (strncmp ((char *) rd->pci->driver_name, "mlx5_core", 9))
     {
       args->error =
-	clib_error_return_unix (0,
-				"no RDMA devices available, errno = %d. "
-				"Is the ib_uverbs module loaded?", errno);
-      goto err1;
+	clib_error_return (0,
			   "invalid interface (only mlx5 supported for now)");
+      goto err2;
    }
 
-  for (int i = 0; i < n_devs; i++)
+  for (i = 0; i < n_devs; i++)
    {
      vlib_pci_addr_t addr;
 
@@ -554,15 +791,35 @@ rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args)
       if (sysfs_path_to_pci_addr ((char *) s, &addr) == 0)
 	continue;
 
-      if (addr.as_u32 != rd->pci_addr.as_u32)
+      if (addr.as_u32 != rd->pci->addr.as_u32)
 	continue;
 
       if ((rd->ctx = ibv_open_device (dev_list[i])))
 	break;
    }
 
-  if ((args->error =
-       rdma_dev_init (vm, rd, args->rxq_size, args->txq_size, args->rxq_num)))
+  if (args->mode != RDMA_MODE_IBV)
+    {
+      struct mlx5dv_context mlx5dv_attrs = { };
+
+      if (mlx5dv_query_device (rd->ctx, &mlx5dv_attrs) == 0)
+	{
+	  if ((mlx5dv_attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1))
+	    rd->flags |= RDMA_DEVICE_F_MLX5DV;
+	}
+      else
+	{
+	  if (args->mode == RDMA_MODE_DV)
+	    {
+	      args->error = clib_error_return (0, "Direct Verbs mode not "
+					       "supported on this interface");
+	      goto err2;
+	    }
+	}
+    }
+
+  if ((args->error = rdma_dev_init (vm, rd, args->rxq_size, args->txq_size,
+				    args->rxq_num)))
     goto err2;
 
   if ((args->error = rdma_register_interface (vnm, rd)))
@@ -584,6 +841,8 @@ rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args)
 			       rdma_input_node.index);
   vec_foreach_index (qid, rd->rxqs)
     vnet_hw_interface_assign_rx_thread (vnm, rd->hw_if_index, qid, ~0);
+
+  vec_free (s);
   return;
 
 err3:
@@ -592,10 +851,9 @@ err2:
   rdma_dev_cleanup (rd);
 err1:
   ibv_free_device_list (dev_list);
-err0:
-  vec_free (s2);
   vec_free (s);
   args->rv = VNET_API_ERROR_INVALID_INTERFACE;
+err0:
   vlib_log_err (rm->log_class, "%U", format_clib_error, args->error);
 }
 
@@ -639,15 +897,9 @@ rdma_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
   rdma_main_t *rm = &rdma_main;
   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
   rdma_device_t *rd = pool_elt_at_index (rm->devices, hw->dev_instance);
-
-  /* Shut off redirection */
-  if (node_index == ~0)
-    {
-      rd->per_interface_next_index = node_index;
-      return;
-    }
-
   rd->per_interface_next_index =
+    ~0 ==
+    node_index ? VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT :
     vlib_node_add_next (vlib_get_main (), rdma_input_node.index, node_index);
 }
 
@@ -658,7 +910,7 @@ static char *rdma_tx_func_error_strings[] = {
 };
 
 /* *INDENT-OFF* */
-VNET_DEVICE_CLASS (rdma_device_class,) =
+VNET_DEVICE_CLASS (rdma_device_class) =
 {
   .name = "RDMA interface",
   .format_device = format_rdma_device,
@@ -667,6 +919,7 @@ VNET_DEVICE_CLASS (rdma_device_class,) =
   .rx_redirect_to_node = rdma_set_interface_next_node,
   .tx_function_n_errors = RDMA_TX_N_ERROR,
   .tx_function_error_strings = rdma_tx_func_error_strings,
+  .mac_addr_change_function = rdma_mac_change,
 };
 /* *INDENT-ON* */
 
@@ -674,13 +927,32 @@ clib_error_t *
 rdma_init (vlib_main_t * vm)
 {
   rdma_main_t *rm = &rdma_main;
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
 
   rm->log_class = vlib_log_register_class ("rdma", 0);
 
+  /* vlib_buffer_t template */
+  vec_validate_aligned (rm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
+
+  for (int i = 0; i < tm->n_vlib_mains; i++)
+    {
+      rdma_per_thread_data_t *ptd = vec_elt_at_index (rm->per_thread_data, i);
+      clib_memset (&ptd->buffer_template, 0, sizeof (vlib_buffer_t));
+      ptd->buffer_template.flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+      ptd->buffer_template.ref_count = 1;
+      vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+    }
+
   return 0;
 }
 
-VLIB_INIT_FUNCTION (rdma_init);
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (rdma_init) =
+{
+  .runs_after = VLIB_INITS ("pci_bus_init"),
+};
+/* *INDENT-ON* */
 
 /*
  * fd.io coding-style-patch-verification: ON
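
The unicast/promiscuous switch introduced by this patch relies on the verbs flow-steering mask semantics: only the bits set in the mask participate in the match. rdma_dev_set_promisc() therefore installs an all-zero value/mask pair (accept every destination MAC), while rdma_dev_set_ucast() pairs an exact match on rd->hwaddr (all-ones mask) with a multicast rule whose 01:00:00:00:00:00 value/mask matches any address with the group bit set. A minimal standalone sketch of the same pattern follows; it mirrors rdma_rxq_init_flow() above, but the helper name and the explicit IBV_FLOW_ATTR_NORMAL assignment are illustrative additions, not part of the patch:

/* Sketch: attach a destination-MAC steering rule to a raw-packet QP,
 * following the packed attr+spec layout used by rdma_rxq_init_flow().
 * 'qp' is assumed to be a valid IBV_QPT_RAW_PACKET QP, as created by
 * ibv_create_qp_ex() in rdma_rxq_finalize(); returns NULL on failure
 * with errno set. */
#include <infiniband/verbs.h>
#include <stdint.h>
#include <string.h>

static struct ibv_flow *
attach_dmac_rule (struct ibv_qp *qp, const uint8_t value[6],
		  const uint8_t mask[6], uint32_t flags)
{
  struct
  {
    struct ibv_flow_attr attr;
    struct ibv_flow_spec_eth eth;
  } __attribute__ ((packed)) fa;

  memset (&fa, 0, sizeof (fa));
  fa.attr.type = IBV_FLOW_ATTR_NORMAL;	/* steer matches to this QP */
  fa.attr.num_of_specs = 1;
  fa.attr.port = 1;
  fa.attr.flags = flags;	/* e.g. IBV_FLOW_ATTR_FLAGS_DONT_TRAP */
  fa.eth.type = IBV_FLOW_SPEC_ETH;
  fa.eth.size = sizeof (fa.eth);
  memcpy (fa.eth.val.dst_mac, value, 6);	/* bits to compare against */
  memcpy (fa.eth.mask.dst_mac, mask, 6);	/* 1-bits take part in match */

  return ibv_create_flow (qp, &fa.attr);
}

With such a helper, promiscuous mode amounts to attach_dmac_rule (qp, zeros, zeros, 0), and the L3 default mode installs the exact unicast rule plus a multicast rule flagged IBV_FLOW_ATTR_FLAGS_DONT_TRAP so that other consumers of the port (e.g. the Linux netdev) still see the multicast traffic. This is exactly the rule pair torn down and re-installed by rdma_dev_set_ucast() and rdma_dev_set_promisc(), and it is why rdma_mac_change() only needs to refresh the flows when the device is not in promiscuous mode.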