X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Frdma%2Fdevice.c;h=14262f597b72925eba510ce5ce6a5a7192c6886d;hb=ee60ac638c9abcd0763c57fce0e3e646723ea0e0;hp=6c48a97f5bf386842010bb2e6bd03d37573a45fd;hpb=0dd97d473bc0c958d9fcea508e1f5122a137b23f;p=vpp.git diff --git a/src/plugins/rdma/device.c b/src/plugins/rdma/device.c index 6c48a97f5bf..14262f597b7 100644 --- a/src/plugins/rdma/device.c +++ b/src/plugins/rdma/device.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -45,11 +46,14 @@ static u8 rdma_rss_hash_key[] = { rdma_main_t rdma_main; -#define rdma_log__(lvl, dev, f, ...) \ - do { \ - vlib_log((lvl), rdma_main.log_class, "%s: " f, \ - &(dev)->name, ##__VA_ARGS__); \ - } while (0) +/* (dev) is of type (rdma_device_t *) */ +#define rdma_log__(lvl, dev, f, ...) \ + do \ + { \ + vlib_log ((lvl), rdma_main.log_class, "%s: " f, (dev)->name, \ + ##__VA_ARGS__); \ + } \ + while (0) #define rdma_log(lvl, dev, f, ...) \ rdma_log__((lvl), (dev), "%s (%d): " f, strerror(errno), errno, ##__VA_ARGS__) @@ -57,7 +61,7 @@ rdma_main_t rdma_main; static struct ibv_flow * rdma_rxq_init_flow (const rdma_device_t * rd, struct ibv_qp *qp, const mac_address_t * mac, const mac_address_t * mask, - u32 flags) + u16 ether_type, u32 flags) { struct ibv_flow *flow; struct raw_eth_flow_attr @@ -76,6 +80,12 @@ rdma_rxq_init_flow (const rdma_device_t * rd, struct ibv_qp *qp, memcpy (fa.spec_eth.val.dst_mac, mac, sizeof (fa.spec_eth.val.dst_mac)); memcpy (fa.spec_eth.mask.dst_mac, mask, sizeof (fa.spec_eth.mask.dst_mac)); + if (ether_type) + { + fa.spec_eth.val.ether_type = ether_type; + fa.spec_eth.mask.ether_type = 0xffff; + } + flow = ibv_create_flow (qp, &fa.attr); if (!flow) rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_create_flow() failed"); @@ -104,16 +114,17 @@ rdma_dev_set_promisc (rdma_device_t * rd) const mac_address_t all = {.bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0} }; int err; - err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast); - if (err) - return ~0; - - err = rdma_rxq_destroy_flow (rd, &rd->flow_ucast); + err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6); + err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6); + err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4); + err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4); if (err) return ~0; - rd->flow_ucast = rdma_rxq_init_flow (rd, rd->rx_qp, &all, &all, 0); - if (!rd->flow_ucast) + rd->flow_ucast6 = + rdma_rxq_init_flow (rd, rd->rx_qp6, &all, &all, ntohs (ETH_P_IPV6), 0); + rd->flow_ucast4 = rdma_rxq_init_flow (rd, rd->rx_qp4, &all, &all, 0, 0); + if (!rd->flow_ucast6 || !rd->flow_ucast4) return ~0; rd->flags |= RDMA_DEVICE_F_PROMISC; @@ -128,25 +139,30 @@ rdma_dev_set_ucast (rdma_device_t * rd) const mac_address_t mcast = {.bytes = {0x1, 0x0, 0x0, 0x0, 0x0, 0x0} }; int err; - err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast); + err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6); + err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6); + err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4); + err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4); if (err) return ~0; - err = rdma_rxq_destroy_flow (rd, &rd->flow_ucast); - if (err) - return ~0; - - /* receive only packets with src = our MAC */ - rd->flow_ucast = rdma_rxq_init_flow (rd, rd->rx_qp, &rd->hwaddr, &ucast, 0); - if (!rd->flow_ucast) - return ~0; - - /* receive multicast packets */ - rd->flow_mcast = rdma_rxq_init_flow (rd, rd->rx_qp, &mcast, &mcast, - IBV_FLOW_ATTR_FLAGS_DONT_TRAP - /* let others receive mcast packet too (eg. 
Linux) */ + rd->flow_ucast6 = + rdma_rxq_init_flow (rd, rd->rx_qp6, &rd->hwaddr, &ucast, + ntohs (ETH_P_IPV6), 0); + rd->flow_mcast6 = + rdma_rxq_init_flow (rd, rd->rx_qp6, &mcast, &mcast, ntohs (ETH_P_IPV6), + IBV_FLOW_ATTR_FLAGS_DONT_TRAP + /* let others receive mcast packet too (eg. Linux) */ + ); + rd->flow_ucast4 = + rdma_rxq_init_flow (rd, rd->rx_qp4, &rd->hwaddr, &ucast, 0, 0); + rd->flow_mcast4 = + rdma_rxq_init_flow (rd, rd->rx_qp4, &mcast, &mcast, 0, + IBV_FLOW_ATTR_FLAGS_DONT_TRAP + /* let others receive mcast packet too (eg. Linux) */ ); - if (!rd->flow_mcast) + if (!rd->flow_ucast6 || !rd->flow_mcast6 || !rd->flow_ucast4 + || !rd->flow_mcast4) return ~0; rd->flags &= ~RDMA_DEVICE_F_PROMISC; @@ -182,7 +198,7 @@ rdma_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags) switch (flags) { - case 0: + case ETHERNET_INTERFACE_FLAG_DEFAULT_L3: return rdma_dev_set_ucast (rd); case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL: return rdma_dev_set_promisc (rd); @@ -339,16 +355,24 @@ rdma_async_event_cleanup (rdma_device_t * rd) static clib_error_t * rdma_register_interface (vnet_main_t * vnm, rdma_device_t * rd) { - return ethernet_register_interface (vnm, rdma_device_class.index, - rd->dev_instance, rd->hwaddr.bytes, - &rd->hw_if_index, rdma_flag_change); + clib_error_t *err = + ethernet_register_interface (vnm, rdma_device_class.index, + rd->dev_instance, rd->hwaddr.bytes, + &rd->hw_if_index, rdma_flag_change); + + /* Indicate ability to support L3 DMAC filtering and + * initialize interface to L3 non-promisc mode */ + vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rd->hw_if_index); + hi->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_MAC_FILTER; + ethernet_set_flags (vnm, rd->hw_if_index, + ETHERNET_INTERFACE_FLAG_DEFAULT_L3); + return err; } static void rdma_unregister_interface (vnet_main_t * vnm, rdma_device_t * rd) { vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0); - vnet_hw_interface_unassign_rx_thread (vnm, rd->hw_if_index, 0); ethernet_delete_interface (vnm, rd->hw_if_index); } @@ -366,8 +390,10 @@ rdma_dev_cleanup (rdma_device_t * rd) rdma_log (VLIB_LOG_LEVEL_DEBUG, rd, #fn "() failed (rv = %d)", rv); \ } - _(ibv_destroy_flow, rd->flow_mcast); - _(ibv_destroy_flow, rd->flow_ucast); + _(ibv_destroy_flow, rd->flow_mcast6); + _(ibv_destroy_flow, rd->flow_ucast6); + _(ibv_destroy_flow, rd->flow_mcast4); + _(ibv_destroy_flow, rd->flow_ucast4); _(ibv_dereg_mr, rd->mr); vec_foreach (txq, rd->txqs) { @@ -380,7 +406,8 @@ rdma_dev_cleanup (rdma_device_t * rd) _(ibv_destroy_cq, rxq->cq); } _(ibv_destroy_rwq_ind_table, rd->rx_rwq_ind_tbl); - _(ibv_destroy_qp, rd->rx_qp); + _(ibv_destroy_qp, rd->rx_qp6); + _(ibv_destroy_qp, rd->rx_qp4); _(ibv_dealloc_pd, rd->pd); _(ibv_close_device, rd->ctx); #undef _ @@ -395,19 +422,42 @@ rdma_dev_cleanup (rdma_device_t * rd) } static clib_error_t * -rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc) +rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc, + u8 no_multi_seg, u16 max_pktlen) { rdma_rxq_t *rxq; struct ibv_wq_init_attr wqia; + struct ibv_cq_init_attr_ex cqa = { }; struct ibv_wq_attr wqa; + struct ibv_cq_ex *cqex; + struct mlx5dv_wq_init_attr dv_wqia = { }; + int is_mlx5dv = ! !(rd->flags & RDMA_DEVICE_F_MLX5DV); + int is_striding = ! 
!(rd->flags & RDMA_DEVICE_F_STRIDING_RQ); vec_validate_aligned (rd->rxqs, qid, CLIB_CACHE_LINE_BYTES); rxq = vec_elt_at_index (rd->rxqs, qid); rxq->size = n_desc; + rxq->log_wqe_sz = 0; + rxq->buf_sz = vlib_buffer_get_default_data_size (vm); vec_validate_aligned (rxq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES); - if ((rxq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0) - return clib_error_return_unix (0, "Create CQ Failed"); + cqa.cqe = n_desc; + if (is_mlx5dv) + { + struct mlx5dv_cq_init_attr dvcq = { }; + dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE; + dvcq.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH; + + if ((cqex = mlx5dv_create_cq (rd->ctx, &cqa, &dvcq)) == 0) + return clib_error_return_unix (0, "Create mlx5dv rx CQ Failed"); + } + else + { + if ((cqex = ibv_create_cq_ex (rd->ctx, &cqa)) == 0) + return clib_error_return_unix (0, "Create CQ Failed"); + } + + rxq->cq = ibv_cq_ex_to_cq (cqex); memset (&wqia, 0, sizeof (wqia)); wqia.wq_type = IBV_WQT_RQ; @@ -415,7 +465,74 @@ rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc) wqia.max_sge = 1; wqia.pd = rd->pd; wqia.cq = rxq->cq; - if ((rxq->wq = ibv_create_wq (rd->ctx, &wqia)) == 0) + if (is_mlx5dv) + { + if (is_striding) + { + /* In STRIDING_RQ mode, map a descriptor to a stride, not a full WQE buffer */ + uword data_seg_log2_sz = + min_log2 (vlib_buffer_get_default_data_size (vm)); + rxq->buf_sz = 1 << data_seg_log2_sz; + /* The trick is also to map a descriptor to a data segment in the WQE SG list + The number of strides per WQE and the size of a WQE (in 16-bytes words) both + must be powers of two. + Moreover, in striding RQ mode, WQEs must include the SRQ header, which occupies + one 16-bytes word. That is why WQEs have 2*RDMA_RXQ_MAX_CHAIN_SZ 16-bytes words: + - One for the SRQ Header + - RDMA_RXQ_MAX_CHAIN_SZ for the different data segments (each mapped to + a stride, and a vlib_buffer) + - RDMA_RXQ_MAX_CHAIN_SZ-1 null data segments + */ + int max_chain_log_sz = + max_pktlen ? max_log2 ((max_pktlen / + (rxq->buf_sz)) + + 1) : RDMA_RXQ_MAX_CHAIN_LOG_SZ; + max_chain_log_sz = clib_max (max_chain_log_sz, 3); + wqia.max_sge = 1 << max_chain_log_sz; + dv_wqia.comp_mask = MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ; + dv_wqia.striding_rq_attrs.two_byte_shift_en = 0; + dv_wqia.striding_rq_attrs.single_wqe_log_num_of_strides = + max_chain_log_sz; + dv_wqia.striding_rq_attrs.single_stride_log_num_of_bytes = + data_seg_log2_sz; + wqia.max_wr >>= max_chain_log_sz; + rxq->log_wqe_sz = max_chain_log_sz + 1; + rxq->log_stride_per_wqe = max_chain_log_sz; + } + else + { + /* In non STRIDING_RQ mode and if multiseg is not disabled, each WQE is a SG list of data + segments, each pointing to a vlib_buffer. */ + if (no_multi_seg) + { + wqia.max_sge = 1; + rxq->log_wqe_sz = 0; + rxq->n_ds_per_wqe = 1; + } + else + { + int max_chain_sz = + max_pktlen ? 
(max_pktlen / + (rxq->buf_sz)) + + 1 : RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ; + int max_chain_log_sz = max_log2 (max_chain_sz); + wqia.max_sge = 1 << max_chain_log_sz; + rxq->log_wqe_sz = max_chain_log_sz; + rxq->n_ds_per_wqe = max_chain_sz; + } + + } + + if ((rxq->wq = mlx5dv_create_wq (rd->ctx, &wqia, &dv_wqia))) + { + rxq->wq->events_completed = 0; + pthread_mutex_init (&rxq->wq->mutex, NULL); + pthread_cond_init (&rxq->wq->cond, NULL); + } + else + return clib_error_return_unix (0, "Create WQ Failed"); + } + else if ((rxq->wq = ibv_create_wq (rd->ctx, &wqia)) == 0) return clib_error_return_unix (0, "Create WQ Failed"); memset (&wqa, 0, sizeof (wqa)); @@ -424,6 +541,73 @@ rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc) if (ibv_modify_wq (rxq->wq, &wqa) != 0) return clib_error_return_unix (0, "Modify WQ (RDY) Failed"); + if (is_mlx5dv) + { + struct mlx5dv_obj obj = { }; + struct mlx5dv_cq dv_cq; + struct mlx5dv_rwq dv_rwq; + u64 qw0; + u64 qw0_nullseg; + u32 wqe_sz_mask = (1 << rxq->log_wqe_sz) - 1; + + obj.cq.in = rxq->cq; + obj.cq.out = &dv_cq; + obj.rwq.in = rxq->wq; + obj.rwq.out = &dv_rwq; + + if ((mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ))) + return clib_error_return_unix (0, "mlx5dv: failed to init rx obj"); + + if (dv_cq.cqe_size != sizeof (mlx5dv_cqe_t)) + return clib_error_return_unix (0, "mlx5dv: incompatible rx CQE size"); + + rxq->log2_cq_size = max_log2 (dv_cq.cqe_cnt); + rxq->cqes = (mlx5dv_cqe_t *) dv_cq.buf; + rxq->cq_db = (volatile u32 *) dv_cq.dbrec; + rxq->cqn = dv_cq.cqn; + + rxq->wqes = (mlx5dv_wqe_ds_t *) dv_rwq.buf; + rxq->wq_db = (volatile u32 *) dv_rwq.dbrec; + rxq->wq_stride = dv_rwq.stride; + rxq->wqe_cnt = dv_rwq.wqe_cnt; + + qw0 = clib_host_to_net_u32 (rxq->buf_sz); + qw0_nullseg = 0; + qw0 |= (u64) clib_host_to_net_u32 (rd->lkey) << 32; + qw0_nullseg |= (u64) clib_host_to_net_u32 (rd->lkey) << 32; + +/* Prefill the different 16 bytes words of the WQ. + - If not in striding RQ mode, for each WQE, init with qw0 the first + RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ, and init the rest of the WQE + with null segments. 
+ - If in striding RQ mode, for each WQE, the RDMA_RXQ_MAX_CHAIN_SZ + 1 + first 16-bytes words are initialised with qw0, the rest are null segments */ + + for (int i = 0; i < rxq->wqe_cnt << rxq->log_wqe_sz; i++) + if ((!is_striding + && ((i & wqe_sz_mask) < rxq->n_ds_per_wqe)) + || (is_striding + && ((i == 0) + || !(((i - 1) >> rxq->log_stride_per_wqe) & 0x1)))) + rxq->wqes[i].dsz_and_lkey = qw0; + else + rxq->wqes[i].dsz_and_lkey = qw0_nullseg; + + for (int i = 0; i < (1 << rxq->log2_cq_size); i++) + rxq->cqes[i].opcode_cqefmt_se_owner = 0xff; + + if (!is_striding) + { + vec_validate_aligned (rxq->second_bufs, n_desc - 1, + CLIB_CACHE_LINE_BYTES); + vec_validate_aligned (rxq->n_used_per_chain, n_desc - 1, + CLIB_CACHE_LINE_BYTES); + rxq->n_total_additional_segs = n_desc * (rxq->n_ds_per_wqe - 1); + for (int i = 0; i < n_desc; i++) + rxq->n_used_per_chain[i] = rxq->n_ds_per_wqe - 1; + } + } + return 0; } @@ -433,15 +617,29 @@ rdma_rxq_finalize (vlib_main_t * vm, rdma_device_t * rd) struct ibv_rwq_ind_table_init_attr rwqia; struct ibv_qp_init_attr_ex qpia; struct ibv_wq **ind_tbl; + const u32 rxq_sz = vec_len (rd->rxqs); + u32 ind_tbl_sz = rxq_sz; u32 i; - ASSERT (is_pow2 (vec_len (rd->rxqs)) - && "rxq number should be a power of 2"); + if (!is_pow2 (ind_tbl_sz)) + { + /* in case we do not have a power-of-2 number of rxq, we try to use the + * maximum supported to minimize the imbalance */ + struct ibv_device_attr_ex attr; + if (ibv_query_device_ex (rd->ctx, 0, &attr)) + return clib_error_return_unix (0, "device query failed"); + ind_tbl_sz = attr.rss_caps.max_rwq_indirection_table_size; + if (ind_tbl_sz < rxq_sz) + return clib_error_create ("too many rxqs requested (%d) compared to " + "max indirection table size (%d)", + rxq_sz, ind_tbl_sz); + } - ind_tbl = vec_new (struct ibv_wq *, vec_len (rd->rxqs)); - vec_foreach_index (i, rd->rxqs) - ind_tbl[i] = vec_elt_at_index (rd->rxqs, i)->wq; + ind_tbl = vec_new (struct ibv_wq *, ind_tbl_sz); + vec_foreach_index (i, ind_tbl) + vec_elt (ind_tbl, i) = vec_elt (rd->rxqs, i % rxq_sz).wq; memset (&rwqia, 0, sizeof (rwqia)); + ASSERT (is_pow2 (vec_len (ind_tbl))); rwqia.log_ind_tbl_size = min_log2 (vec_len (ind_tbl)); rwqia.ind_tbl = ind_tbl; if ((rd->rx_rwq_ind_tbl = ibv_create_rwq_ind_table (rd->ctx, &rwqia)) == 0) @@ -459,10 +657,18 @@ rdma_rxq_finalize (vlib_main_t * vm, rdma_device_t * rd) qpia.rx_hash_conf.rx_hash_key_len = sizeof (rdma_rss_hash_key); qpia.rx_hash_conf.rx_hash_key = rdma_rss_hash_key; qpia.rx_hash_conf.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ; + qpia.rx_hash_conf.rx_hash_fields_mask = - IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4; - if ((rd->rx_qp = ibv_create_qp_ex (rd->ctx, &qpia)) == 0) - return clib_error_return_unix (0, "Queue Pair create failed"); + IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_SRC_PORT_TCP | + IBV_RX_HASH_DST_PORT_TCP; + if ((rd->rx_qp4 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0) + return clib_error_return_unix (0, "IPv4 Queue Pair create failed"); + + qpia.rx_hash_conf.rx_hash_fields_mask = + IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 | IBV_RX_HASH_SRC_PORT_TCP | + IBV_RX_HASH_DST_PORT_TCP; + if ((rd->rx_qp6 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0) + return clib_error_return_unix (0, "IPv6 Queue Pair create failed"); if (rdma_dev_set_ucast (rd)) return clib_error_return_unix (0, "Set unicast mode failed"); @@ -480,7 +686,8 @@ rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc) vec_validate_aligned (rd->txqs, qid, CLIB_CACHE_LINE_BYTES); txq = vec_elt_at_index 
(rd->txqs, qid); - txq->size = n_desc; + ASSERT (is_pow2 (n_desc)); + txq->bufs_log2sz = min_log2 (n_desc); vec_validate_aligned (txq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES); if ((txq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0) @@ -514,16 +721,70 @@ rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc) qpa.qp_state = IBV_QPS_RTS; if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0) return clib_error_return_unix (0, "Modify QP (send) Failed"); + + txq->ibv_cq = txq->cq; + txq->ibv_qp = txq->qp; + + if (rd->flags & RDMA_DEVICE_F_MLX5DV) + { + rdma_mlx5_wqe_t *tmpl = (void *) txq->dv_wqe_tmpl; + struct mlx5dv_cq dv_cq; + struct mlx5dv_qp dv_qp; + struct mlx5dv_obj obj = { }; + + obj.cq.in = txq->cq; + obj.cq.out = &dv_cq; + obj.qp.in = txq->qp; + obj.qp.out = &dv_qp; + + if (mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP)) + return clib_error_return_unix (0, "DV init obj failed"); + + if (RDMA_TXQ_BUF_SZ (txq) > dv_qp.sq.wqe_cnt + || !is_pow2 (dv_qp.sq.wqe_cnt) + || sizeof (rdma_mlx5_wqe_t) != dv_qp.sq.stride + || (uword) dv_qp.sq.buf % sizeof (rdma_mlx5_wqe_t)) + return clib_error_return (0, "Unsupported DV SQ parameters"); + + if (RDMA_TXQ_BUF_SZ (txq) > dv_cq.cqe_cnt + || !is_pow2 (dv_cq.cqe_cnt) + || sizeof (struct mlx5_cqe64) != dv_cq.cqe_size + || (uword) dv_cq.buf % sizeof (struct mlx5_cqe64)) + return clib_error_return (0, "Unsupported DV CQ parameters"); + + /* get SQ and doorbell addresses */ + txq->dv_sq_wqes = dv_qp.sq.buf; + txq->dv_sq_dbrec = dv_qp.dbrec; + txq->dv_sq_db = dv_qp.bf.reg; + txq->dv_sq_log2sz = min_log2 (dv_qp.sq.wqe_cnt); + + /* get CQ and doorbell addresses */ + txq->dv_cq_cqes = dv_cq.buf; + txq->dv_cq_dbrec = dv_cq.dbrec; + txq->dv_cq_log2sz = min_log2 (dv_cq.cqe_cnt); + + /* init tx desc template */ + STATIC_ASSERT_SIZEOF (txq->dv_wqe_tmpl, sizeof (*tmpl)); + mlx5dv_set_ctrl_seg (&tmpl->ctrl, 0, MLX5_OPCODE_SEND, 0, + txq->qp->qp_num, 0, RDMA_MLX5_WQE_DS, 0, + RDMA_TXQ_DV_INVALID_ID); + tmpl->eseg.inline_hdr_sz = htobe16 (MLX5_ETH_L2_INLINE_HEADER_SIZE); + mlx5dv_set_data_seg (&tmpl->dseg, 0, rd->lkey, 0); + } + return 0; } static clib_error_t * -rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd, u32 rxq_size, - u32 txq_size, u32 rxq_num) +rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd, + rdma_create_if_args_t * args) { clib_error_t *err; vlib_buffer_main_t *bm = vm->buffer_main; vlib_thread_main_t *tm = vlib_get_thread_main (); + u32 rxq_num = args->rxq_num; + u32 rxq_size = args->rxq_size; + u32 txq_size = args->txq_size; u32 i; if (rd->ctx == 0) @@ -532,6 +793,13 @@ rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd, u32 rxq_size, if ((rd->pd = ibv_alloc_pd (rd->ctx)) == 0) return clib_error_return_unix (0, "PD Alloc Failed"); + if ((rd->mr = ibv_reg_mr (rd->pd, (void *) bm->buffer_mem_start, + bm->buffer_mem_size, + IBV_ACCESS_LOCAL_WRITE)) == 0) + return clib_error_return_unix (0, "Register MR Failed"); + + rd->lkey = rd->mr->lkey; /* avoid indirection in datapath */ + ethernet_mac_address_generate (rd->hwaddr.bytes); /* @@ -544,17 +812,13 @@ rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd, u32 rxq_size, return err; for (i = 0; i < rxq_num; i++) - if ((err = rdma_rxq_init (vm, rd, i, rxq_size))) + if ((err = + rdma_rxq_init (vm, rd, i, rxq_size, + args->no_multi_seg, args->max_pktlen))) return err; if ((err = rdma_rxq_finalize (vm, rd))) return err; - if ((rd->mr = ibv_reg_mr (rd->pd, (void *) bm->buffer_mem_start, - bm->buffer_mem_size, - IBV_ACCESS_LOCAL_WRITE)) == 0) - return clib_error_return_unix 
(0, "Register MR Failed"); - rd->lkey = rd->mr->lkey; /* avoid indirection in datapath */ - return 0; } @@ -589,25 +853,18 @@ rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args) u16 qid; int i; - args->rxq_size = args->rxq_size ? args->rxq_size : 2 * VLIB_FRAME_SIZE; - args->txq_size = args->txq_size ? args->txq_size : 2 * VLIB_FRAME_SIZE; - args->rxq_num = args->rxq_num ? args->rxq_num : 1; - - if (!is_pow2 (args->rxq_num)) - { - args->rv = VNET_API_ERROR_INVALID_VALUE; - args->error = - clib_error_return (0, "rx queue number must be a power of two"); - goto err0; - } + args->rxq_size = args->rxq_size ? args->rxq_size : 1024; + args->txq_size = args->txq_size ? args->txq_size : 1024; + args->rxq_num = args->rxq_num ? args->rxq_num : 2; if (args->rxq_size < VLIB_FRAME_SIZE || args->txq_size < VLIB_FRAME_SIZE || + args->rxq_size > 65535 || args->txq_size > 65535 || !is_pow2 (args->rxq_size) || !is_pow2 (args->txq_size)) { args->rv = VNET_API_ERROR_INVALID_VALUE; - args->error = - clib_error_return (0, "queue size must be a power of two >= %i", - VLIB_FRAME_SIZE); + args->error = clib_error_return (0, "queue size must be a power of two " + "between %i and 65535", + VLIB_FRAME_SIZE); goto err0; } @@ -632,9 +889,13 @@ rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args) pool_get_zero (rm->devices, rd); rd->dev_instance = rd - rm->devices; rd->per_interface_next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT; - rd->name = format (0, "%s", args->name); rd->linux_ifname = format (0, "%s", args->ifname); + if (!args->name || 0 == args->name[0]) + rd->name = format (0, "%s/%d", args->ifname, rd->dev_instance); + else + rd->name = format (0, "%s", args->name); + rd->pci = vlib_pci_get_device_info (vm, &pci_addr, &args->error); if (!rd->pci) goto err2; @@ -670,8 +931,44 @@ rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args) break; } - if ((args->error = - rdma_dev_init (vm, rd, args->rxq_size, args->txq_size, args->rxq_num))) + if (args->mode != RDMA_MODE_IBV) + { + struct mlx5dv_context mlx5dv_attrs = { }; + mlx5dv_attrs.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ; + + if (mlx5dv_query_device (rd->ctx, &mlx5dv_attrs) == 0) + { + uword data_seg_log2_sz = + min_log2 (vlib_buffer_get_default_data_size (vm)); + + if ((mlx5dv_attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1)) + rd->flags |= RDMA_DEVICE_F_MLX5DV; + +/* Enable striding RQ if neither multiseg nor striding rq +are explicitly disabled, and if the interface supports it.*/ + if (!args->no_multi_seg && !args->disable_striding_rq + && data_seg_log2_sz <= + mlx5dv_attrs.striding_rq_caps.max_single_stride_log_num_of_bytes + && data_seg_log2_sz >= + mlx5dv_attrs.striding_rq_caps.min_single_stride_log_num_of_bytes + && RDMA_RXQ_MAX_CHAIN_LOG_SZ >= + mlx5dv_attrs.striding_rq_caps.min_single_wqe_log_num_of_strides + && RDMA_RXQ_MAX_CHAIN_LOG_SZ <= + mlx5dv_attrs.striding_rq_caps.max_single_wqe_log_num_of_strides) + rd->flags |= RDMA_DEVICE_F_STRIDING_RQ; + } + else + { + if (args->mode == RDMA_MODE_DV) + { + args->error = clib_error_return (0, "Direct Verbs mode not " + "supported on this interface"); + goto err2; + } + } + } + + if ((args->error = rdma_dev_init (vm, rd, args))) goto err2; if ((args->error = rdma_register_interface (vnm, rd))) @@ -687,13 +984,17 @@ rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args) /* * FIXME: add support for interrupt mode * vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, rd->hw_if_index); - * hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE; + * hw->caps |= 
VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE; */ - vnet_hw_interface_set_input_node (vnm, rd->hw_if_index, - rdma_input_node.index); - vec_foreach_index (qid, rd->rxqs) - vnet_hw_interface_assign_rx_thread (vnm, rd->hw_if_index, qid, ~0); + vnet_hw_if_set_input_node (vnm, rd->hw_if_index, rdma_input_node.index); + vec_foreach_index (qid, rd->rxqs) + { + u32 queue_index = vnet_hw_if_register_rx_queue ( + vnm, rd->hw_if_index, qid, VNET_HW_IF_RXQ_THREAD_ANY); + rd->rxqs[qid].queue_index = queue_index; + } + vnet_hw_if_update_runtime_data (vnm, rd->hw_if_index); vec_free (s); return; @@ -749,15 +1050,9 @@ rdma_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index, rdma_main_t *rm = &rdma_main; vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index); rdma_device_t *rd = pool_elt_at_index (rm->devices, hw->dev_instance); - - /* Shut off redirection */ - if (node_index == ~0) - { - rd->per_interface_next_index = node_index; - return; - } - rd->per_interface_next_index = + ~0 == + node_index ? VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT : vlib_node_add_next (vlib_get_main (), rdma_input_node.index, node_index); } @@ -767,7 +1062,6 @@ static char *rdma_tx_func_error_strings[] = { #undef _ }; -/* *INDENT-OFF* */ VNET_DEVICE_CLASS (rdma_device_class) = { .name = "RDMA interface", @@ -779,24 +1073,35 @@ VNET_DEVICE_CLASS (rdma_device_class) = .tx_function_error_strings = rdma_tx_func_error_strings, .mac_addr_change_function = rdma_mac_change, }; -/* *INDENT-ON* */ clib_error_t * rdma_init (vlib_main_t * vm) { rdma_main_t *rm = &rdma_main; + vlib_thread_main_t *tm = vlib_get_thread_main (); rm->log_class = vlib_log_register_class ("rdma", 0); + /* vlib_buffer_t template */ + vec_validate_aligned (rm->per_thread_data, tm->n_vlib_mains - 1, + CLIB_CACHE_LINE_BYTES); + + for (int i = 0; i < tm->n_vlib_mains; i++) + { + rdma_per_thread_data_t *ptd = vec_elt_at_index (rm->per_thread_data, i); + clib_memset (&ptd->buffer_template, 0, sizeof (vlib_buffer_t)); + ptd->buffer_template.flags = VLIB_BUFFER_TOTAL_LENGTH_VALID; + ptd->buffer_template.ref_count = 1; + vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_TX] = (u32) ~ 0; + } + return 0; } -/* *INDENT-OFF* */ VLIB_INIT_FUNCTION (rdma_init) = { .runs_after = VLIB_INITS ("pci_bus_init"), }; -/* *INDENT-OFF* */ /* * fd.io coding-style-patch-verification: ON
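
Note on the striding RQ sizing introduced in rdma_rxq_init(): each receive
WQE is sized to hold one SRQ header word plus a power-of-two number of data
segments (strides), so the per-WQE geometry is fully determined by the vlib
buffer data size and the requested max-pktlen. The standalone sketch below is
not part of the patch and only reproduces that arithmetic; buf_sz and
max_pktlen are illustrative values and max_log2_u32() is a local stand-in for
vppinfra's max_log2().

/* Standalone sketch (not part of the patch): striding-RQ geometry as
 * computed in rdma_rxq_init().  buf_sz and max_pktlen are illustrative;
 * the driver takes them from vlib_buffer_get_default_data_size () and the
 * user-supplied max-pktlen argument. */
#include <stdio.h>

static unsigned
max_log2_u32 (unsigned v)	/* smallest n such that (1u << n) >= v */
{
  unsigned n = 0;
  while ((1u << n) < v)
    n++;
  return n;
}

int
main (void)
{
  unsigned buf_sz = 2048;	/* assumed vlib buffer data size */
  unsigned max_pktlen = 9216;	/* assumed maximum packet length */

  /* strides (i.e. vlib buffers) needed for one packet, rounded up to a
   * power of two; the patch enforces a minimum of 2^3 = 8 strides per WQE */
  unsigned log_strides = max_log2_u32 (max_pktlen / buf_sz + 1);
  if (log_strides < 3)
    log_strides = 3;

  /* one WQE = 1 SRQ header word + 2^log_strides data segments
   *         + (2^log_strides - 1) null segments
   *         = 2^(log_strides + 1) 16-byte words */
  printf ("strides per WQE: %u\n", 1u << log_strides);
  printf ("WQE size: %u 16-byte words (%u bytes)\n",
	  1u << (log_strides + 1), 16u << (log_strides + 1));
  return 0;
}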
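
Similarly, the new non-power-of-two handling in rdma_rxq_finalize() pads the
RSS indirection table up to the device maximum and wraps queue assignments
modulo the rx queue count. A minimal sketch of that fill pattern (again not
part of the patch), with assumed values for the queue count and the device's
maximum indirection table size:

/* Standalone sketch (not part of the patch): indirection table fill used
 * when the rx queue count is not a power of two.  ind_tbl_sz stands in for
 * attr.rss_caps.max_rwq_indirection_table_size. */
#include <stdio.h>

int
main (void)
{
  unsigned rxq_sz = 3;		/* assumed: 3 rx queues requested */
  unsigned ind_tbl_sz = 8;	/* assumed device maximum table size */

  for (unsigned i = 0; i < ind_tbl_sz; i++)
    printf ("ind_tbl[%u] -> rxq %u\n", i, i % rxq_sz);
  return 0;
}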