#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
+#include <vnet/interface/rx_queue_funcs.h>
#include <rdma/rdma.h>
rdma_main_t rdma_main;
-#define rdma_log__(lvl, dev, f, ...) \
- do { \
- vlib_log((lvl), rdma_main.log_class, "%s: " f, \
- &(dev)->name, ##__VA_ARGS__); \
- } while (0)
+/* (dev) is of type (rdma_device_t *) */
+#define rdma_log__(lvl, dev, f, ...) \
+ do \
+ { \
+ vlib_log ((lvl), rdma_main.log_class, "%s: " f, (dev)->name, \
+ ##__VA_ARGS__); \
+ } \
+ while (0)
#define rdma_log(lvl, dev, f, ...) \
rdma_log__((lvl), (dev), "%s (%d): " f, strerror(errno), errno, ##__VA_ARGS__)
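+/* Usage sketch (hypothetical call site): rdma_log (VLIB_LOG_LEVEL_ERR, rd,
+   "ibv_modify_wq failed") expands through rdma_log__, so the message is
+   prefixed with the device name plus strerror(errno) and errno. */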
vlib_log_emerg (rm->log_class, "%s: fatal error", rd->name);
break;
default:
- rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unhandeld RDMA async event %i",
+ rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unhandled RDMA async event %d",
event.event_type);
break;
}
/* Indicate ability to support L3 DMAC filtering and
* initialize interface to L3 non-promisc mode */
vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rd->hw_if_index);
- hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER;
+ hi->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_MAC_FILTER;
ethernet_set_flags (vnm, rd->hw_if_index,
ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
return err;
rdma_unregister_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
- vnet_hw_interface_unassign_rx_thread (vnm, rd->hw_if_index, 0);
ethernet_delete_interface (vnm, rd->hw_if_index);
}
}
static clib_error_t *
-rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
+rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc,
+ u8 no_multi_seg, u16 max_pktlen)
{
rdma_rxq_t *rxq;
struct ibv_wq_init_attr wqia;
struct ibv_cq_init_attr_ex cqa = { };
struct ibv_wq_attr wqa;
struct ibv_cq_ex *cqex;
+ struct mlx5dv_wq_init_attr dv_wqia = { };
+ int is_mlx5dv = ! !(rd->flags & RDMA_DEVICE_F_MLX5DV);
+ int is_striding = ! !(rd->flags & RDMA_DEVICE_F_STRIDING_RQ);
vec_validate_aligned (rd->rxqs, qid, CLIB_CACHE_LINE_BYTES);
rxq = vec_elt_at_index (rd->rxqs, qid);
rxq->size = n_desc;
+ rxq->log_wqe_sz = 0;
+ rxq->buf_sz = vlib_buffer_get_default_data_size (vm);
vec_validate_aligned (rxq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);
cqa.cqe = n_desc;
- if (rd->flags & RDMA_DEVICE_F_MLX5DV)
+ if (is_mlx5dv)
{
struct mlx5dv_cq_init_attr dvcq = { };
dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
wqia.max_sge = 1;
wqia.pd = rd->pd;
wqia.cq = rxq->cq;
- if ((rxq->wq = ibv_create_wq (rd->ctx, &wqia)) == 0)
+ if (is_mlx5dv)
+ {
+ if (is_striding)
+ {
+ /* In STRIDING_RQ mode, map a descriptor to a stride, not a full WQE buffer */
+ uword data_seg_log2_sz =
+ min_log2 (vlib_buffer_get_default_data_size (vm));
+ rxq->buf_sz = 1 << data_seg_log2_sz;
+ /* We also map each descriptor to a data segment in the WQE SG list.
+    The number of strides per WQE and the size of a WQE (in 16-byte
+    words) must both be powers of two. Moreover, in striding RQ mode,
+    each WQE must include the SRQ header, which occupies one 16-byte
+    word. That is why a WQE has 2*RDMA_RXQ_MAX_CHAIN_SZ 16-byte words:
+    - one for the SRQ header
+    - RDMA_RXQ_MAX_CHAIN_SZ for the data segments (each mapped to a
+      stride and a vlib_buffer)
+    - RDMA_RXQ_MAX_CHAIN_SZ-1 null data segments
+ */
+ int max_chain_log_sz =
+ max_pktlen ? max_log2 ((max_pktlen /
+ (rxq->buf_sz)) +
+ 1) : RDMA_RXQ_MAX_CHAIN_LOG_SZ;
+ max_chain_log_sz = clib_max (max_chain_log_sz, 3);
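+ /* Clamp to at least 2^3 = 8 strides per WQE; presumably the smallest
+    stride count the device accepts for single_wqe_log_num_of_strides. */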
+ wqia.max_sge = 1 << max_chain_log_sz;
+ dv_wqia.comp_mask = MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
+ dv_wqia.striding_rq_attrs.two_byte_shift_en = 0;
+ dv_wqia.striding_rq_attrs.single_wqe_log_num_of_strides =
+ max_chain_log_sz;
+ dv_wqia.striding_rq_attrs.single_stride_log_num_of_bytes =
+ data_seg_log2_sz;
+ wqia.max_wr >>= max_chain_log_sz;
+ rxq->log_wqe_sz = max_chain_log_sz + 1;
+ rxq->log_stride_per_wqe = max_chain_log_sz;
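+ /* Worked example (hypothetical values): with 2048B buffers
+    (data_seg_log2_sz = 11) and max_pktlen = 9000,
+    max_chain_log_sz = max_log2 (9000 / 2048 + 1) = 3, so each WQE
+    covers 8 strides and spans 2^4 = 16 16-byte words
+    (1 SRQ header + 8 data segments + 7 null segments). */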
+ }
+ else
+ {
+ /* In non-striding RQ mode, unless multi-seg is disabled, each WQE is
+    an SG list of data segments, each pointing to a vlib_buffer. */
+ if (no_multi_seg)
+ {
+ wqia.max_sge = 1;
+ rxq->log_wqe_sz = 0;
+ rxq->n_ds_per_wqe = 1;
+ }
+ else
+ {
+ int max_chain_sz =
+ max_pktlen ? (max_pktlen /
+ (rxq->buf_sz)) +
+ 1 : RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ;
+ int max_chain_log_sz = max_log2 (max_chain_sz);
+ wqia.max_sge = 1 << max_chain_log_sz;
+ rxq->log_wqe_sz = max_chain_log_sz;
+ rxq->n_ds_per_wqe = max_chain_sz;
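+ /* E.g. (hypothetical values): with buf_sz = 2048 and
+    max_pktlen = 9000, max_chain_sz = 5 and the WQE is rounded up
+    to 2^3 = 8 SG slots, of which 5 carry data segments and 3 are
+    left as null segments. */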
+ }
+
+ }
+
+ if ((rxq->wq = mlx5dv_create_wq (rd->ctx, &wqia, &dv_wqia)))
+ {
+ rxq->wq->events_completed = 0;
+ pthread_mutex_init (&rxq->wq->mutex, NULL);
+ pthread_cond_init (&rxq->wq->cond, NULL);
+ }
+ else
+ return clib_error_return_unix (0, "Create WQ Failed");
+ }
+ else if ((rxq->wq = ibv_create_wq (rd->ctx, &wqia)) == 0)
return clib_error_return_unix (0, "Create WQ Failed");
memset (&wqa, 0, sizeof (wqa));
if (ibv_modify_wq (rxq->wq, &wqa) != 0)
return clib_error_return_unix (0, "Modify WQ (RDY) Failed");
- if (rd->flags & RDMA_DEVICE_F_MLX5DV)
+ if (is_mlx5dv)
{
struct mlx5dv_obj obj = { };
struct mlx5dv_cq dv_cq;
struct mlx5dv_rwq dv_rwq;
u64 qw0;
+ u64 qw0_nullseg;
+ u32 wqe_sz_mask = (1 << rxq->log_wqe_sz) - 1;
obj.cq.in = rxq->cq;
obj.cq.out = &dv_cq;
rxq->cq_db = (volatile u32 *) dv_cq.dbrec;
rxq->cqn = dv_cq.cqn;
- rxq->wqes = (mlx5dv_rwq_t *) dv_rwq.buf;
+ rxq->wqes = (mlx5dv_wqe_ds_t *) dv_rwq.buf;
rxq->wq_db = (volatile u32 *) dv_rwq.dbrec;
rxq->wq_stride = dv_rwq.stride;
rxq->wqe_cnt = dv_rwq.wqe_cnt;
- qw0 = clib_host_to_net_u32 (vlib_buffer_get_default_data_size (vm));
+ qw0 = clib_host_to_net_u32 (rxq->buf_sz);
+ qw0_nullseg = 0;
qw0 |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;
-
- for (int i = 0; i < rxq->size; i++)
- rxq->wqes[i].dsz_and_lkey = qw0;
+ qw0_nullseg |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;
+
+/* Prefill the 16-byte words of the WQ:
+   - In non-striding RQ mode, for each WQE, initialize the first
+     n_ds_per_wqe words with qw0 and the rest of the WQE with null
+     segments.
+   - In striding RQ mode, for each WQE, initialize the first
+     (strides-per-WQE + 1) words with qw0 (one SRQ header plus the
+     data segments) and the rest with null segments. */
+
+ for (int i = 0; i < rxq->wqe_cnt << rxq->log_wqe_sz; i++)
+ if ((!is_striding
+ && ((i & wqe_sz_mask) < rxq->n_ds_per_wqe))
+ || (is_striding
+ && ((i == 0)
+ || !(((i - 1) >> rxq->log_stride_per_wqe) & 0x1))))
+ rxq->wqes[i].dsz_and_lkey = qw0;
+ else
+ rxq->wqes[i].dsz_and_lkey = qw0_nullseg;
for (int i = 0; i < (1 << rxq->log2_cq_size); i++)
rxq->cqes[i].opcode_cqefmt_se_owner = 0xff;
+
+ if (!is_striding)
+ {
+ vec_validate_aligned (rxq->second_bufs, n_desc - 1,
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (rxq->n_used_per_chain, n_desc - 1,
+ CLIB_CACHE_LINE_BYTES);
+ rxq->n_total_additional_segs = n_desc * (rxq->n_ds_per_wqe - 1);
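+ /* E.g. (hypothetical): with n_ds_per_wqe = 5 and n_desc = 1024,
+    each chain starts with 4 additional segments to refill,
+    i.e. 4096 additional segments in total. */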
+ for (int i = 0; i < n_desc; i++)
+ rxq->n_used_per_chain[i] = rxq->n_ds_per_wqe - 1;
+ }
}
return 0;
}
+static uint64_t
+rdma_rss42ibv (const rdma_rss4_t rss4)
+{
+ switch (rss4)
+ {
+ case RDMA_RSS4_IP:
+ return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;
+ case RDMA_RSS4_IP_UDP:
+ return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
+ IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP;
+ case RDMA_RSS4_AUTO: /* fallthrough */
+ case RDMA_RSS4_IP_TCP:
+ return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
+ IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
+ }
+ ASSERT (0);
+ return 0;
+}
+
+static uint64_t
+rdma_rss62ibv (const rdma_rss6_t rss6)
+{
+ switch (rss6)
+ {
+ case RDMA_RSS6_IP:
+ return IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6;
+ case RDMA_RSS6_IP_UDP:
+ return IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
+ IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP;
+ case RDMA_RSS6_AUTO: /* fallthrough */
+ case RDMA_RSS6_IP_TCP:
+ return IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
+ IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
+ }
+ ASSERT (0);
+ return 0;
+}
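+
+/* The two helpers above compute the rx_hash_fields_mask used when
+ * creating the RSS queue pairs in rdma_rxq_finalize; the AUTO values
+ * deliberately fall through to the IP+TCP variants, preserving the
+ * previously hardcoded hash configuration. */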
+
static clib_error_t *
-rdma_rxq_finalize (vlib_main_t * vm, rdma_device_t * rd)
+rdma_rxq_finalize (vlib_main_t *vm, rdma_device_t *rd)
{
struct ibv_rwq_ind_table_init_attr rwqia;
struct ibv_qp_init_attr_ex qpia;
struct ibv_wq **ind_tbl;
+ const u32 rxq_sz = vec_len (rd->rxqs);
+ u32 ind_tbl_sz = rxq_sz;
u32 i;
- ASSERT (is_pow2 (vec_len (rd->rxqs))
- && "rxq number should be a power of 2");
+ if (!is_pow2 (ind_tbl_sz))
+ {
+ /* in case we do not have a power-of-2 number of rxqs, use the
+  * maximum supported indirection table size to minimize the
+  * imbalance */
+ struct ibv_device_attr_ex attr;
+ if (ibv_query_device_ex (rd->ctx, 0, &attr))
+ return clib_error_return_unix (0, "device query failed");
+ ind_tbl_sz = attr.rss_caps.max_rwq_indirection_table_size;
+ if (ind_tbl_sz < rxq_sz)
+ return clib_error_create ("too many rxqs requested (%d) compared to "
+ "max indirection table size (%d)",
+ rxq_sz, ind_tbl_sz);
+ }
- ind_tbl = vec_new (struct ibv_wq *, vec_len (rd->rxqs));
- vec_foreach_index (i, rd->rxqs)
- ind_tbl[i] = vec_elt_at_index (rd->rxqs, i)->wq;
+ ind_tbl = vec_new (struct ibv_wq *, ind_tbl_sz);
+ vec_foreach_index (i, ind_tbl)
+ vec_elt (ind_tbl, i) = vec_elt (rd->rxqs, i % rxq_sz).wq;
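+  /* E.g. (hypothetical): with 6 rxqs and a 512-entry indirection
+     table, entry i points to rxq i % 6, so two queues get 86 entries
+     and four get 85, the smallest possible imbalance. */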
memset (&rwqia, 0, sizeof (rwqia));
+ ASSERT (is_pow2 (vec_len (ind_tbl)));
rwqia.log_ind_tbl_size = min_log2 (vec_len (ind_tbl));
rwqia.ind_tbl = ind_tbl;
if ((rd->rx_rwq_ind_tbl = ibv_create_rwq_ind_table (rd->ctx, &rwqia)) == 0)
qpia.rx_hash_conf.rx_hash_key = rdma_rss_hash_key;
qpia.rx_hash_conf.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ;
- qpia.rx_hash_conf.rx_hash_fields_mask =
- IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP;
+ qpia.rx_hash_conf.rx_hash_fields_mask = rdma_rss42ibv (rd->rss4);
if ((rd->rx_qp4 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
return clib_error_return_unix (0, "IPv4 Queue Pair create failed");
- qpia.rx_hash_conf.rx_hash_fields_mask =
- IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 | IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP;
+ qpia.rx_hash_conf.rx_hash_fields_mask = rdma_rss62ibv (rd->rss6);
if ((rd->rx_qp6 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
return clib_error_return_unix (0, "IPv6 Queue Pair create failed");
}
static clib_error_t *
-rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd, u32 rxq_size,
- u32 txq_size, u32 rxq_num)
+rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd,
+ rdma_create_if_args_t * args)
{
clib_error_t *err;
vlib_buffer_main_t *bm = vm->buffer_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
+ u32 rxq_num = args->rxq_num;
+ u32 rxq_size = args->rxq_size;
+ u32 txq_size = args->txq_size;
u32 i;
if (rd->ctx == 0)
ethernet_mac_address_generate (rd->hwaddr.bytes);
- if ((rd->mr = ibv_reg_mr (rd->pd, (void *) bm->buffer_mem_start,
- bm->buffer_mem_size,
- IBV_ACCESS_LOCAL_WRITE)) == 0)
- return clib_error_return_unix (0, "Register MR Failed");
- rd->lkey = rd->mr->lkey; /* avoid indirection in datapath */
+ rd->rss4 = args->rss4;
+ rd->rss6 = args->rss6;
/*
* /!\ WARNING /!\ creation order is important
return err;
for (i = 0; i < rxq_num; i++)
- if ((err = rdma_rxq_init (vm, rd, i, rxq_size)))
+ if ((err =
+ rdma_rxq_init (vm, rd, i, rxq_size,
+ args->no_multi_seg, args->max_pktlen)))
return err;
if ((err = rdma_rxq_finalize (vm, rd)))
return err;
args->rxq_size = args->rxq_size ? args->rxq_size : 1024;
args->txq_size = args->txq_size ? args->txq_size : 1024;
- args->rxq_num = args->rxq_num ? args->rxq_num : 1;
-
- if (!is_pow2 (args->rxq_num))
- {
- args->rv = VNET_API_ERROR_INVALID_VALUE;
- args->error =
- clib_error_return (0, "rx queue number must be a power of two");
- goto err0;
- }
+ args->rxq_num = args->rxq_num ? args->rxq_num : 2;
if (args->rxq_size < VLIB_FRAME_SIZE || args->txq_size < VLIB_FRAME_SIZE ||
args->rxq_size > 65535 || args->txq_size > 65535 ||
!is_pow2 (args->rxq_size) || !is_pow2 (args->txq_size))
{
args->rv = VNET_API_ERROR_INVALID_VALUE;
- args->error = clib_error_return (0, "queue size must be a power of two "
- "between %i and 65535",
+ args->error = clib_error_return (0,
+ "queue size must be a power of two "
+ "between %d and 65535",
VLIB_FRAME_SIZE);
goto err0;
}
if (args->mode != RDMA_MODE_IBV)
{
struct mlx5dv_context mlx5dv_attrs = { };
+ mlx5dv_attrs.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
if (mlx5dv_query_device (rd->ctx, &mlx5dv_attrs) == 0)
{
+ uword data_seg_log2_sz =
+ min_log2 (vlib_buffer_get_default_data_size (vm));
+
if ((mlx5dv_attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1))
rd->flags |= RDMA_DEVICE_F_MLX5DV;
+
+/* Enable striding RQ if neither multi-seg nor striding RQ is explicitly
+   disabled, and if the interface supports it. */
+ if (!args->no_multi_seg && !args->disable_striding_rq
+ && data_seg_log2_sz <=
+ mlx5dv_attrs.striding_rq_caps.max_single_stride_log_num_of_bytes
+ && data_seg_log2_sz >=
+ mlx5dv_attrs.striding_rq_caps.min_single_stride_log_num_of_bytes
+ && RDMA_RXQ_MAX_CHAIN_LOG_SZ >=
+ mlx5dv_attrs.striding_rq_caps.min_single_wqe_log_num_of_strides
+ && RDMA_RXQ_MAX_CHAIN_LOG_SZ <=
+ mlx5dv_attrs.striding_rq_caps.max_single_wqe_log_num_of_strides)
+ rd->flags |= RDMA_DEVICE_F_STRIDING_RQ;
}
else
{
}
}
- if ((args->error = rdma_dev_init (vm, rd, args->rxq_size, args->txq_size,
- args->rxq_num)))
+ if ((args->error = rdma_dev_init (vm, rd, args)))
goto err2;
if ((args->error = rdma_register_interface (vnm, rd)))
/*
* FIXME: add support for interrupt mode
* vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, rd->hw_if_index);
- * hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
+ * hw->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE;
*/
- vnet_hw_interface_set_input_node (vnm, rd->hw_if_index,
- rdma_input_node.index);
- vec_foreach_index (qid, rd->rxqs)
- vnet_hw_interface_assign_rx_thread (vnm, rd->hw_if_index, qid, ~0);
+ vnet_hw_if_set_input_node (vnm, rd->hw_if_index, rdma_input_node.index);
+ vec_foreach_index (qid, rd->rxqs)
+ {
+ u32 queue_index = vnet_hw_if_register_rx_queue (
+ vnm, rd->hw_if_index, qid, VNET_HW_IF_RXQ_THREAD_ANY);
+ rd->rxqs[qid].queue_index = queue_index;
+ }
+ vnet_hw_if_update_runtime_data (vnm, rd->hw_if_index);
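+  /* Queues registered with VNET_HW_IF_RXQ_THREAD_ANY are assigned to
+     workers by the rx-queue infra; vnet_hw_if_update_runtime_data()
+     refreshes the per-thread runtime once all queues are registered. */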
vec_free (s);
return;
#undef _
};
-/* *INDENT-OFF* */
VNET_DEVICE_CLASS (rdma_device_class) =
{
.name = "RDMA interface",
.tx_function_error_strings = rdma_tx_func_error_strings,
.mac_addr_change_function = rdma_mac_change,
};
-/* *INDENT-ON* */
clib_error_t *
rdma_init (vlib_main_t * vm)
return 0;
}
-/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (rdma_init) =
{
.runs_after = VLIB_INITS ("pci_bus_init"),
};
-/* *INDENT-OFF* */
/*
* fd.io coding-style-patch-verification: ON