Type: improvement
Change-Id: I7f28a3f03ab1ea8461c52743c61dc23a57965237
Signed-off-by: Damjan Marion <damarion@cisco.com>
mlx5dv_set_ctrl_seg (&tmpl->ctrl, 0, MLX5_OPCODE_SEND, 0,
txq->qp->qp_num, 0, RDMA_MLX5_WQE_DS, 0,
RDMA_TXQ_DV_INVALID_ID);
- /* FIXME: mlx5dv_set_eth_seg(&tmpl->eseg, MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM, 0, 0, 0); */
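+ /* inline header size is fixed in the template; the per-packet tx path only overrides it for packets shorter than MLX5_ETH_L2_INLINE_HEADER_SIZE */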
+ tmpl->eseg.inline_hdr_sz = htobe16 (MLX5_ETH_L2_INLINE_HEADER_SIZE);
mlx5dv_set_data_seg (&tmpl->dseg, 0, rd->lkey, 0);
}
#include <vnet/devices/devices.h>
#include <rdma/rdma.h>
-#ifndef MLX5_ETH_L2_INLINE_HEADER_SIZE
-#define MLX5_ETH_L2_INLINE_HEADER_SIZE 18
-#endif
-
#define RDMA_TX_RETRIES 5
#define RDMA_TXQ_DV_DSEG_SZ(txq) (RDMA_MLX5_WQE_DS * RDMA_TXQ_DV_SQ_SZ(txq))
vlib_buffer_t * b, const u16 tail)
{
u16 sz = b->current_length;
- u16 inline_sz = clib_min (sz, MLX5_ETH_L2_INLINE_HEADER_SIZE);
+ const void *cur = vlib_buffer_get_current (b);
+ uword addr = pointer_to_uword (cur);
clib_memcpy_fast (wqe, tmpl, RDMA_MLX5_WQE_SZ);
-
- wqe->ctrl.opmod_idx_opcode |= ((u32) htobe16 (tail)) << 8;
/* speculatively copy at least MLX5_ETH_L2_INLINE_HEADER_SIZE (18 bytes) */
- const void *cur = vlib_buffer_get_current (b);
clib_memcpy_fast (wqe->eseg.inline_hdr_start,
cur, MLX5_ETH_L2_INLINE_HEADER_SIZE);
- wqe->eseg.inline_hdr_sz = htobe16 (inline_sz);
- wqe->dseg.byte_count = htobe32 (sz - inline_sz);
- wqe->dseg.addr = htobe64 (pointer_to_uword (cur) + inline_sz);
+
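+ /* store the 16-bit WQE index big-endian into ctrl.opmod_idx_opcode through the wqe_index_hi/lo union members, replacing the previous htobe16/shift */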
+ wqe->wqe_index_lo = tail;
+ wqe->wqe_index_hi = tail >> 8;
+ if (PREDICT_TRUE (sz >= MLX5_ETH_L2_INLINE_HEADER_SIZE))
+ {
+ /* inline_hdr_sz is set to MLX5_ETH_L2_INLINE_HEADER_SIZE
+ in the template */
+ wqe->dseg.byte_count = htobe32 (sz - MLX5_ETH_L2_INLINE_HEADER_SIZE);
+ wqe->dseg.addr = htobe64 (addr + MLX5_ETH_L2_INLINE_HEADER_SIZE);
+ }
+ else
+ {
+ /* dseg.byte_count and dseg.addr are set to 0 in the template */
+ wqe->eseg.inline_hdr_sz = htobe16 (sz);
+ }
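+/* fallback for rdma-core headers that do not define MLX5_ETH_L2_INLINE_HEADER_SIZE (18 bytes: a 14-byte Ethernet header plus a 4-byte VLAN tag) */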
+#ifndef MLX5_ETH_L2_INLINE_HEADER_SIZE
+#define MLX5_ETH_L2_INLINE_HEADER_SIZE 18
+#endif
+
typedef struct
{
CLIB_ALIGN_MARK (align0, MLX5_SEND_WQE_BB);
- struct mlx5_wqe_ctrl_seg ctrl;
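+ /* alias the first 32 bits of the control segment (opmod_idx_opcode) so the opcode modifier, WQE index and opcode can be written as individual bytes */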
+ union
+ {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct
+ {
+ u8 opc_mod;
+ u8 wqe_index_hi;
+ u8 wqe_index_lo;
+ u8 opcode;
+ };
+ };
struct mlx5_wqe_eth_seg eseg;
struct mlx5_wqe_data_seg dseg;
} rdma_mlx5_wqe_t;