static struct ibv_flow *
rdma_rxq_init_flow (const rdma_device_t * rd, struct ibv_qp *qp,
const mac_address_t * mac, const mac_address_t * mask,
- u32 flags)
+ u16 ether_type, u32 flags)
{
struct ibv_flow *flow;
struct raw_eth_flow_attr
memcpy (fa.spec_eth.val.dst_mac, mac, sizeof (fa.spec_eth.val.dst_mac));
memcpy (fa.spec_eth.mask.dst_mac, mask, sizeof (fa.spec_eth.mask.dst_mac));
+ if (ether_type)
+ {
+ fa.spec_eth.val.ether_type = ether_type;
+ fa.spec_eth.mask.ether_type = 0xffff;
+ }
+
flow = ibv_create_flow (qp, &fa.attr);
if (!flow)
rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_create_flow() failed");
const mac_address_t all = {.bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0} };
int err;
- err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast);
- if (err)
- return ~0;
-
- err = rdma_rxq_destroy_flow (rd, &rd->flow_ucast);
+ err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6);
+ err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6);
+ err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4);
+ err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4);
if (err)
return ~0;
- rd->flow_ucast = rdma_rxq_init_flow (rd, rd->rx_qp, &all, &all, 0);
- if (!rd->flow_ucast)
+ rd->flow_ucast6 =
+ rdma_rxq_init_flow (rd, rd->rx_qp6, &all, &all, ntohs (ETH_P_IPV6), 0);
+ rd->flow_ucast4 = rdma_rxq_init_flow (rd, rd->rx_qp4, &all, &all, 0, 0);
+ if (!rd->flow_ucast6 || !rd->flow_ucast4)
return ~0;
rd->flags |= RDMA_DEVICE_F_PROMISC;
const mac_address_t mcast = {.bytes = {0x1, 0x0, 0x0, 0x0, 0x0, 0x0} };
int err;
- err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast);
- if (err)
- return ~0;
-
- err = rdma_rxq_destroy_flow (rd, &rd->flow_ucast);
+ err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6);
+ err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6);
+ err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4);
+ err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4);
if (err)
return ~0;
- /* receive only packets with src = our MAC */
- rd->flow_ucast = rdma_rxq_init_flow (rd, rd->rx_qp, &rd->hwaddr, &ucast, 0);
- if (!rd->flow_ucast)
- return ~0;
-
- /* receive multicast packets */
- rd->flow_mcast = rdma_rxq_init_flow (rd, rd->rx_qp, &mcast, &mcast,
- IBV_FLOW_ATTR_FLAGS_DONT_TRAP
- /* let others receive mcast packet too (eg. Linux) */
+ rd->flow_ucast6 =
+ rdma_rxq_init_flow (rd, rd->rx_qp6, &rd->hwaddr, &ucast,
+ ntohs (ETH_P_IPV6), 0);
+ rd->flow_mcast6 =
+ rdma_rxq_init_flow (rd, rd->rx_qp6, &mcast, &mcast, ntohs (ETH_P_IPV6),
+ IBV_FLOW_ATTR_FLAGS_DONT_TRAP
+ /* let others receive mcast packet too (eg. Linux) */
+ );
+ rd->flow_ucast4 =
+ rdma_rxq_init_flow (rd, rd->rx_qp4, &rd->hwaddr, &ucast, 0, 0);
+ rd->flow_mcast4 =
+ rdma_rxq_init_flow (rd, rd->rx_qp4, &mcast, &mcast, 0,
+ IBV_FLOW_ATTR_FLAGS_DONT_TRAP
+ /* let others receive mcast packet too (eg. Linux) */
);
- if (!rd->flow_mcast)
+ if (!rd->flow_ucast6 || !rd->flow_mcast6 || !rd->flow_ucast4
+ || !rd->flow_mcast4)
return ~0;
rd->flags &= ~RDMA_DEVICE_F_PROMISC;
switch (flags)
{
- case 0:
+ case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
return rdma_dev_set_ucast (rd);
case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
return rdma_dev_set_promisc (rd);
static clib_error_t *
rdma_register_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
  /* Register the device with the ethernet layer; on success this fills
   * rd->hw_if_index with the new hardware interface index. */
  clib_error_t *err =
    ethernet_register_interface (vnm, rdma_device_class.index,
				 rd->dev_instance, rd->hwaddr.bytes,
				 &rd->hw_if_index, rdma_flag_change);

  /* On failure rd->hw_if_index is not valid — do NOT touch the hw
   * interface below, just propagate the error to the caller. */
  if (err)
    return err;

  /* Indicate ability to support L3 DMAC filtering and
   * initialize interface to L3 non-promisc mode */
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rd->hw_if_index);
  hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER;
  ethernet_set_flags (vnm, rd->hw_if_index,
		      ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
  return 0;
}
static void
rdma_log (VLIB_LOG_LEVEL_DEBUG, rd, #fn "() failed (rv = %d)", rv); \
}
- _(ibv_destroy_flow, rd->flow_mcast);
- _(ibv_destroy_flow, rd->flow_ucast);
+ _(ibv_destroy_flow, rd->flow_mcast6);
+ _(ibv_destroy_flow, rd->flow_ucast6);
+ _(ibv_destroy_flow, rd->flow_mcast4);
+ _(ibv_destroy_flow, rd->flow_ucast4);
_(ibv_dereg_mr, rd->mr);
vec_foreach (txq, rd->txqs)
{
_(ibv_destroy_cq, rxq->cq);
}
_(ibv_destroy_rwq_ind_table, rd->rx_rwq_ind_tbl);
- _(ibv_destroy_qp, rd->rx_qp);
+ _(ibv_destroy_qp, rd->rx_qp6);
+ _(ibv_destroy_qp, rd->rx_qp4);
_(ibv_dealloc_pd, rd->pd);
_(ibv_close_device, rd->ctx);
#undef _
qpia.rx_hash_conf.rx_hash_key_len = sizeof (rdma_rss_hash_key);
qpia.rx_hash_conf.rx_hash_key = rdma_rss_hash_key;
qpia.rx_hash_conf.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ;
+
qpia.rx_hash_conf.rx_hash_fields_mask =
- IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;
- if ((rd->rx_qp = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
- return clib_error_return_unix (0, "Queue Pair create failed");
+ IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP;
+ if ((rd->rx_qp4 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
+ return clib_error_return_unix (0, "IPv4 Queue Pair create failed");
+
+ qpia.rx_hash_conf.rx_hash_fields_mask =
+ IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 | IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP;
+ if ((rd->rx_qp6 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
+ return clib_error_return_unix (0, "IPv6 Queue Pair create failed");
if (rdma_dev_set_ucast (rd))
return clib_error_return_unix (0, "Set unicast mode failed");
mlx5dv_set_ctrl_seg (&tmpl->ctrl, 0, MLX5_OPCODE_SEND, 0,
txq->qp->qp_num, 0, RDMA_MLX5_WQE_DS, 0,
RDMA_TXQ_DV_INVALID_ID);
- /* FIXME: mlx5dv_set_eth_seg(&tmpl->eseg, MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM, 0, 0, 0); */
+ tmpl->eseg.inline_hdr_sz = htobe16 (MLX5_ETH_L2_INLINE_HEADER_SIZE);
mlx5dv_set_data_seg (&tmpl->dseg, 0, rd->lkey, 0);
}