/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <unistd.h>
#include <fcntl.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>

#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>

#include <rdma/rdma.h>
/* Default RSS hash key (from DPDK MLX driver) */
static u8 rdma_rss_hash_key[] = {
  0x2c, 0xc6, 0x81, 0xd1,
  0x5b, 0xdb, 0xf4, 0xf7,
  0xfc, 0xa2, 0x83, 0x19,
  0xdb, 0x1a, 0x3e, 0x94,
  0x6b, 0x9e, 0x38, 0xd9,
  0x2c, 0x9c, 0x03, 0xd1,
  0xad, 0x99, 0x44, 0xa7,
  0xd9, 0x56, 0x3d, 0x59,
  0x06, 0x3c, 0x25, 0xf3,
  0xfc, 0x1f, 0xdc, 0x2a,
};

rdma_main_t rdma_main;
/* (dev) is of type (rdma_device_t *) */
#define rdma_log__(lvl, dev, f, ...) \
  vlib_log ((lvl), rdma_main.log_class, "%s: " f, (dev)->name, \
	    ##__VA_ARGS__)

#define rdma_log(lvl, dev, f, ...) \
  rdma_log__((lvl), (dev), "%s (%d): " f, strerror(errno), errno, ##__VA_ARGS__)
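
/*
 * Install an ibv flow steering rule on the given QP, matching on the
 * destination MAC (with mask) and optionally on the ethertype. Used below
 * to split unicast/multicast and IPv4/IPv6 traffic between the two RSS QPs.
 */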
static struct ibv_flow *
rdma_rxq_init_flow (const rdma_device_t * rd, struct ibv_qp *qp,
		    const mac_address_t * mac, const mac_address_t * mask,
		    u16 ether_type, u32 flags)
{
  struct ibv_flow *flow;
  struct raw_eth_flow_attr
  {
    struct ibv_flow_attr attr;
    struct ibv_flow_spec_eth spec_eth;
  } __attribute__ ((packed)) fa;

  memset (&fa, 0, sizeof (fa));
  fa.attr.num_of_specs = 1;
  fa.attr.port = 1;
  fa.attr.flags = flags;
  fa.spec_eth.type = IBV_FLOW_SPEC_ETH;
  fa.spec_eth.size = sizeof (struct ibv_flow_spec_eth);

  memcpy (fa.spec_eth.val.dst_mac, mac, sizeof (fa.spec_eth.val.dst_mac));
  memcpy (fa.spec_eth.mask.dst_mac, mask, sizeof (fa.spec_eth.mask.dst_mac));

  if (ether_type)
    {
      fa.spec_eth.val.ether_type = ether_type;
      fa.spec_eth.mask.ether_type = 0xffff;
    }

  flow = ibv_create_flow (qp, &fa.attr);
  if (!flow)
    rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_create_flow() failed");

  return flow;
}
static u32
rdma_rxq_destroy_flow (const rdma_device_t * rd, struct ibv_flow **flow)
{
  if (*flow && ibv_destroy_flow (*flow))
    {
      rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_destroy_flow() failed");
      return ~0;
    }

  *flow = 0;
  return 0;
}
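
/*
 * Promiscuous mode: replace the MAC-based rules with catch-all rules (a
 * zero MAC value with a zero mask matches every destination MAC) on both
 * the IPv6 and IPv4 QPs.
 */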
static u32
rdma_dev_set_promisc (rdma_device_t * rd)
{
  const mac_address_t all = {.bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0} };
  int err;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4);
  if (err)
    return ~0;

  rd->flow_ucast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &all, &all, ntohs (ETH_P_IPV6), 0);
  rd->flow_ucast4 = rdma_rxq_init_flow (rd, rd->rx_qp4, &all, &all, 0, 0);
  if (!rd->flow_ucast6 || !rd->flow_ucast4)
    return ~0;

  rd->flags |= RDMA_DEVICE_F_PROMISC;
  return 0;
}
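
/*
 * Unicast (non-promiscuous) mode: accept frames whose destination MAC
 * matches the interface address exactly (mask ff:ff:ff:ff:ff:ff), plus all
 * multicast frames (only the group bit 01:00:00:00:00:00 is matched).
 * Multicast rules use IBV_FLOW_ATTR_FLAGS_DONT_TRAP so that other
 * consumers (e.g. the Linux kernel) still receive a copy.
 */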
static u32
rdma_dev_set_ucast (rdma_device_t * rd)
{
  const mac_address_t ucast = {.bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
  };
  const mac_address_t mcast = {.bytes = {0x1, 0x0, 0x0, 0x0, 0x0, 0x0} };
  int err;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4);
  if (err)
    return ~0;

  rd->flow_ucast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &rd->hwaddr, &ucast,
			ntohs (ETH_P_IPV6), 0);
  rd->flow_mcast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &mcast, &mcast, ntohs (ETH_P_IPV6),
			IBV_FLOW_ATTR_FLAGS_DONT_TRAP
			/* let others receive mcast packets too (e.g. Linux) */
    );
  rd->flow_ucast4 =
    rdma_rxq_init_flow (rd, rd->rx_qp4, &rd->hwaddr, &ucast, 0, 0);
  rd->flow_mcast4 =
    rdma_rxq_init_flow (rd, rd->rx_qp4, &mcast, &mcast, 0,
			IBV_FLOW_ATTR_FLAGS_DONT_TRAP
			/* let others receive mcast packets too (e.g. Linux) */
    );
  if (!rd->flow_ucast6 || !rd->flow_mcast6 || !rd->flow_ucast4
      || !rd->flow_mcast4)
    return ~0;

  rd->flags &= ~RDMA_DEVICE_F_PROMISC;
  return 0;
}
static clib_error_t *
rdma_mac_change (vnet_hw_interface_t * hw, const u8 * old, const u8 * new)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);
  mac_address_from_bytes (&rd->hwaddr, new);
  if (!(rd->flags & RDMA_DEVICE_F_PROMISC) && rdma_dev_set_ucast (rd))
    {
      mac_address_from_bytes (&rd->hwaddr, old);
      return clib_error_return_unix (0, "MAC update failed");
    }
  return 0;
}
static u32
rdma_dev_change_mtu (rdma_device_t * rd)
{
  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "MTU change not supported");
  return ~0;
}
static u32
rdma_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);

  switch (flags)
    {
    case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
      return rdma_dev_set_ucast (rd);
    case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
      return rdma_dev_set_promisc (rd);
    case ETHERNET_INTERFACE_FLAG_MTU:
      return rdma_dev_change_mtu (rd);
    }

  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unknown flag %x requested", flags);
  return ~0;
}
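
/*
 * Query the port attributes and propagate link state and link speed
 * (active_width x active_speed) to the VPP hardware interface.
 */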
static void
rdma_update_state (vnet_main_t * vnm, rdma_device_t * rd, int port)
{
  struct ibv_port_attr attr;
  u32 width = 0;
  u32 speed = 0;

  if (ibv_query_port (rd->ctx, port, &attr))
    {
      vnet_hw_interface_set_link_speed (vnm, rd->hw_if_index, 0);
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      return;
    }

  /* update state */
  switch (attr.state)
    {
    case IBV_PORT_ACTIVE:	/* fallthrough */
    case IBV_PORT_ACTIVE_DEFER:
      rd->flags |= RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      break;
    default:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      break;
    }
  /* update link speed from the IB width and speed encodings */
  switch (attr.active_width)
    {
    case 1:
      width = 1;
      break;
    case 2:
      width = 4;
      break;
    case 4:
      width = 8;
      break;
    case 8:
      width = 12;
      break;
    }
  switch (attr.active_speed)
    {
    case 1:
      speed = 2500000;
      break;
    case 2:
      speed = 5000000;
      break;
    case 4:			/* fallthrough */
    case 8:
      speed = 10000000;
      break;
    case 16:
      speed = 14000000;
      break;
    case 32:
      speed = 25000000;
      break;
    }
  vnet_hw_interface_set_link_speed (vnm, rd->hw_if_index, width * speed);
}
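
/*
 * Asynchronous events (port state changes, fatal device errors) are
 * delivered on the verbs context async fd; the handlers below translate
 * them into VPP interface state updates.
 */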
static clib_error_t *
rdma_async_event_error_ready (clib_file_t * f)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, f->private_data);
  return clib_error_return (0, "RDMA: %s: async event error", rd->name);
}
static clib_error_t *
rdma_async_event_read_ready (clib_file_t * f)
{
  vnet_main_t *vnm = vnet_get_main ();
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, f->private_data);
  int ret;
  struct ibv_async_event event;

  ret = ibv_get_async_event (rd->ctx, &event);
  if (ret < 0)
    return clib_error_return_unix (0, "ibv_get_async_event() failed");

  switch (event.event_type)
    {
    case IBV_EVENT_PORT_ACTIVE:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_PORT_ERR:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_DEVICE_FATAL:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      vlib_log_emerg (rm->log_class, "%s: fatal error", rd->name);
      break;
    default:
      rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unhandled RDMA async event %d",
		  event.event_type);
      break;
    }

  ibv_ack_async_event (&event);
  return 0;
}
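
/*
 * The async fd is made non-blocking and registered with the VPP file poller
 * (clib_file_add), so events are handled from the main loop without a
 * dedicated thread.
 */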
static clib_error_t *
rdma_async_event_init (rdma_device_t * rd)
{
  clib_file_t t = { 0 };
  int ret;

  /* make RDMA async event fd non-blocking */
  ret = fcntl (rd->ctx->async_fd, F_GETFL);
  if (ret < 0)
    return clib_error_return_unix (0, "fcntl(F_GETFL) failed");

  ret = fcntl (rd->ctx->async_fd, F_SETFL, ret | O_NONBLOCK);
  if (ret < 0)
    return clib_error_return_unix (0, "fcntl(F_SETFL, O_NONBLOCK) failed");

  /* register RDMA async event fd */
  t.read_function = rdma_async_event_read_ready;
  t.file_descriptor = rd->ctx->async_fd;
  t.error_function = rdma_async_event_error_ready;
  t.private_data = rd->dev_instance;
  t.description = format (0, "%v async event", rd->name);

  rd->async_event_clib_file_index = clib_file_add (&file_main, &t);
  return 0;
}
static void
rdma_async_event_cleanup (rdma_device_t * rd)
{
  clib_file_del_by_index (&file_main, rd->async_event_clib_file_index);
}
static clib_error_t *
rdma_register_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
  clib_error_t *err =
    ethernet_register_interface (vnm, rdma_device_class.index,
				 rd->dev_instance, rd->hwaddr.bytes,
				 &rd->hw_if_index, rdma_flag_change);

  /* Indicate ability to support L3 DMAC filtering and
   * initialize interface to L3 non-promisc mode */
  vnet_hw_if_set_caps (vnm, rd->hw_if_index, VNET_HW_IF_CAP_MAC_FILTER);
  ethernet_set_flags (vnm, rd->hw_if_index,
		      ETHERNET_INTERFACE_FLAG_DEFAULT_L3);

  return err;
}
static void
rdma_unregister_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
  vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
  ethernet_delete_interface (vnm, rd->hw_if_index);
}
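
/*
 * Release verbs resources roughly in reverse order of creation: steering
 * flows, the memory region, per-queue QPs/CQs/WQs, the RWQ indirection
 * table, the RSS QPs, the protection domain and finally the device context.
 */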
static void
rdma_dev_cleanup (rdma_device_t * rd)
{
  rdma_main_t *rm = &rdma_main;
  rdma_rxq_t *rxq;
  rdma_txq_t *txq;

#define _(fn, arg) if (arg) \
  { \
    int rv; \
    if ((rv = fn (arg))) \
      rdma_log (VLIB_LOG_LEVEL_DEBUG, rd, #fn "() failed (rv = %d)", rv); \
  }

  _(ibv_destroy_flow, rd->flow_mcast6);
  _(ibv_destroy_flow, rd->flow_ucast6);
  _(ibv_destroy_flow, rd->flow_mcast4);
  _(ibv_destroy_flow, rd->flow_ucast4);
  _(ibv_dereg_mr, rd->mr);
  vec_foreach (txq, rd->txqs)
  {
    _(ibv_destroy_qp, txq->qp);
    _(ibv_destroy_cq, txq->cq);
  }
  vec_foreach (rxq, rd->rxqs)
  {
    _(ibv_destroy_wq, rxq->wq);
    _(ibv_destroy_cq, rxq->cq);
  }
  _(ibv_destroy_rwq_ind_table, rd->rx_rwq_ind_tbl);
  _(ibv_destroy_qp, rd->rx_qp6);
  _(ibv_destroy_qp, rd->rx_qp4);
  _(ibv_dealloc_pd, rd->pd);
  _(ibv_close_device, rd->ctx);
#undef _

  clib_error_free (rd->error);

  vlib_pci_free_device_info (rd->pci);
  pool_put (rm->devices, rd);
}
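
/*
 * RX queue setup: create a completion queue (a mlx5dv CQ with compressed
 * CQEs when Direct Verbs is available), then a receive work queue, move it
 * to READY state and, in Direct Verbs mode, map the CQE/WQE rings and
 * doorbells so the datapath can drive them directly.
 */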
static clib_error_t *
rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc,
	       u8 no_multi_seg, u16 max_pktlen)
{
  rdma_rxq_t *rxq;
  struct ibv_wq_init_attr wqia;
  struct ibv_cq_init_attr_ex cqa = { };
  struct ibv_wq_attr wqa;
  struct ibv_cq_ex *cqex;
  struct mlx5dv_wq_init_attr dv_wqia = { };
  int is_mlx5dv = ! !(rd->flags & RDMA_DEVICE_F_MLX5DV);
  int is_striding = ! !(rd->flags & RDMA_DEVICE_F_STRIDING_RQ);

  vec_validate_aligned (rd->rxqs, qid, CLIB_CACHE_LINE_BYTES);
  rxq = vec_elt_at_index (rd->rxqs, qid);
  rxq->buf_sz = vlib_buffer_get_default_data_size (vm);
  vec_validate_aligned (rxq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);
  cqa.cqe = n_desc;
  if (is_mlx5dv)
    {
      struct mlx5dv_cq_init_attr dvcq = { };
      dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
      dvcq.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;

      if ((cqex = mlx5dv_create_cq (rd->ctx, &cqa, &dvcq)) == 0)
	return clib_error_return_unix (0, "Create mlx5dv rx CQ Failed");
    }
  else if ((cqex = ibv_create_cq_ex (rd->ctx, &cqa)) == 0)
    return clib_error_return_unix (0, "Create CQ Failed");

  rxq->cq = ibv_cq_ex_to_cq (cqex);

  memset (&wqia, 0, sizeof (wqia));
  wqia.wq_type = IBV_WQT_RQ;
  wqia.max_wr = n_desc;
  wqia.pd = rd->pd;
  wqia.cq = rxq->cq;
  if (is_striding)
    {
      /* In STRIDING_RQ mode, map a descriptor to a stride, not a full WQE buffer */
      uword data_seg_log2_sz =
	min_log2 (vlib_buffer_get_default_data_size (vm));
      rxq->buf_sz = 1 << data_seg_log2_sz;
      /* The trick is also to map a descriptor to a data segment in the WQE
         SG list. The number of strides per WQE and the size of a WQE (in
         16-byte words) must both be powers of two.
         Moreover, in striding RQ mode, WQEs must include the SRQ header,
         which occupies one 16-byte word. That is why WQEs have
         2*RDMA_RXQ_MAX_CHAIN_SZ 16-byte words:
         - one for the SRQ header
         - RDMA_RXQ_MAX_CHAIN_SZ for the data segments (each mapped to a
           stride and a vlib_buffer)
         - RDMA_RXQ_MAX_CHAIN_SZ-1 null data segments */
      int max_chain_log_sz =
	max_pktlen ? max_log2 ((max_pktlen / (rxq->buf_sz)) +
			       1) : RDMA_RXQ_MAX_CHAIN_LOG_SZ;
      max_chain_log_sz = clib_max (max_chain_log_sz, 3);
      wqia.max_sge = 1 << max_chain_log_sz;
      dv_wqia.comp_mask = MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
      dv_wqia.striding_rq_attrs.two_byte_shift_en = 0;
      dv_wqia.striding_rq_attrs.single_wqe_log_num_of_strides =
	max_chain_log_sz;
      dv_wqia.striding_rq_attrs.single_stride_log_num_of_bytes =
	data_seg_log2_sz;
      wqia.max_wr >>= max_chain_log_sz;
      rxq->log_wqe_sz = max_chain_log_sz + 1;
      rxq->log_stride_per_wqe = max_chain_log_sz;
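      /* Sizing example (illustrative only): assuming 2048-byte data
         segments (data_seg_log2_sz = 11) and max_pktlen = 9000, a chained
         packet needs 9000/2048 + 1 = 5 segments, so max_chain_log_sz =
         max_log2 (5) = 3, i.e. 8 strides per WQE and a WQE of 2*8 16-byte
         words. The WQ depth in WQEs is reduced accordingly
         (max_wr >>= max_chain_log_sz) so that n_desc keeps counting
         per-buffer descriptors. */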
    }
  else
    {
      /* In non-STRIDING_RQ mode, if multi-seg is not disabled, each WQE is
         a SG list of data segments, each pointing to a vlib_buffer. */
      if (no_multi_seg)
	{
	  wqia.max_sge = 1;
	  rxq->log_wqe_sz = 0;
	  rxq->n_ds_per_wqe = 1;
	}
      else
	{
	  int max_chain_sz =
	    max_pktlen ? (max_pktlen / (rxq->buf_sz)) +
	    1 : RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ;
	  int max_chain_log_sz = max_log2 (max_chain_sz);
	  wqia.max_sge = 1 << max_chain_log_sz;
	  rxq->log_wqe_sz = max_chain_log_sz;
	  rxq->n_ds_per_wqe = max_chain_sz;
	}
    }
  if (is_mlx5dv)
    {
      if ((rxq->wq = mlx5dv_create_wq (rd->ctx, &wqia, &dv_wqia)))
	{
	  rxq->wq->events_completed = 0;
	  pthread_mutex_init (&rxq->wq->mutex, NULL);
	  pthread_cond_init (&rxq->wq->cond, NULL);
	}
      else
	return clib_error_return_unix (0, "Create WQ Failed");
    }
  else if ((rxq->wq = ibv_create_wq (rd->ctx, &wqia)) == 0)
    return clib_error_return_unix (0, "Create WQ Failed");

  memset (&wqa, 0, sizeof (wqa));
  wqa.attr_mask = IBV_WQ_ATTR_STATE;
  wqa.wq_state = IBV_WQS_RDY;
  if (ibv_modify_wq (rxq->wq, &wqa) != 0)
    return clib_error_return_unix (0, "Modify WQ (RDY) Failed");
  if (is_mlx5dv)
    {
      struct mlx5dv_obj obj = { };
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_rwq dv_rwq;
      u64 qw0;
      u64 qw0_nullseg = 0;
      u32 wqe_sz_mask = (1 << rxq->log_wqe_sz) - 1;

      obj.cq.in = rxq->cq;
      obj.cq.out = &dv_cq;
      obj.rwq.in = rxq->wq;
      obj.rwq.out = &dv_rwq;

      if ((mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ)))
	return clib_error_return_unix (0, "mlx5dv: failed to init rx obj");

      if (dv_cq.cqe_size != sizeof (mlx5dv_cqe_t))
	return clib_error_return_unix (0, "mlx5dv: incompatible rx CQE size");

      rxq->log2_cq_size = max_log2 (dv_cq.cqe_cnt);
      rxq->cqes = (mlx5dv_cqe_t *) dv_cq.buf;
      rxq->cq_db = (volatile u32 *) dv_cq.dbrec;
      rxq->cqn = dv_cq.cqn;

      rxq->wqes = (mlx5dv_wqe_ds_t *) dv_rwq.buf;
      rxq->wq_db = (volatile u32 *) dv_rwq.dbrec;
      rxq->wq_stride = dv_rwq.stride;
      rxq->wqe_cnt = dv_rwq.wqe_cnt;

      qw0 = clib_host_to_net_u32 (rxq->buf_sz);
      qw0 |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;
      qw0_nullseg |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;
      /* Prefill the different 16-byte words of the WQ.
         - If not in striding RQ mode, for each WQE, init the first
           RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ words with qw0 and the rest of
           the WQE with null segments.
         - If in striding RQ mode, for each WQE, the first
           RDMA_RXQ_MAX_CHAIN_SZ + 1 16-byte words are initialised with qw0,
           the rest are null segments */
      for (int i = 0; i < rxq->wqe_cnt << rxq->log_wqe_sz; i++)
	if ((!is_striding
	     && ((i & wqe_sz_mask) < rxq->n_ds_per_wqe))
	    || (is_striding
		&& ((i == 0)
		    || !(((i - 1) >> rxq->log_stride_per_wqe) & 0x1))))
	  rxq->wqes[i].dsz_and_lkey = qw0;
	else
	  rxq->wqes[i].dsz_and_lkey = qw0_nullseg;

      for (int i = 0; i < (1 << rxq->log2_cq_size); i++)
	rxq->cqes[i].opcode_cqefmt_se_owner = 0xff;
    }
  if (!is_striding)
    {
      vec_validate_aligned (rxq->second_bufs, n_desc - 1,
			    CLIB_CACHE_LINE_BYTES);
      vec_validate_aligned (rxq->n_used_per_chain, n_desc - 1,
			    CLIB_CACHE_LINE_BYTES);
      rxq->n_total_additional_segs = n_desc * (rxq->n_ds_per_wqe - 1);
      for (int i = 0; i < n_desc; i++)
	rxq->n_used_per_chain[i] = rxq->n_ds_per_wqe - 1;
    }

  return 0;
}
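
/*
 * Map the plugin RSS configuration to ibv hash field masks. By default
 * (AUTO), both address families hash on the source/destination addresses
 * plus TCP ports.
 */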
static u64
rdma_rss42ibv (const rdma_rss4_t rss4)
{
  switch (rss4)
    {
    case RDMA_RSS4_IP:
      return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;
    case RDMA_RSS4_IP_UDP:
      return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
	IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP;
    case RDMA_RSS4_AUTO: /* fallthrough */
    case RDMA_RSS4_IP_TCP:
      return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
	IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
    }
  ASSERT (0);
  return 0;
}
static u64
rdma_rss62ibv (const rdma_rss6_t rss6)
{
  switch (rss6)
    {
    case RDMA_RSS6_IP:
      return IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6;
    case RDMA_RSS6_IP_UDP:
      return IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
	IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP;
    case RDMA_RSS6_AUTO: /* fallthrough */
    case RDMA_RSS6_IP_TCP:
      return IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
	IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
    }
  ASSERT (0);
  return 0;
}
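
/*
 * RSS finalization: build a receive WQ indirection table covering all RX
 * queues (padded to a power-of-two size by repeating queues round-robin)
 * and create one hash QP per address family, both using the Toeplitz
 * function with the static key above.
 */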
static clib_error_t *
rdma_rxq_finalize (vlib_main_t *vm, rdma_device_t *rd)
{
  struct ibv_rwq_ind_table_init_attr rwqia;
  struct ibv_qp_init_attr_ex qpia;
  struct ibv_wq **ind_tbl;
  const u32 rxq_sz = vec_len (rd->rxqs);
  u32 ind_tbl_sz = rxq_sz;
  u32 i;

  if (!is_pow2 (ind_tbl_sz))
    {
      /* in case we do not have a power-of-2 number of rxq, we try to use the
       * maximum supported to minimize the imbalance */
      struct ibv_device_attr_ex attr;
      if (ibv_query_device_ex (rd->ctx, 0, &attr))
	return clib_error_return_unix (0, "device query failed");
      ind_tbl_sz = attr.rss_caps.max_rwq_indirection_table_size;
      if (ind_tbl_sz < rxq_sz)
	return clib_error_create ("too many rxqs requested (%d) compared to "
				  "max indirection table size (%d)",
				  rxq_sz, ind_tbl_sz);
    }

  ind_tbl = vec_new (struct ibv_wq *, ind_tbl_sz);
  vec_foreach_index (i, ind_tbl)
    vec_elt (ind_tbl, i) = vec_elt (rd->rxqs, i % rxq_sz).wq;
  memset (&rwqia, 0, sizeof (rwqia));
  ASSERT (is_pow2 (vec_len (ind_tbl)));
  rwqia.log_ind_tbl_size = min_log2 (vec_len (ind_tbl));
  rwqia.ind_tbl = ind_tbl;
  if ((rd->rx_rwq_ind_tbl = ibv_create_rwq_ind_table (rd->ctx, &rwqia)) == 0)
    return clib_error_return_unix (0, "RWQ indirection table create failed");

  memset (&qpia, 0, sizeof (qpia));
  qpia.qp_type = IBV_QPT_RAW_PACKET;
  qpia.comp_mask =
    IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_IND_TABLE |
    IBV_QP_INIT_ATTR_RX_HASH;
  qpia.pd = rd->pd;
  qpia.rwq_ind_tbl = rd->rx_rwq_ind_tbl;
  STATIC_ASSERT_SIZEOF (rdma_rss_hash_key, 40);
  qpia.rx_hash_conf.rx_hash_key_len = sizeof (rdma_rss_hash_key);
  qpia.rx_hash_conf.rx_hash_key = rdma_rss_hash_key;
  qpia.rx_hash_conf.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ;

  qpia.rx_hash_conf.rx_hash_fields_mask = rdma_rss42ibv (rd->rss4);
  if ((rd->rx_qp4 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
    return clib_error_return_unix (0, "IPv4 Queue Pair create failed");

  qpia.rx_hash_conf.rx_hash_fields_mask = rdma_rss62ibv (rd->rss6);
  if ((rd->rx_qp6 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
    return clib_error_return_unix (0, "IPv6 Queue Pair create failed");

  if (rdma_dev_set_ucast (rd))
    return clib_error_return_unix (0, "Set unicast mode failed");

  return 0;
}
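
/*
 * TX queue setup: a raw-packet QP with its own CQ, walked through the
 * INIT -> RTR -> RTS state transitions. In Direct Verbs mode the SQ/CQ
 * rings and doorbells are mapped and a send WQE template is pre-built for
 * the datapath.
 */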
static clib_error_t *
rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
{
  rdma_txq_t *txq;
  struct ibv_qp_init_attr qpia;
  struct ibv_qp_attr qpa;
  int qp_flags;

  vec_validate_aligned (rd->txqs, qid, CLIB_CACHE_LINE_BYTES);
  txq = vec_elt_at_index (rd->txqs, qid);
  ASSERT (is_pow2 (n_desc));
  txq->bufs_log2sz = min_log2 (n_desc);
  vec_validate_aligned (txq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);

  if ((txq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0)
    return clib_error_return_unix (0, "Create CQ Failed");

  memset (&qpia, 0, sizeof (qpia));
  qpia.send_cq = txq->cq;
  qpia.recv_cq = txq->cq;
  qpia.cap.max_send_wr = n_desc;
  qpia.cap.max_send_sge = 1;
  qpia.qp_type = IBV_QPT_RAW_PACKET;

  if ((txq->qp = ibv_create_qp (rd->pd, &qpia)) == 0)
    return clib_error_return_unix (0, "Queue Pair create failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE | IBV_QP_PORT;
  qpa.qp_state = IBV_QPS_INIT;
  qpa.port_num = 1;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (init) Failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTR;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (receive) Failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTS;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (send) Failed");

  txq->ibv_cq = txq->cq;
  txq->ibv_qp = txq->qp;

  if (rd->flags & RDMA_DEVICE_F_MLX5DV)
    {
      rdma_mlx5_wqe_t *tmpl = (void *) txq->dv_wqe_tmpl;
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_qp dv_qp;
      struct mlx5dv_obj obj = { };

      obj.cq.in = txq->cq;
      obj.cq.out = &dv_cq;
      obj.qp.in = txq->qp;
      obj.qp.out = &dv_qp;

      if (mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP))
	return clib_error_return_unix (0, "DV init obj failed");

      if (RDMA_TXQ_BUF_SZ (txq) > dv_qp.sq.wqe_cnt
	  || !is_pow2 (dv_qp.sq.wqe_cnt)
	  || sizeof (rdma_mlx5_wqe_t) != dv_qp.sq.stride
	  || (uword) dv_qp.sq.buf % sizeof (rdma_mlx5_wqe_t))
	return clib_error_return (0, "Unsupported DV SQ parameters");

      if (RDMA_TXQ_BUF_SZ (txq) > dv_cq.cqe_cnt
	  || !is_pow2 (dv_cq.cqe_cnt)
	  || sizeof (struct mlx5_cqe64) != dv_cq.cqe_size
	  || (uword) dv_cq.buf % sizeof (struct mlx5_cqe64))
	return clib_error_return (0, "Unsupported DV CQ parameters");

      /* get SQ and doorbell addresses */
      txq->dv_sq_wqes = dv_qp.sq.buf;
      txq->dv_sq_dbrec = dv_qp.dbrec;
      txq->dv_sq_db = dv_qp.bf.reg;
      txq->dv_sq_log2sz = min_log2 (dv_qp.sq.wqe_cnt);

      /* get CQ and doorbell addresses */
      txq->dv_cq_cqes = dv_cq.buf;
      txq->dv_cq_dbrec = dv_cq.dbrec;
      txq->dv_cq_log2sz = min_log2 (dv_cq.cqe_cnt);

      /* init tx desc template */
      STATIC_ASSERT_SIZEOF (txq->dv_wqe_tmpl, sizeof (*tmpl));
      mlx5dv_set_ctrl_seg (&tmpl->ctrl, 0, MLX5_OPCODE_SEND, 0,
			   txq->qp->qp_num, 0, RDMA_MLX5_WQE_DS, 0,
			   RDMA_TXQ_DV_INVALID_ID);
      tmpl->eseg.inline_hdr_sz = htobe16 (MLX5_ETH_L2_INLINE_HEADER_SIZE);
      mlx5dv_set_data_seg (&tmpl->dseg, 0, rd->lkey, 0);
    }

  return 0;
}
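
/*
 * Device bring-up: the whole VPP buffer memory is registered as a single
 * memory region, so any vlib_buffer can be posted with the one cached lkey
 * and no per-packet registration.
 */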
static clib_error_t *
rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd,
	       rdma_create_if_args_t * args)
{
  clib_error_t *err;
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 rxq_num = args->rxq_num;
  u32 rxq_size = args->rxq_size;
  u32 txq_size = args->txq_size;
  u32 i;

  if (rd->ctx == 0)
    return clib_error_return_unix (0, "Device Open Failed");

  if ((rd->pd = ibv_alloc_pd (rd->ctx)) == 0)
    return clib_error_return_unix (0, "PD Alloc Failed");

  if ((rd->mr = ibv_reg_mr (rd->pd, (void *) bm->buffer_mem_start,
			    bm->buffer_mem_size,
			    IBV_ACCESS_LOCAL_WRITE)) == 0)
    return clib_error_return_unix (0, "Register MR Failed");

  rd->lkey = rd->mr->lkey;	/* avoid indirection in datapath */

  ethernet_mac_address_generate (rd->hwaddr.bytes);

  rd->rss4 = args->rss4;
  rd->rss6 = args->rss6;

  /*
   * /!\ WARNING /!\ creation order is important
   * We *must* create TX queues *before* RX queues, otherwise we will receive
   * the broadcast packets we sent
   */
  for (i = 0; i < tm->n_vlib_mains; i++)
    if ((err = rdma_txq_init (vm, rd, i, txq_size)))
      return err;

  for (i = 0; i < rxq_num; i++)
    if ((err =
	 rdma_rxq_init (vm, rd, i, rxq_size,
			args->no_multi_seg, args->max_pktlen)))
      return err;
  if ((err = rdma_rxq_finalize (vm, rd)))
    return err;

  return 0;
}
static u32
sysfs_path_to_pci_addr (char *path, vlib_pci_addr_t * addr)
{
  unformat_input_t in;
  u8 *s;
  uword rv;

  s = clib_sysfs_link_to_name (path);
  if (!s)
    return 0;
  unformat_init_string (&in, (char *) s, strlen ((char *) s));
  rv = unformat (&in, "%U", unformat_vlib_pci_addr, addr);
  unformat_free (&in);
  vec_free (s);
  return rv;
}
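
/*
 * Interface creation: validate queue sizes, resolve the Linux netdev to its
 * PCI address, find and open the matching verbs device, probe mlx5 Direct
 * Verbs and striding RQ support, then initialize the queues and register
 * the ethernet interface.
 */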
void
rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args)
{
  vnet_main_t *vnm = vnet_get_main ();
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd;
  vlib_pci_addr_t pci_addr;
  struct ibv_device **dev_list;
  int n_devs;
  u8 *s = 0;
  u16 qid;
  int i;

  args->rxq_size = args->rxq_size ? args->rxq_size : 1024;
  args->txq_size = args->txq_size ? args->txq_size : 1024;
  args->rxq_num = args->rxq_num ? args->rxq_num : 2;

  if (args->rxq_size < VLIB_FRAME_SIZE || args->txq_size < VLIB_FRAME_SIZE ||
      args->rxq_size > 65535 || args->txq_size > 65535 ||
      !is_pow2 (args->rxq_size) || !is_pow2 (args->txq_size))
    {
      args->rv = VNET_API_ERROR_INVALID_VALUE;
      args->error = clib_error_return (0,
				       "queue size must be a power of two "
				       "between %d and 65535",
				       VLIB_FRAME_SIZE);
    }

  dev_list = ibv_get_device_list (&n_devs);
  if (n_devs == 0)
    {
      args->error =
	clib_error_return_unix (0,
				"no RDMA devices available. Is the ib_uverbs module loaded?");
    }
  /* get PCI address */
  s = format (0, "/sys/class/net/%s/device%c", args->ifname, 0);
  if (sysfs_path_to_pci_addr ((char *) s, &pci_addr) == 0)
    {
      args->error =
	clib_error_return (0, "cannot find PCI address for device ");
    }

  pool_get_zero (rm->devices, rd);
  rd->dev_instance = rd - rm->devices;
  rd->per_interface_next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  rd->linux_ifname = format (0, "%s", args->ifname);

  if (!args->name || 0 == args->name[0])
    rd->name = format (0, "%s/%d", args->ifname, rd->dev_instance);
  else
    rd->name = format (0, "%s", args->name);

  rd->pci = vlib_pci_get_device_info (vm, &pci_addr, &args->error);

  /* if we failed to parse NUMA node, default to 0 */
  if (-1 == rd->pci->numa_node)
    rd->pci->numa_node = 0;

  rd->pool = vlib_buffer_pool_get_default_for_numa (vm, rd->pci->numa_node);

  if (strncmp ((char *) rd->pci->driver_name, "mlx5_core", 9))
    {
      args->error =
	clib_error_return (0,
			   "invalid interface (only mlx5 supported for now)");
    }
  for (i = 0; i < n_devs; i++)
    {
      vlib_pci_addr_t addr;

      vec_reset_length (s);
      s = format (s, "%s/device%c", dev_list[i]->dev_path, 0);

      if (sysfs_path_to_pci_addr ((char *) s, &addr) == 0)
	continue;

      if (addr.as_u32 != rd->pci->addr.as_u32)
	continue;

      if ((rd->ctx = ibv_open_device (dev_list[i])))
	break;
    }
  if (args->mode != RDMA_MODE_IBV)
    {
      struct mlx5dv_context mlx5dv_attrs = { };
      mlx5dv_attrs.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;

      if (mlx5dv_query_device (rd->ctx, &mlx5dv_attrs) == 0)
	{
	  uword data_seg_log2_sz =
	    min_log2 (vlib_buffer_get_default_data_size (vm));

	  if ((mlx5dv_attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1))
	    rd->flags |= RDMA_DEVICE_F_MLX5DV;

	  /* Enable striding RQ if neither multi-seg nor striding RQ is
	     explicitly disabled, and if the interface supports it. */
	  if (!args->no_multi_seg && !args->disable_striding_rq
	      && data_seg_log2_sz <=
	      mlx5dv_attrs.striding_rq_caps.max_single_stride_log_num_of_bytes
	      && data_seg_log2_sz >=
	      mlx5dv_attrs.striding_rq_caps.min_single_stride_log_num_of_bytes
	      && RDMA_RXQ_MAX_CHAIN_LOG_SZ >=
	      mlx5dv_attrs.striding_rq_caps.min_single_wqe_log_num_of_strides
	      && RDMA_RXQ_MAX_CHAIN_LOG_SZ <=
	      mlx5dv_attrs.striding_rq_caps.max_single_wqe_log_num_of_strides)
	    rd->flags |= RDMA_DEVICE_F_STRIDING_RQ;
	}
      else if (args->mode == RDMA_MODE_DV)
	{
	  args->error = clib_error_return (0, "Direct Verbs mode not "
					   "supported on this interface");
	}
    }

  if ((args->error = rdma_dev_init (vm, rd, args)))

  if ((args->error = rdma_register_interface (vnm, rd)))

  if ((args->error = rdma_async_event_init (rd)))

  rdma_update_state (vnm, rd, 1);

  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, rd->hw_if_index);
  args->sw_if_index = rd->sw_if_index = sw->sw_if_index;
  /*
   * FIXME: add support for interrupt mode
   * vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, rd->hw_if_index);
   * hw->caps |= VNET_HW_IF_CAP_INT_MODE;
   */
  vnet_hw_if_set_input_node (vnm, rd->hw_if_index, rdma_input_node.index);

  vec_foreach_index (qid, rd->rxqs)
    {
      u32 queue_index = vnet_hw_if_register_rx_queue (
	vnm, rd->hw_if_index, qid, VNET_HW_IF_RXQ_THREAD_ANY);
      rd->rxqs[qid].queue_index = queue_index;
    }
  vnet_hw_if_update_runtime_data (vnm, rd->hw_if_index);
  rdma_unregister_interface (vnm, rd);
  rdma_dev_cleanup (rd);
  ibv_free_device_list (dev_list);
  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
  vlib_log_err (rm->log_class, "%U", format_clib_error, args->error);
void
rdma_delete_if (vlib_main_t * vm, rdma_device_t * rd)
{
  rdma_async_event_cleanup (rd);
  rdma_unregister_interface (vnet_get_main (), rd);
  rdma_dev_cleanup (rd);
}
static clib_error_t *
rdma_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hi->dev_instance);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  if (rd->flags & RDMA_DEVICE_F_ERROR)
    return clib_error_return (0, "device is in error state");

  if (is_up)
    {
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      rd->flags |= RDMA_DEVICE_F_ADMIN_UP;
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      rd->flags &= ~RDMA_DEVICE_F_ADMIN_UP;
    }
  return 0;
}
static void
rdma_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  rdma_main_t *rm = &rdma_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  rdma_device_t *rd = pool_elt_at_index (rm->devices, hw->dev_instance);
  rd->per_interface_next_index =
    ~0 ==
    node_index ? VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT :
    vlib_node_add_next (vlib_get_main (), rdma_input_node.index, node_index);
}
static char *rdma_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_rdma_tx_func_error
#undef _
};

VNET_DEVICE_CLASS (rdma_device_class) =
{
  .name = "RDMA interface",
  .format_device = format_rdma_device,
  .format_device_name = format_rdma_device_name,
  .admin_up_down_function = rdma_interface_admin_up_down,
  .rx_redirect_to_node = rdma_set_interface_next_node,
  .tx_function_n_errors = RDMA_TX_N_ERROR,
  .tx_function_error_strings = rdma_tx_func_error_strings,
  .mac_addr_change_function = rdma_mac_change,
};
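
/*
 * Plugin init: register the log class and prepare a per-thread
 * vlib_buffer_t template used by the RX datapath when initializing
 * received buffers.
 */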
static clib_error_t *
rdma_init (vlib_main_t * vm)
{
  rdma_main_t *rm = &rdma_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  rm->log_class = vlib_log_register_class ("rdma", 0);

  /* vlib_buffer_t template */
  vec_validate_aligned (rm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  for (int i = 0; i < tm->n_vlib_mains; i++)
    {
      rdma_per_thread_data_t *ptd = vec_elt_at_index (rm->per_thread_data, i);
      clib_memset (&ptd->buffer_template, 0, sizeof (vlib_buffer_t));
      ptd->buffer_template.flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
      ptd->buffer_template.ref_count = 1;
      vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_TX] = (u32) ~ 0;
    }

  return 0;
}
VLIB_INIT_FUNCTION (rdma_init) =
{
  .runs_after = VLIB_INITS ("pci_bus_init"),
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */