/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <unistd.h>
#include <fcntl.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>

#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>

#include <rdma/rdma.h>

/* Default RSS hash key (from DPDK MLX driver) */
static u8 rdma_rss_hash_key[] = {
  0x2c, 0xc6, 0x81, 0xd1,
  0x5b, 0xdb, 0xf4, 0xf7,
  0xfc, 0xa2, 0x83, 0x19,
  0xdb, 0x1a, 0x3e, 0x94,
  0x6b, 0x9e, 0x38, 0xd9,
  0x2c, 0x9c, 0x03, 0xd1,
  0xad, 0x99, 0x44, 0xa7,
  0xd9, 0x56, 0x3d, 0x59,
  0x06, 0x3c, 0x25, 0xf3,
  0xfc, 0x1f, 0xdc, 0x2a,
};

rdma_main_t rdma_main;

#define rdma_log__(lvl, dev, f, ...) \
  do { \
      vlib_log((lvl), rdma_main.log_class, "%s: " f, \
               &(dev)->name, ##__VA_ARGS__); \
  } while (0)

#define rdma_log(lvl, dev, f, ...) \
  rdma_log__((lvl), (dev), "%s (%d): " f, strerror(errno), errno, ##__VA_ARGS__)
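
/*
 * Install an ibverbs flow steering rule on qp: match the Ethernet
 * destination MAC against (mac & mask) and steer matching packets to
 * the queue pair. Returns the flow handle, or 0 on failure (logged).
 */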
static struct ibv_flow *
rdma_rxq_init_flow (const rdma_device_t * rd, struct ibv_qp *qp,
		    const mac_address_t * mac, const mac_address_t * mask,
		    u32 flags)
{
  struct ibv_flow *flow;
  struct raw_eth_flow_attr
  {
    struct ibv_flow_attr attr;
    struct ibv_flow_spec_eth spec_eth;
  } __attribute__ ((packed)) fa;

  memset (&fa, 0, sizeof (fa));
  fa.attr.num_of_specs = 1;
  fa.attr.port = 1;
  fa.attr.flags = flags;
  fa.spec_eth.type = IBV_FLOW_SPEC_ETH;
  fa.spec_eth.size = sizeof (struct ibv_flow_spec_eth);

  memcpy (fa.spec_eth.val.dst_mac, mac, sizeof (fa.spec_eth.val.dst_mac));
  memcpy (fa.spec_eth.mask.dst_mac, mask, sizeof (fa.spec_eth.mask.dst_mac));

  flow = ibv_create_flow (qp, &fa.attr);
  if (!flow)
    rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_create_flow() failed");

  return flow;
}

static u32
rdma_rxq_destroy_flow (const rdma_device_t * rd, struct ibv_flow **flow)
{
  if (!*flow)
    return 0;

  if (ibv_destroy_flow (*flow))
    {
      rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_destroy_flow() failed");
      return ~0;
    }

  *flow = 0;
  return 0;
}
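
/*
 * Promiscuous mode: drop the current unicast/multicast steering rules
 * and install a single all-zero MAC+mask rule matching every packet.
 */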
static u32
rdma_dev_set_promisc (rdma_device_t * rd)
{
  const mac_address_t all = {.bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0} };
  int err;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast);
  if (err)
    return ~0;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_ucast);
  if (err)
    return ~0;

  rd->flow_ucast = rdma_rxq_init_flow (rd, rd->rx_qp, &all, &all, 0);
  if (!rd->flow_ucast)
    return ~0;

  rd->flags |= RDMA_DEVICE_F_PROMISC;
  return 0;
}
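
/*
 * Unicast mode: install one exact-match rule for our own MAC and one
 * rule for multicast. The multicast rule uses DONT_TRAP so other
 * consumers (eg. Linux) still see those packets.
 */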
static u32
rdma_dev_set_ucast (rdma_device_t * rd)
{
  const mac_address_t ucast = {.bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
  };
  const mac_address_t mcast = {.bytes = {0x1, 0x0, 0x0, 0x0, 0x0, 0x0} };
  int err;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast);
  if (err)
    return ~0;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_ucast);
  if (err)
    return ~0;

  /* receive only packets with dst = our MAC */
  rd->flow_ucast = rdma_rxq_init_flow (rd, rd->rx_qp, &rd->hwaddr, &ucast, 0);
  if (!rd->flow_ucast)
    return ~0;

  /* receive multicast packets */
  rd->flow_mcast = rdma_rxq_init_flow (rd, rd->rx_qp, &mcast, &mcast,
				       IBV_FLOW_ATTR_FLAGS_DONT_TRAP
				       /* let others receive mcast packets too (eg. Linux) */
    );
  if (!rd->flow_mcast)
    return ~0;

  rd->flags &= ~RDMA_DEVICE_F_PROMISC;
  return 0;
}

static clib_error_t *
rdma_mac_change (vnet_hw_interface_t * hw, const u8 * old, const u8 * new)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);
  mac_address_from_bytes (&rd->hwaddr, new);
  if (!(rd->flags & RDMA_DEVICE_F_PROMISC) && rdma_dev_set_ucast (rd))
    {
      mac_address_from_bytes (&rd->hwaddr, old);
      return clib_error_return_unix (0, "MAC update failed");
    }
  return 0;
}

static u32
rdma_dev_change_mtu (rdma_device_t * rd)
{
  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "MTU change not supported");
  return ~0;
}

static u32
rdma_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);

  switch (flags)
    {
    case 0:
      return rdma_dev_set_ucast (rd);
    case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
      return rdma_dev_set_promisc (rd);
    case ETHERNET_INTERFACE_FLAG_MTU:
      return rdma_dev_change_mtu (rd);
    }

  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unknown flag %x requested", flags);
  return ~0;
}
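
/*
 * Query the port and propagate link state and speed to VPP.
 * active_width and active_speed are IBTA encodings translated below
 * into a lane count and a per-lane speed in kbps; their product is the
 * link speed reported to VPP.
 */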
static void
rdma_update_state (vnet_main_t * vnm, rdma_device_t * rd, int port)
{
  struct ibv_port_attr attr;
  u32 width = 0;
  u32 speed = 0;

  if (ibv_query_port (rd->ctx, port, &attr))
    {
      vnet_hw_interface_set_link_speed (vnm, rd->hw_if_index, 0);
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      return;
    }

  /* update state */
  switch (attr.state)
    {
    case IBV_PORT_ACTIVE:	/* fallthrough */
    case IBV_PORT_ACTIVE_DEFER:
      rd->flags |= RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      break;
    default:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      break;
    }

  /* update speed */
  switch (attr.active_width)
    {
    case 1:
      width = 1;
      break;
    case 2:
      width = 4;
      break;
    case 4:
      width = 8;
      break;
    case 8:
      width = 12;
      break;
    }
  switch (attr.active_speed)
    {
    case 1:
      speed = 2500000;
      break;
    case 2:
      speed = 5000000;
      break;
    case 4:	/* fallthrough */
    case 8:
      speed = 10000000;
      break;
    case 16:
      speed = 14000000;
      break;
    case 32:
      speed = 25000000;
      break;
    }
  vnet_hw_interface_set_link_speed (vnm, rd->hw_if_index, width * speed);
}
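
/*
 * Async event plumbing: the verbs context exposes an event fd
 * (rd->ctx->async_fd) which is registered with the VPP file poller, so
 * port state changes and fatal device errors are handled from the main
 * loop without a dedicated thread.
 */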
static clib_error_t *
rdma_async_event_error_ready (clib_file_t * f)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, f->private_data);
  return clib_error_return (0, "RDMA: %s: async event error", rd->name);
}

static clib_error_t *
rdma_async_event_read_ready (clib_file_t * f)
{
  vnet_main_t *vnm = vnet_get_main ();
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, f->private_data);
  int ret;
  struct ibv_async_event event;

  ret = ibv_get_async_event (rd->ctx, &event);
  if (ret < 0)
    return clib_error_return_unix (0, "ibv_get_async_event() failed");

  switch (event.event_type)
    {
    case IBV_EVENT_PORT_ACTIVE:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_PORT_ERR:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_DEVICE_FATAL:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      vlib_log_emerg (rm->log_class, "%s: fatal error", rd->name);
      break;
    default:
      rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unhandled RDMA async event %i",
		  event.event_type);
      break;
    }

  ibv_ack_async_event (&event);
  return 0;
}

static clib_error_t *
rdma_async_event_init (rdma_device_t * rd)
{
  clib_file_t t = { 0 };
  int ret;

  /* make RDMA async event fd non-blocking */
  ret = fcntl (rd->ctx->async_fd, F_GETFL);
  if (ret < 0)
    return clib_error_return_unix (0, "fcntl(F_GETFL) failed");
  ret = fcntl (rd->ctx->async_fd, F_SETFL, ret | O_NONBLOCK);
  if (ret < 0)
    return clib_error_return_unix (0, "fcntl(F_SETFL, O_NONBLOCK) failed");

  /* register RDMA async event fd */
  t.read_function = rdma_async_event_read_ready;
  t.file_descriptor = rd->ctx->async_fd;
  t.error_function = rdma_async_event_error_ready;
  t.private_data = rd->dev_instance;
  t.description = format (0, "%v async event", rd->name);

  rd->async_event_clib_file_index = clib_file_add (&file_main, &t);
  return 0;
}

static void
rdma_async_event_cleanup (rdma_device_t * rd)
{
  clib_file_del_by_index (&file_main, rd->async_event_clib_file_index);
}

static clib_error_t *
rdma_register_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
  return ethernet_register_interface (vnm, rdma_device_class.index,
				      rd->dev_instance, rd->hwaddr.bytes,
				      &rd->hw_if_index, rdma_flag_change);
}

static void
rdma_unregister_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
  vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
  vnet_hw_interface_unassign_rx_thread (vnm, rd->hw_if_index, 0);
  ethernet_delete_interface (vnm, rd->hw_if_index);
}
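
/*
 * Teardown: release every verbs object owned by the device. The local
 * macro skips NULL handles and only logs failures, so cleanup always
 * runs to completion even on a partially-initialized device.
 */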
static void
rdma_dev_cleanup (rdma_device_t * rd)
{
  rdma_main_t *rm = &rdma_main;
  rdma_rxq_t *rxq;
  rdma_txq_t *txq;

#define _(fn, arg) if (arg) \
  { \
    int rv; \
    if ((rv = fn (arg))) \
      rdma_log (VLIB_LOG_LEVEL_DEBUG, rd, #fn "() failed (rv = %d)", rv); \
  }

  _(ibv_destroy_flow, rd->flow_mcast);
  _(ibv_destroy_flow, rd->flow_ucast);
  _(ibv_dereg_mr, rd->mr);
  vec_foreach (txq, rd->txqs)
  {
    _(ibv_destroy_qp, txq->qp);
    _(ibv_destroy_cq, txq->cq);
  }
  vec_foreach (rxq, rd->rxqs)
  {
    _(ibv_destroy_wq, rxq->wq);
    _(ibv_destroy_cq, rxq->cq);
  }
  _(ibv_destroy_rwq_ind_table, rd->rx_rwq_ind_tbl);
  _(ibv_destroy_qp, rd->rx_qp);
  _(ibv_dealloc_pd, rd->pd);
  _(ibv_close_device, rd->ctx);
#undef _

  clib_error_free (rd->error);

  vec_free (rd->rxqs);
  vec_free (rd->txqs);
  vec_free (rd->name);
  vlib_pci_free_device_info (rd->pci);
  pool_put (rm->devices, rd);
}
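
/*
 * Create one RX queue: a completion queue (an mlx5dv CQ with compressed
 * CQEs in Direct Verbs mode, a plain extended CQ otherwise) and a
 * receive work queue moved to the RDY state. In Direct Verbs mode the
 * rings and doorbells are then mapped for direct access and each WQE is
 * pre-initialized with the buffer data size and the memory region lkey.
 */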
static clib_error_t *
rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
{
  rdma_rxq_t *rxq;
  struct ibv_wq_init_attr wqia;
  struct ibv_cq_init_attr_ex cqa = { };
  struct ibv_wq_attr wqa;
  struct ibv_cq_ex *cqex;

  vec_validate_aligned (rd->rxqs, qid, CLIB_CACHE_LINE_BYTES);
  rxq = vec_elt_at_index (rd->rxqs, qid);
  rxq->size = n_desc;
  vec_validate_aligned (rxq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);

  cqa.cqe = n_desc;
  if (rd->flags & RDMA_DEVICE_F_MLX5DV)
    {
      struct mlx5dv_cq_init_attr dvcq = { };
      dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
      dvcq.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;

      if ((cqex = mlx5dv_create_cq (rd->ctx, &cqa, &dvcq)) == 0)
	return clib_error_return_unix (0, "Create mlx5dv rx CQ Failed");
    }
  else
    {
      if ((cqex = ibv_create_cq_ex (rd->ctx, &cqa)) == 0)
	return clib_error_return_unix (0, "Create CQ Failed");
    }

  rxq->cq = ibv_cq_ex_to_cq (cqex);

  memset (&wqia, 0, sizeof (wqia));
  wqia.wq_type = IBV_WQT_RQ;
  wqia.max_wr = n_desc;
  wqia.max_sge = 1;
  wqia.pd = rd->pd;
  wqia.cq = rxq->cq;
  if ((rxq->wq = ibv_create_wq (rd->ctx, &wqia)) == 0)
    return clib_error_return_unix (0, "Create WQ Failed");

  memset (&wqa, 0, sizeof (wqa));
  wqa.attr_mask = IBV_WQ_ATTR_STATE;
  wqa.wq_state = IBV_WQS_RDY;
  if (ibv_modify_wq (rxq->wq, &wqa) != 0)
    return clib_error_return_unix (0, "Modify WQ (RDY) Failed");

  if (rd->flags & RDMA_DEVICE_F_MLX5DV)
    {
      struct mlx5dv_obj obj = { };
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_rwq dv_rwq;
      u64 qw0;

      obj.cq.in = rxq->cq;
      obj.cq.out = &dv_cq;
      obj.rwq.in = rxq->wq;
      obj.rwq.out = &dv_rwq;

      if ((mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ)))
	return clib_error_return_unix (0, "mlx5dv: failed to init rx obj");

      if (dv_cq.cqe_size != sizeof (mlx5dv_cqe_t))
	return clib_error_return_unix (0, "mlx5dv: incompatible rx CQE size");

      rxq->log2_cq_size = max_log2 (dv_cq.cqe_cnt);
      rxq->cqes = (mlx5dv_cqe_t *) dv_cq.buf;
      rxq->cq_db = (volatile u32 *) dv_cq.dbrec;
      rxq->cqn = dv_cq.cqn;

      rxq->wqes = (mlx5dv_rwq_t *) dv_rwq.buf;
      rxq->wq_db = (volatile u32 *) dv_rwq.dbrec;
      rxq->wq_stride = dv_rwq.stride;
      rxq->wqe_cnt = dv_rwq.wqe_cnt;

      qw0 = clib_host_to_net_u32 (vlib_buffer_get_default_data_size (vm));
      qw0 |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;

      for (int i = 0; i < rxq->size; i++)
	rxq->wqes[i].dsz_and_lkey = qw0;

      for (int i = 0; i < (1 << rxq->log2_cq_size); i++)
	rxq->cqes[i].opcode_cqefmt_se_owner = 0xff;
    }

  return 0;
}
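
/*
 * Distribute RX traffic across all queues: build an indirection table
 * over the per-queue WQs and create a hash QP performing RSS over the
 * IPv4 source/destination addresses (Toeplitz hash, key above), then
 * install the default steering rules on it.
 */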
static clib_error_t *
rdma_rxq_finalize (vlib_main_t * vm, rdma_device_t * rd)
{
  struct ibv_rwq_ind_table_init_attr rwqia;
  struct ibv_qp_init_attr_ex qpia;
  struct ibv_wq **ind_tbl;
  u32 i;

  ASSERT (is_pow2 (vec_len (rd->rxqs))
	  && "rxq number should be a power of 2");

  ind_tbl = vec_new (struct ibv_wq *, vec_len (rd->rxqs));
  vec_foreach_index (i, rd->rxqs)
    ind_tbl[i] = vec_elt_at_index (rd->rxqs, i)->wq;
  memset (&rwqia, 0, sizeof (rwqia));
  rwqia.log_ind_tbl_size = min_log2 (vec_len (ind_tbl));
  rwqia.ind_tbl = ind_tbl;
  if ((rd->rx_rwq_ind_tbl = ibv_create_rwq_ind_table (rd->ctx, &rwqia)) == 0)
    return clib_error_return_unix (0, "RWQ indirection table create failed");
  vec_free (ind_tbl);

  memset (&qpia, 0, sizeof (qpia));
  qpia.qp_type = IBV_QPT_RAW_PACKET;
  qpia.comp_mask =
    IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_IND_TABLE |
    IBV_QP_INIT_ATTR_RX_HASH;
  qpia.pd = rd->pd;
  qpia.rwq_ind_tbl = rd->rx_rwq_ind_tbl;
  STATIC_ASSERT_SIZEOF (rdma_rss_hash_key, 40);
  qpia.rx_hash_conf.rx_hash_key_len = sizeof (rdma_rss_hash_key);
  qpia.rx_hash_conf.rx_hash_key = rdma_rss_hash_key;
  qpia.rx_hash_conf.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ;
  qpia.rx_hash_conf.rx_hash_fields_mask =
    IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;
  if ((rd->rx_qp = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
    return clib_error_return_unix (0, "Queue Pair create failed");

  if (rdma_dev_set_ucast (rd))
    return clib_error_return_unix (0, "Set unicast mode failed");

  return 0;
}
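
/*
 * Create one TX queue: a CQ plus a RAW_PACKET QP driven through the
 * INIT -> RTR -> RTS transitions verbs requires before a QP may send.
 * In Direct Verbs mode the SQ/CQ rings and doorbells are mapped and a
 * send WQE template is pre-built so the datapath only patches in the
 * buffer address and length.
 */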
static clib_error_t *
rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
{
  rdma_txq_t *txq;
  struct ibv_qp_init_attr qpia;
  struct ibv_qp_attr qpa;
  int qp_flags;

  vec_validate_aligned (rd->txqs, qid, CLIB_CACHE_LINE_BYTES);
  txq = vec_elt_at_index (rd->txqs, qid);
  ASSERT (is_pow2 (n_desc));
  txq->bufs_log2sz = min_log2 (n_desc);
  vec_validate_aligned (txq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);

  if ((txq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0)
    return clib_error_return_unix (0, "Create CQ Failed");

  memset (&qpia, 0, sizeof (qpia));
  qpia.send_cq = txq->cq;
  qpia.recv_cq = txq->cq;
  qpia.cap.max_send_wr = n_desc;
  qpia.cap.max_send_sge = 1;
  qpia.qp_type = IBV_QPT_RAW_PACKET;

  if ((txq->qp = ibv_create_qp (rd->pd, &qpia)) == 0)
    return clib_error_return_unix (0, "Queue Pair create failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE | IBV_QP_PORT;
  qpa.qp_state = IBV_QPS_INIT;
  qpa.port_num = 1;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (init) Failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTR;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (receive) Failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTS;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (send) Failed");

  txq->ibv_cq = txq->cq;
  txq->ibv_qp = txq->qp;

  if (rd->flags & RDMA_DEVICE_F_MLX5DV)
    {
      rdma_mlx5_wqe_t *tmpl = (void *) txq->dv_wqe_tmpl;
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_qp dv_qp;
      struct mlx5dv_obj obj = { };

      obj.cq.in = txq->cq;
      obj.cq.out = &dv_cq;
      obj.qp.in = txq->qp;
      obj.qp.out = &dv_qp;

      if (mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP))
	return clib_error_return_unix (0, "DV init obj failed");

      if (RDMA_TXQ_BUF_SZ (txq) > dv_qp.sq.wqe_cnt
	  || !is_pow2 (dv_qp.sq.wqe_cnt)
	  || sizeof (rdma_mlx5_wqe_t) != dv_qp.sq.stride
	  || (uword) dv_qp.sq.buf % sizeof (rdma_mlx5_wqe_t))
	return clib_error_return (0, "Unsupported DV SQ parameters");

      if (RDMA_TXQ_BUF_SZ (txq) > dv_cq.cqe_cnt
	  || !is_pow2 (dv_cq.cqe_cnt)
	  || sizeof (struct mlx5_cqe64) != dv_cq.cqe_size
	  || (uword) dv_cq.buf % sizeof (struct mlx5_cqe64))
	return clib_error_return (0, "Unsupported DV CQ parameters");

      /* get SQ and doorbell addresses */
      txq->dv_sq_wqes = dv_qp.sq.buf;
      txq->dv_sq_dbrec = dv_qp.dbrec;
      txq->dv_sq_db = dv_qp.bf.reg;
      txq->dv_sq_log2sz = min_log2 (dv_qp.sq.wqe_cnt);

      /* get CQ and doorbell addresses */
      txq->dv_cq_cqes = dv_cq.buf;
      txq->dv_cq_dbrec = dv_cq.dbrec;
      txq->dv_cq_log2sz = min_log2 (dv_cq.cqe_cnt);

      /* init tx desc template */
      STATIC_ASSERT_SIZEOF (txq->dv_wqe_tmpl, sizeof (*tmpl));
      mlx5dv_set_ctrl_seg (&tmpl->ctrl, 0, MLX5_OPCODE_SEND, 0,
			   txq->qp->qp_num, 0, RDMA_MLX5_WQE_DS, 0,
			   RDMA_TXQ_DV_INVALID_ID);
      tmpl->eseg.inline_hdr_sz = htobe16 (MLX5_ETH_L2_INLINE_HEADER_SIZE);
      mlx5dv_set_data_seg (&tmpl->dseg, 0, rd->lkey, 0);
    }

  return 0;
}
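
/*
 * Device bring-up: allocate the protection domain and register the
 * whole VPP buffer memory as a single memory region, so any vlib
 * buffer can be posted to the NIC using one lkey, then create the
 * queues.
 */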
static clib_error_t *
rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd, u32 rxq_size,
	       u32 txq_size, u32 rxq_num)
{
  clib_error_t *err;
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 i;

  if (rd->ctx == 0)
    return clib_error_return_unix (0, "Device Open Failed");

  if ((rd->pd = ibv_alloc_pd (rd->ctx)) == 0)
    return clib_error_return_unix (0, "PD Alloc Failed");

  if ((rd->mr = ibv_reg_mr (rd->pd, (void *) bm->buffer_mem_start,
			    bm->buffer_mem_size,
			    IBV_ACCESS_LOCAL_WRITE)) == 0)
    return clib_error_return_unix (0, "Register MR Failed");

  rd->lkey = rd->mr->lkey;	/* avoid indirection in datapath */

  ethernet_mac_address_generate (rd->hwaddr.bytes);

  /*
   * /!\ WARNING /!\ creation order is important
   * We *must* create TX queues *before* RX queues, otherwise we will receive
   * the broadcast packets we sent
   */
  for (i = 0; i < tm->n_vlib_mains; i++)
    if ((err = rdma_txq_init (vm, rd, i, txq_size)))
      return err;

  for (i = 0; i < rxq_num; i++)
    if ((err = rdma_rxq_init (vm, rd, i, rxq_size)))
      return err;

  if ((err = rdma_rxq_finalize (vm, rd)))
    return err;

  return 0;
}

static int
sysfs_path_to_pci_addr (char *path, vlib_pci_addr_t * addr)
{
  uword rv;
  unformat_input_t in;
  u8 *s;

  s = clib_sysfs_link_to_name (path);
  if (!s)
    return 0;
  unformat_init_string (&in, (char *) s, strlen ((char *) s));
  rv = unformat (&in, "%U", unformat_vlib_pci_addr, addr);
  unformat_free (&in);
  vec_free (s);
  return rv;
}
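
/*
 * Interface creation: validate the queue parameters, resolve the Linux
 * netdev name to a PCI address through sysfs, find the verbs device
 * with the same PCI address, probe Direct Verbs support, and finally
 * initialize and register the interface.
 */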
void
rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args)
{
  vnet_main_t *vnm = vnet_get_main ();
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd;
  vlib_pci_addr_t pci_addr;
  struct ibv_device **dev_list;
  int n_devs;
  u8 *s = 0;
  u16 qid;
  int i;

  args->rxq_size = args->rxq_size ? args->rxq_size : 1024;
  args->txq_size = args->txq_size ? args->txq_size : 1024;
  args->rxq_num = args->rxq_num ? args->rxq_num : 1;

  if (!is_pow2 (args->rxq_num))
    {
      args->rv = VNET_API_ERROR_INVALID_VALUE;
      args->error =
	clib_error_return (0, "rx queue number must be a power of two");
      goto err0;
    }

  if (args->rxq_size < VLIB_FRAME_SIZE || args->txq_size < VLIB_FRAME_SIZE ||
      args->rxq_size > 65535 || args->txq_size > 65535 ||
      !is_pow2 (args->rxq_size) || !is_pow2 (args->txq_size))
    {
      args->rv = VNET_API_ERROR_INVALID_VALUE;
      args->error = clib_error_return (0, "queue size must be a power of two "
				       "between %i and 65535",
				       VLIB_FRAME_SIZE);
      goto err0;
    }

  dev_list = ibv_get_device_list (&n_devs);
  if (n_devs == 0)
    {
      args->error =
	clib_error_return_unix (0,
				"no RDMA devices available. Is the ib_uverbs module loaded?");
      goto err0;
    }

  /* get PCI address */
  s = format (0, "/sys/class/net/%s/device%c", args->ifname, 0);
  if (sysfs_path_to_pci_addr ((char *) s, &pci_addr) == 0)
    {
      args->error =
	clib_error_return (0, "cannot find PCI address for device %s",
			   args->ifname);
      goto err1;
    }

  pool_get_zero (rm->devices, rd);
  rd->dev_instance = rd - rm->devices;
  rd->per_interface_next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  rd->linux_ifname = format (0, "%s", args->ifname);

  if (!args->name || 0 == args->name[0])
    rd->name = format (0, "%s/%d", args->ifname, rd->dev_instance);
  else
    rd->name = format (0, "%s", args->name);

  rd->pci = vlib_pci_get_device_info (vm, &pci_addr, &args->error);
  if (!rd->pci)
    goto err2;

  /* if we failed to parse NUMA node, default to 0 */
  if (-1 == rd->pci->numa_node)
    rd->pci->numa_node = 0;

  rd->pool = vlib_buffer_pool_get_default_for_numa (vm, rd->pci->numa_node);

  if (strncmp ((char *) rd->pci->driver_name, "mlx5_core", 9))
    {
      args->error =
	clib_error_return (0,
			   "invalid interface (only mlx5 supported for now)");
      goto err2;
    }

  for (i = 0; i < n_devs; i++)
    {
      vlib_pci_addr_t addr;

      vec_reset_length (s);
      s = format (s, "%s/device%c", dev_list[i]->dev_path, 0);

      if (sysfs_path_to_pci_addr ((char *) s, &addr) == 0)
	continue;

      if (addr.as_u32 != rd->pci->addr.as_u32)
	continue;

      if ((rd->ctx = ibv_open_device (dev_list[i])))
	break;
    }

  if (args->mode != RDMA_MODE_IBV)
    {
      struct mlx5dv_context mlx5dv_attrs = { };

      if (mlx5dv_query_device (rd->ctx, &mlx5dv_attrs) == 0)
	{
	  if ((mlx5dv_attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1))
	    rd->flags |= RDMA_DEVICE_F_MLX5DV;
	}
      else
	{
	  if (args->mode == RDMA_MODE_DV)
	    {
	      args->error = clib_error_return (0, "Direct Verbs mode not "
					       "supported on this interface");
	      goto err2;
	    }
	}
    }

  if ((args->error = rdma_dev_init (vm, rd, args->rxq_size, args->txq_size,
				    args->rxq_num)))
    goto err2;

  if ((args->error = rdma_register_interface (vnm, rd)))
    goto err2;

  if ((args->error = rdma_async_event_init (rd)))
    goto err3;

  rdma_update_state (vnm, rd, 1);

  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, rd->hw_if_index);
  args->sw_if_index = rd->sw_if_index = sw->sw_if_index;
  /*
   * FIXME: add support for interrupt mode
   * vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, rd->hw_if_index);
   * hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
   */
  vnet_hw_interface_set_input_node (vnm, rd->hw_if_index,
				    rdma_input_node.index);
  vec_foreach_index (qid, rd->rxqs)
    vnet_hw_interface_assign_rx_thread (vnm, rd->hw_if_index, qid, ~0);

  vec_free (s);
  return;

err3:
  rdma_unregister_interface (vnm, rd);
err2:
  rdma_dev_cleanup (rd);
err1:
  ibv_free_device_list (dev_list);
err0:
  vec_free (s);
  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
  vlib_log_err (rm->log_class, "%U", format_clib_error, args->error);
}

void
rdma_delete_if (vlib_main_t * vm, rdma_device_t * rd)
{
  rdma_async_event_cleanup (rd);
  rdma_unregister_interface (vnet_get_main (), rd);
  rdma_dev_cleanup (rd);
}

static clib_error_t *
rdma_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hi->dev_instance);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  if (rd->flags & RDMA_DEVICE_F_ERROR)
    return clib_error_return (0, "device is in error state");

  if (is_up)
    {
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      rd->flags |= RDMA_DEVICE_F_ADMIN_UP;
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      rd->flags &= ~RDMA_DEVICE_F_ADMIN_UP;
    }
  return 0;
}

static void
rdma_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  rdma_main_t *rm = &rdma_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  rdma_device_t *rd = pool_elt_at_index (rm->devices, hw->dev_instance);
  rd->per_interface_next_index =
    ~0 ==
    node_index ? VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT :
    vlib_node_add_next (vlib_get_main (), rdma_input_node.index, node_index);
}

static char *rdma_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_rdma_tx_func_error
#undef _
};

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (rdma_device_class) =
{
  .name = "RDMA interface",
  .format_device = format_rdma_device,
  .format_device_name = format_rdma_device_name,
  .admin_up_down_function = rdma_interface_admin_up_down,
  .rx_redirect_to_node = rdma_set_interface_next_node,
  .tx_function_n_errors = RDMA_TX_N_ERROR,
  .tx_function_error_strings = rdma_tx_func_error_strings,
  .mac_addr_change_function = rdma_mac_change,
};
/* *INDENT-ON* */
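
/*
 * Plugin init: register the log class and pre-build the per-thread
 * vlib_buffer_t template the RX datapath copies to initialize buffer
 * metadata cheaply.
 */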
static clib_error_t *
rdma_init (vlib_main_t * vm)
{
  rdma_main_t *rm = &rdma_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  rm->log_class = vlib_log_register_class ("rdma", 0);

  /* vlib_buffer_t template */
  vec_validate_aligned (rm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  for (int i = 0; i < tm->n_vlib_mains; i++)
    {
      rdma_per_thread_data_t *ptd = vec_elt_at_index (rm->per_thread_data, i);
      clib_memset (&ptd->buffer_template, 0, sizeof (vlib_buffer_t));
      ptd->buffer_template.flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
      ptd->buffer_template.ref_count = 1;
      vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_TX] = (u32) ~ 0;
    }

  return 0;
}

VLIB_INIT_FUNCTION (rdma_init) =
{
  .runs_after = VLIB_INITS ("pci_bus_init"),
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */