/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <unistd.h>
#include <fcntl.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>

#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>

#include <rdma/rdma.h>

/* Default RSS hash key (from DPDK MLX driver) */
static u8 rdma_rss_hash_key[] = {
  0x2c, 0xc6, 0x81, 0xd1,
  0x5b, 0xdb, 0xf4, 0xf7,
  0xfc, 0xa2, 0x83, 0x19,
  0xdb, 0x1a, 0x3e, 0x94,
  0x6b, 0x9e, 0x38, 0xd9,
  0x2c, 0x9c, 0x03, 0xd1,
  0xad, 0x99, 0x44, 0xa7,
  0xd9, 0x56, 0x3d, 0x59,
  0x06, 0x3c, 0x25, 0xf3,
  0xfc, 0x1f, 0xdc, 0x2a,
};

rdma_main_t rdma_main;

#define rdma_log__(lvl, dev, f, ...) \
  do { \
      vlib_log((lvl), rdma_main.log_class, "%s: " f, \
               &(dev)->name, ##__VA_ARGS__); \
  } while (0)

#define rdma_log(lvl, dev, f, ...) \
   rdma_log__((lvl), (dev), "%s (%d): " f, strerror(errno), errno, ##__VA_ARGS__)

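/*
 * Attach a verbs flow-steering rule to a RX queue pair: packets are matched
 * on destination MAC through the val/mask pair and, when ether_type is
 * non-zero, on EtherType as well. Matching packets are steered to the QP.
 */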
static struct ibv_flow *
rdma_rxq_init_flow (const rdma_device_t * rd, struct ibv_qp *qp,
		    const mac_address_t * mac, const mac_address_t * mask,
		    u16 ether_type, u32 flags)
{
  struct ibv_flow *flow;
  struct raw_eth_flow_attr
  {
    struct ibv_flow_attr attr;
    struct ibv_flow_spec_eth spec_eth;
  } __attribute__ ((packed)) fa;

  memset (&fa, 0, sizeof (fa));
  fa.attr.num_of_specs = 1;
  fa.attr.port = 1;
  fa.attr.flags = flags;
  fa.spec_eth.type = IBV_FLOW_SPEC_ETH;
  fa.spec_eth.size = sizeof (struct ibv_flow_spec_eth);

  memcpy (fa.spec_eth.val.dst_mac, mac, sizeof (fa.spec_eth.val.dst_mac));
  memcpy (fa.spec_eth.mask.dst_mac, mask, sizeof (fa.spec_eth.mask.dst_mac));

  if (ether_type)
    {
      fa.spec_eth.val.ether_type = ether_type;
      fa.spec_eth.mask.ether_type = 0xffff;
    }

  flow = ibv_create_flow (qp, &fa.attr);
  if (!flow)
    rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_create_flow() failed");

  return flow;
}

static int
rdma_rxq_destroy_flow (const rdma_device_t * rd, struct ibv_flow **flow)
{
  if (!*flow)
    return 0;

  if (ibv_destroy_flow (*flow))
    {
      rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_destroy_flow() failed");
      return -1;
    }

  *flow = 0;
  return 0;
}

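/*
 * Promiscuous mode: an all-zero MAC matched through an all-zero mask
 * accepts every destination MAC. One rule is needed per RX hash QP
 * (IPv4 and IPv6).
 */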
static int
rdma_dev_set_promisc (rdma_device_t * rd)
{
  const mac_address_t all = {.bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0} };
  int err;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4);
  if (err)
    return ~0;

  rd->flow_ucast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &all, &all, ntohs (ETH_P_IPV6), 0);
  rd->flow_ucast4 = rdma_rxq_init_flow (rd, rd->rx_qp4, &all, &all, 0, 0);
  if (!rd->flow_ucast6 || !rd->flow_ucast4)
    return ~0;

  rd->flags |= RDMA_DEVICE_F_PROMISC;
  return 0;
}

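/*
 * Unicast mode: match our own MAC exactly (all-ones mask), plus a
 * multicast rule matching only the group bit (lowest bit of the first
 * octet). The DONT_TRAP flag on the multicast rules lets a copy keep
 * flowing to other consumers (eg. the Linux netdev).
 */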
static int
rdma_dev_set_ucast (rdma_device_t * rd)
{
  const mac_address_t ucast = {.bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
  };
  const mac_address_t mcast = {.bytes = {0x1, 0x0, 0x0, 0x0, 0x0, 0x0} };
  int err;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4);
  if (err)
    return ~0;

  rd->flow_ucast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &rd->hwaddr, &ucast,
			ntohs (ETH_P_IPV6), 0);
  rd->flow_mcast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &mcast, &mcast, ntohs (ETH_P_IPV6),
			IBV_FLOW_ATTR_FLAGS_DONT_TRAP
			/* let others receive mcast packet too (eg. Linux) */
    );
  rd->flow_ucast4 =
    rdma_rxq_init_flow (rd, rd->rx_qp4, &rd->hwaddr, &ucast, 0, 0);
  rd->flow_mcast4 =
    rdma_rxq_init_flow (rd, rd->rx_qp4, &mcast, &mcast, 0,
			IBV_FLOW_ATTR_FLAGS_DONT_TRAP
			/* let others receive mcast packet too (eg. Linux) */
    );
  if (!rd->flow_ucast6 || !rd->flow_mcast6 || !rd->flow_ucast4
      || !rd->flow_mcast4)
    return ~0;

  rd->flags &= ~RDMA_DEVICE_F_PROMISC;
  return 0;
}

static clib_error_t *
rdma_mac_change (vnet_hw_interface_t * hw, const u8 * old, const u8 * new)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);
  mac_address_from_bytes (&rd->hwaddr, new);
  if (!(rd->flags & RDMA_DEVICE_F_PROMISC) && rdma_dev_set_ucast (rd))
    {
      mac_address_from_bytes (&rd->hwaddr, old);
      return clib_error_return_unix (0, "MAC update failed");
    }
  return 0;
}

static u32
rdma_dev_change_mtu (rdma_device_t * rd)
{
  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "MTU change not supported");
  return ~0;
}

static u32
rdma_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);

  switch (flags)
    {
    case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
      return rdma_dev_set_ucast (rd);
    case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
      return rdma_dev_set_promisc (rd);
    case ETHERNET_INTERFACE_FLAG_MTU:
      return rdma_dev_change_mtu (rd);
    }

  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unknown flag %x requested", flags);
  return ~0;
}

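/*
 * Derive link state and speed from ibv_query_port(). active_width and
 * active_speed use the InfiniBand verbs encodings (width 1/2/4/8 ->
 * 1x/4x/8x/12x lanes, speed 1/2/4/8/16/32 -> 2.5/5/10/10/14/25 Gbps per
 * lane); their product is reported to vnet in kbps.
 */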
static void
rdma_update_state (vnet_main_t * vnm, rdma_device_t * rd, int port)
{
  struct ibv_port_attr attr;
  u32 width = 0;
  u32 speed = 0;

  if (ibv_query_port (rd->ctx, port, &attr))
    {
      vnet_hw_interface_set_link_speed (vnm, rd->hw_if_index, 0);
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      return;
    }

  /* update state */
  switch (attr.state)
    {
    case IBV_PORT_ACTIVE:	/* fallthrough */
    case IBV_PORT_ACTIVE_DEFER:
      rd->flags |= RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      break;
    default:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      break;
    }

  /* update speed */
  switch (attr.active_width)
    {
    case 1:
      width = 1;
      break;
    case 2:
      width = 4;
      break;
    case 4:
      width = 8;
      break;
    case 8:
      width = 12;
      break;
    }
  switch (attr.active_speed)
    {
    case 1:
      speed = 2500000;
      break;
    case 2:
      speed = 5000000;
      break;
    case 4:			/* fallthrough */
    case 8:
      speed = 10000000;
      break;
    case 16:
      speed = 14000000;
      break;
    case 32:
      speed = 25000000;
      break;
    }
  vnet_hw_interface_set_link_speed (vnm, rd->hw_if_index, width * speed);
}

static clib_error_t *
rdma_async_event_error_ready (clib_file_t * f)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, f->private_data);
  return clib_error_return (0, "RDMA: %s: async event error", rd->name);
}

static clib_error_t *
rdma_async_event_read_ready (clib_file_t * f)
{
  vnet_main_t *vnm = vnet_get_main ();
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, f->private_data);
  int ret;
  struct ibv_async_event event;
  ret = ibv_get_async_event (rd->ctx, &event);
  if (ret < 0)
    return clib_error_return_unix (0, "ibv_get_async_event() failed");

  switch (event.event_type)
    {
    case IBV_EVENT_PORT_ACTIVE:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_PORT_ERR:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_DEVICE_FATAL:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      vlib_log_emerg (rm->log_class, "%s: fatal error", rd->name);
      break;
    default:
      rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unhandled RDMA async event %i",
		  event.event_type);
      break;
    }

  ibv_ack_async_event (&event);
  return 0;
}

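/*
 * The async event fd must be made non-blocking before being handed to the
 * clib file poller: the read_ready callback runs in the main loop and must
 * never block inside ibv_get_async_event().
 */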
static clib_error_t *
rdma_async_event_init (rdma_device_t * rd)
{
  clib_file_t t = { 0 };
  int ret;

  /* make RDMA async event fd non-blocking */
  ret = fcntl (rd->ctx->async_fd, F_GETFL);
  if (ret < 0)
    return clib_error_return_unix (0, "fcntl(F_GETFL) failed");

  ret = fcntl (rd->ctx->async_fd, F_SETFL, ret | O_NONBLOCK);
  if (ret < 0)
    return clib_error_return_unix (0, "fcntl(F_SETFL, O_NONBLOCK) failed");

  /* register RDMA async event fd */
  t.read_function = rdma_async_event_read_ready;
  t.file_descriptor = rd->ctx->async_fd;
  t.error_function = rdma_async_event_error_ready;
  t.private_data = rd->dev_instance;
  t.description = format (0, "%v async event", rd->name);

  rd->async_event_clib_file_index = clib_file_add (&file_main, &t);
  return 0;
}

static void
rdma_async_event_cleanup (rdma_device_t * rd)
{
  clib_file_del_by_index (&file_main, rd->async_event_clib_file_index);
}

static clib_error_t *
rdma_register_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
  clib_error_t *err =
    ethernet_register_interface (vnm, rdma_device_class.index,
				 rd->dev_instance, rd->hwaddr.bytes,
				 &rd->hw_if_index, rdma_flag_change);

  /* Indicate ability to support L3 DMAC filtering and
   * initialize interface to L3 non-promiscuous mode */
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rd->hw_if_index);
  hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER;
  ethernet_set_flags (vnm, rd->hw_if_index,
		      ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
  return err;
}

static void
rdma_unregister_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
  vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
  vnet_hw_interface_unassign_rx_thread (vnm, rd->hw_if_index, 0);
  ethernet_delete_interface (vnm, rd->hw_if_index);
}

static void
rdma_dev_cleanup (rdma_device_t * rd)
{
  rdma_main_t *rm = &rdma_main;
  rdma_rxq_t *rxq;
  rdma_txq_t *txq;

#define _(fn, arg) if (arg) \
  { \
    int rv; \
    if ((rv = fn (arg))) \
       rdma_log (VLIB_LOG_LEVEL_DEBUG, rd, #fn "() failed (rv = %d)", rv); \
  }

  _(ibv_destroy_flow, rd->flow_mcast6);
  _(ibv_destroy_flow, rd->flow_ucast6);
  _(ibv_destroy_flow, rd->flow_mcast4);
  _(ibv_destroy_flow, rd->flow_ucast4);
  _(ibv_dereg_mr, rd->mr);
  vec_foreach (txq, rd->txqs)
  {
    _(ibv_destroy_qp, txq->qp);
    _(ibv_destroy_cq, txq->cq);
  }
  vec_foreach (rxq, rd->rxqs)
  {
    _(ibv_destroy_wq, rxq->wq);
    _(ibv_destroy_cq, rxq->cq);
  }
  _(ibv_destroy_rwq_ind_table, rd->rx_rwq_ind_tbl);
  _(ibv_destroy_qp, rd->rx_qp6);
  _(ibv_destroy_qp, rd->rx_qp4);
  _(ibv_dealloc_pd, rd->pd);
  _(ibv_close_device, rd->ctx);
#undef _

  clib_error_free (rd->error);

  vec_free (rd->rxqs);
  vec_free (rd->txqs);
  vec_free (rd->name);
  vec_free (rd->linux_ifname);
  vlib_pci_free_device_info (rd->pci);
  pool_put (rm->devices, rd);
}

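/*
 * RX queue creation. With direct verbs (mlx5dv) we request compressed
 * CQEs and, when supported, a striding RQ: a single WQE then covers
 * several fixed-size strides, each mapped to one vlib_buffer, which
 * reduces descriptor and doorbell overhead for multi-segment packets.
 */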
static clib_error_t *
rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc,
	       u8 no_multi_seg, u16 max_pktlen)
{
  rdma_rxq_t *rxq;
  struct ibv_wq_init_attr wqia;
  struct ibv_cq_init_attr_ex cqa = { };
  struct ibv_wq_attr wqa;
  struct ibv_cq_ex *cqex;
  struct mlx5dv_wq_init_attr dv_wqia = { };
  int is_mlx5dv = ! !(rd->flags & RDMA_DEVICE_F_MLX5DV);
  int is_striding = ! !(rd->flags & RDMA_DEVICE_F_STRIDING_RQ);

  vec_validate_aligned (rd->rxqs, qid, CLIB_CACHE_LINE_BYTES);
  rxq = vec_elt_at_index (rd->rxqs, qid);
  rxq->size = n_desc;
  rxq->log_wqe_sz = 0;
  rxq->buf_sz = vlib_buffer_get_default_data_size (vm);
  vec_validate_aligned (rxq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);

  cqa.cqe = n_desc;
  if (is_mlx5dv)
    {
      struct mlx5dv_cq_init_attr dvcq = { };
      dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
      dvcq.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;

      if ((cqex = mlx5dv_create_cq (rd->ctx, &cqa, &dvcq)) == 0)
	return clib_error_return_unix (0, "Create mlx5dv rx CQ Failed");
    }
  else
    {
      if ((cqex = ibv_create_cq_ex (rd->ctx, &cqa)) == 0)
	return clib_error_return_unix (0, "Create CQ Failed");
    }

  rxq->cq = ibv_cq_ex_to_cq (cqex);

  memset (&wqia, 0, sizeof (wqia));
  wqia.wq_type = IBV_WQT_RQ;
  wqia.max_wr = n_desc;
  wqia.max_sge = 1;
  wqia.pd = rd->pd;
  wqia.cq = rxq->cq;
  if (is_mlx5dv)
    {
      if (is_striding)
	{
	  /* In STRIDING_RQ mode, map a descriptor to a stride, not a full WQE buffer */
	  uword data_seg_log2_sz =
	    min_log2 (vlib_buffer_get_default_data_size (vm));
	  rxq->buf_sz = 1 << data_seg_log2_sz;
	  /* The trick is also to map a descriptor to a data segment in the WQE SG list.
	     The number of strides per WQE and the size of a WQE (in 16-byte words)
	     must both be powers of two.
	     Moreover, in striding RQ mode, WQEs must include the SRQ header, which
	     occupies one 16-byte word. That is why WQEs have 2*RDMA_RXQ_MAX_CHAIN_SZ
	     16-byte words:
	     - One for the SRQ header
	     - RDMA_RXQ_MAX_CHAIN_SZ for the different data segments (each mapped to
	     a stride, and a vlib_buffer)
	     - RDMA_RXQ_MAX_CHAIN_SZ-1 null data segments
	   */
	  int max_chain_log_sz =
	    max_pktlen ? max_log2 ((max_pktlen /
				    (rxq->buf_sz)) +
				   1) : RDMA_RXQ_MAX_CHAIN_LOG_SZ;
	  max_chain_log_sz = clib_max (max_chain_log_sz, 3);
	  wqia.max_sge = 1 << max_chain_log_sz;
	  dv_wqia.comp_mask = MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
	  dv_wqia.striding_rq_attrs.two_byte_shift_en = 0;
	  dv_wqia.striding_rq_attrs.single_wqe_log_num_of_strides =
	    max_chain_log_sz;
	  dv_wqia.striding_rq_attrs.single_stride_log_num_of_bytes =
	    data_seg_log2_sz;
	  wqia.max_wr >>= max_chain_log_sz;
	  rxq->log_wqe_sz = max_chain_log_sz + 1;
	  rxq->log_stride_per_wqe = max_chain_log_sz;
	}
      else
	{
	  /* In non STRIDING_RQ mode and if multiseg is not disabled, each WQE is
	     a SG list of data segments, each pointing to a vlib_buffer. */
	  if (no_multi_seg)
	    {
	      wqia.max_sge = 1;
	      rxq->log_wqe_sz = 0;
	      rxq->n_ds_per_wqe = 1;
	    }
	  else
	    {
	      int max_chain_sz =
		max_pktlen ? (max_pktlen /
			      (rxq->buf_sz)) +
		1 : RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ;
	      int max_chain_log_sz = max_log2 (max_chain_sz);
	      wqia.max_sge = 1 << max_chain_log_sz;
	      rxq->log_wqe_sz = max_chain_log_sz;
	      rxq->n_ds_per_wqe = max_chain_sz;
	    }
	}

      if ((rxq->wq = mlx5dv_create_wq (rd->ctx, &wqia, &dv_wqia)))
	{
	  rxq->wq->events_completed = 0;
	  pthread_mutex_init (&rxq->wq->mutex, NULL);
	  pthread_cond_init (&rxq->wq->cond, NULL);
	}
      else
	return clib_error_return_unix (0, "Create WQ Failed");
    }
  else if ((rxq->wq = ibv_create_wq (rd->ctx, &wqia)) == 0)
    return clib_error_return_unix (0, "Create WQ Failed");

  memset (&wqa, 0, sizeof (wqa));
  wqa.attr_mask = IBV_WQ_ATTR_STATE;
  wqa.wq_state = IBV_WQS_RDY;
  if (ibv_modify_wq (rxq->wq, &wqa) != 0)
    return clib_error_return_unix (0, "Modify WQ (RDY) Failed");

  if (is_mlx5dv)
    {
      struct mlx5dv_obj obj = { };
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_rwq dv_rwq;
      u64 qw0;
      u64 qw0_nullseg;
      u32 wqe_sz_mask = (1 << rxq->log_wqe_sz) - 1;

      obj.cq.in = rxq->cq;
      obj.cq.out = &dv_cq;
      obj.rwq.in = rxq->wq;
      obj.rwq.out = &dv_rwq;

      if ((mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ)))
	return clib_error_return_unix (0, "mlx5dv: failed to init rx obj");

      if (dv_cq.cqe_size != sizeof (mlx5dv_cqe_t))
	return clib_error_return_unix (0, "mlx5dv: incompatible rx CQE size");

      rxq->log2_cq_size = max_log2 (dv_cq.cqe_cnt);
      rxq->cqes = (mlx5dv_cqe_t *) dv_cq.buf;
      rxq->cq_db = (volatile u32 *) dv_cq.dbrec;
      rxq->cqn = dv_cq.cqn;

      rxq->wqes = (mlx5dv_wqe_ds_t *) dv_rwq.buf;
      rxq->wq_db = (volatile u32 *) dv_rwq.dbrec;
      rxq->wq_stride = dv_rwq.stride;
      rxq->wqe_cnt = dv_rwq.wqe_cnt;

      qw0 = clib_host_to_net_u32 (rxq->buf_sz);
      qw0_nullseg = 0;
      qw0 |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;
      qw0_nullseg |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;

      /* Prefill the different 16-byte words of the WQ.
         - If not in striding RQ mode, for each WQE, init the first
         RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ words with qw0 and the rest of
         the WQE with null segments.
         - If in striding RQ mode, for each WQE, the RDMA_RXQ_MAX_CHAIN_SZ + 1
         first 16-byte words are initialised with qw0, the rest are null
         segments */
      for (int i = 0; i < rxq->wqe_cnt << rxq->log_wqe_sz; i++)
	if ((!is_striding && ((i & wqe_sz_mask) < rxq->n_ds_per_wqe))
	    || (is_striding
		&& ((i == 0)
		    || !(((i - 1) >> rxq->log_stride_per_wqe) & 0x1))))
	  rxq->wqes[i].dsz_and_lkey = qw0;
	else
	  rxq->wqes[i].dsz_and_lkey = qw0_nullseg;

      for (int i = 0; i < (1 << rxq->log2_cq_size); i++)
	rxq->cqes[i].opcode_cqefmt_se_owner = 0xff;

      if (!is_striding)
	{
	  vec_validate_aligned (rxq->second_bufs, n_desc - 1,
				CLIB_CACHE_LINE_BYTES);
	  vec_validate_aligned (rxq->n_used_per_chain, n_desc - 1,
				CLIB_CACHE_LINE_BYTES);
	  rxq->n_total_additional_segs = n_desc * (rxq->n_ds_per_wqe - 1);
	  for (int i = 0; i < n_desc; i++)
	    rxq->n_used_per_chain[i] = rxq->n_ds_per_wqe - 1;
	}
    }

  return 0;
}

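/*
 * RSS fan-out: all RX WQs are grouped under a single indirection table,
 * and two hash QPs (IPv4 and IPv6) spread flows across them with a
 * Toeplitz hash keyed by the 40-byte key above, as the DPDK mlx5 driver
 * does.
 */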
static clib_error_t *
rdma_rxq_finalize (vlib_main_t * vm, rdma_device_t * rd)
{
  struct ibv_rwq_ind_table_init_attr rwqia;
  struct ibv_qp_init_attr_ex qpia;
  struct ibv_wq **ind_tbl;
  u32 i;

  ASSERT (is_pow2 (vec_len (rd->rxqs))
	  && "rxq number should be a power of 2");

  ind_tbl = vec_new (struct ibv_wq *, vec_len (rd->rxqs));
  vec_foreach_index (i, rd->rxqs)
    ind_tbl[i] = vec_elt_at_index (rd->rxqs, i)->wq;
  memset (&rwqia, 0, sizeof (rwqia));
  rwqia.log_ind_tbl_size = min_log2 (vec_len (ind_tbl));
  rwqia.ind_tbl = ind_tbl;
  if ((rd->rx_rwq_ind_tbl = ibv_create_rwq_ind_table (rd->ctx, &rwqia)) == 0)
    return clib_error_return_unix (0, "RWQ indirection table create failed");
  vec_free (ind_tbl);

  memset (&qpia, 0, sizeof (qpia));
  qpia.qp_type = IBV_QPT_RAW_PACKET;
  qpia.comp_mask =
    IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_IND_TABLE |
    IBV_QP_INIT_ATTR_RX_HASH;
  qpia.pd = rd->pd;
  qpia.rwq_ind_tbl = rd->rx_rwq_ind_tbl;
  STATIC_ASSERT_SIZEOF (rdma_rss_hash_key, 40);
  qpia.rx_hash_conf.rx_hash_key_len = sizeof (rdma_rss_hash_key);
  qpia.rx_hash_conf.rx_hash_key = rdma_rss_hash_key;
  qpia.rx_hash_conf.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ;

  qpia.rx_hash_conf.rx_hash_fields_mask =
    IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_SRC_PORT_TCP |
    IBV_RX_HASH_DST_PORT_TCP;
  if ((rd->rx_qp4 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
    return clib_error_return_unix (0, "IPv4 Queue Pair create failed");

  qpia.rx_hash_conf.rx_hash_fields_mask =
    IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 | IBV_RX_HASH_SRC_PORT_TCP |
    IBV_RX_HASH_DST_PORT_TCP;
  if ((rd->rx_qp6 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
    return clib_error_return_unix (0, "IPv6 Queue Pair create failed");

  if (rdma_dev_set_ucast (rd))
    return clib_error_return_unix (0, "Set unicast mode failed");

  return 0;
}

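/*
 * TX queue creation. Per the verbs spec a QP only accepts posted sends
 * once it has been walked through the INIT -> RTR -> RTS state
 * transitions, hence the three ibv_modify_qp() calls below.
 */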
static clib_error_t *
rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
{
  rdma_txq_t *txq;
  struct ibv_qp_init_attr qpia;
  struct ibv_qp_attr qpa;
  int qp_flags;

  vec_validate_aligned (rd->txqs, qid, CLIB_CACHE_LINE_BYTES);
  txq = vec_elt_at_index (rd->txqs, qid);
  ASSERT (is_pow2 (n_desc));
  txq->bufs_log2sz = min_log2 (n_desc);
  vec_validate_aligned (txq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);

  if ((txq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0)
    return clib_error_return_unix (0, "Create CQ Failed");

  memset (&qpia, 0, sizeof (qpia));
  qpia.send_cq = txq->cq;
  qpia.recv_cq = txq->cq;
  qpia.cap.max_send_wr = n_desc;
  qpia.cap.max_send_sge = 1;
  qpia.qp_type = IBV_QPT_RAW_PACKET;

  if ((txq->qp = ibv_create_qp (rd->pd, &qpia)) == 0)
    return clib_error_return_unix (0, "Queue Pair create failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE | IBV_QP_PORT;
  qpa.qp_state = IBV_QPS_INIT;
  qpa.port_num = 1;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (init) Failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTR;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (receive) Failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTS;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (send) Failed");

  txq->ibv_cq = txq->cq;
  txq->ibv_qp = txq->qp;

  if (rd->flags & RDMA_DEVICE_F_MLX5DV)
    {
      rdma_mlx5_wqe_t *tmpl = (void *) txq->dv_wqe_tmpl;
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_qp dv_qp;
      struct mlx5dv_obj obj = { };

      obj.cq.in = txq->cq;
      obj.cq.out = &dv_cq;
      obj.qp.in = txq->qp;
      obj.qp.out = &dv_qp;

      if (mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP))
	return clib_error_return_unix (0, "DV init obj failed");

      if (RDMA_TXQ_BUF_SZ (txq) > dv_qp.sq.wqe_cnt
	  || !is_pow2 (dv_qp.sq.wqe_cnt)
	  || sizeof (rdma_mlx5_wqe_t) != dv_qp.sq.stride
	  || (uword) dv_qp.sq.buf % sizeof (rdma_mlx5_wqe_t))
	return clib_error_return (0, "Unsupported DV SQ parameters");

      if (RDMA_TXQ_BUF_SZ (txq) > dv_cq.cqe_cnt
	  || !is_pow2 (dv_cq.cqe_cnt)
	  || sizeof (struct mlx5_cqe64) != dv_cq.cqe_size
	  || (uword) dv_cq.buf % sizeof (struct mlx5_cqe64))
	return clib_error_return (0, "Unsupported DV CQ parameters");

      /* get SQ and doorbell addresses */
      txq->dv_sq_wqes = dv_qp.sq.buf;
      txq->dv_sq_dbrec = dv_qp.dbrec;
      txq->dv_sq_db = dv_qp.bf.reg;
      txq->dv_sq_log2sz = min_log2 (dv_qp.sq.wqe_cnt);

      /* get CQ and doorbell addresses */
      txq->dv_cq_cqes = dv_cq.buf;
      txq->dv_cq_dbrec = dv_cq.dbrec;
      txq->dv_cq_log2sz = min_log2 (dv_cq.cqe_cnt);

      /* init tx desc template */
      STATIC_ASSERT_SIZEOF (txq->dv_wqe_tmpl, sizeof (*tmpl));
      mlx5dv_set_ctrl_seg (&tmpl->ctrl, 0, MLX5_OPCODE_SEND, 0,
			   txq->qp->qp_num, 0, RDMA_MLX5_WQE_DS, 0,
			   RDMA_TXQ_DV_INVALID_ID);
      tmpl->eseg.inline_hdr_sz = htobe16 (MLX5_ETH_L2_INLINE_HEADER_SIZE);
      mlx5dv_set_data_seg (&tmpl->dseg, 0, rd->lkey, 0);
    }

  return 0;
}

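/*
 * Device bring-up: a single memory region covering the whole vlib buffer
 * memory is registered once, so the datapath can use one cached lkey
 * instead of resolving buffers to MRs per packet.
 */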
static clib_error_t *
rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd,
	       rdma_create_if_args_t * args)
{
  clib_error_t *err;
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 rxq_num = args->rxq_num;
  u32 rxq_size = args->rxq_size;
  u32 txq_size = args->txq_size;
  u32 i;

  if (rd->ctx == 0)
    return clib_error_return_unix (0, "Device Open Failed");

  if ((rd->pd = ibv_alloc_pd (rd->ctx)) == 0)
    return clib_error_return_unix (0, "PD Alloc Failed");

  if ((rd->mr = ibv_reg_mr (rd->pd, (void *) bm->buffer_mem_start,
			    bm->buffer_mem_size,
			    IBV_ACCESS_LOCAL_WRITE)) == 0)
    return clib_error_return_unix (0, "Register MR Failed");

  rd->lkey = rd->mr->lkey;	/* avoid indirection in datapath */

  ethernet_mac_address_generate (rd->hwaddr.bytes);

  /*
   * /!\ WARNING /!\ creation order is important
   * We *must* create TX queues *before* RX queues, otherwise we will receive
   * the broadcast packets we sent
   */
  for (i = 0; i < tm->n_vlib_mains; i++)
    if ((err = rdma_txq_init (vm, rd, i, txq_size)))
      return err;

  for (i = 0; i < rxq_num; i++)
    if ((err =
	 rdma_rxq_init (vm, rd, i, rxq_size,
			args->no_multi_seg, args->max_pktlen)))
      return err;
  if ((err = rdma_rxq_finalize (vm, rd)))
    return err;

  return 0;
}

static uword
sysfs_path_to_pci_addr (char *path, vlib_pci_addr_t * addr)
{
  uword rv;
  unformat_input_t in;
  u8 *s;

  s = clib_sysfs_link_to_name (path);
  if (!s)
    return 0;

  unformat_init_string (&in, (char *) s, strlen ((char *) s));
  rv = unformat (&in, "%U", unformat_vlib_pci_addr, addr);
  unformat_free (&in);
  vec_free (s);
  return rv;
}

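/*
 * Interface creation: the Linux netdev name is resolved to a PCI address
 * via the /sys/class/net/<ifname>/device symlink, then matched against
 * the PCI addresses of the available ibv devices to find the context to
 * open.
 */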
void
rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args)
{
  vnet_main_t *vnm = vnet_get_main ();
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd;
  vlib_pci_addr_t pci_addr;
  struct ibv_device **dev_list;
  int n_devs;
  u8 *s;
  u16 qid;
  int i;

  args->rxq_size = args->rxq_size ? args->rxq_size : 1024;
  args->txq_size = args->txq_size ? args->txq_size : 1024;
  args->rxq_num = args->rxq_num ? args->rxq_num : 2;

  if (!is_pow2 (args->rxq_num))
    {
      args->rv = VNET_API_ERROR_INVALID_VALUE;
      args->error =
	clib_error_return (0, "rx queue number must be a power of two");
      goto err0;
    }

  if (args->rxq_size < VLIB_FRAME_SIZE || args->txq_size < VLIB_FRAME_SIZE ||
      args->rxq_size > 65535 || args->txq_size > 65535 ||
      !is_pow2 (args->rxq_size) || !is_pow2 (args->txq_size))
    {
      args->rv = VNET_API_ERROR_INVALID_VALUE;
      args->error = clib_error_return (0, "queue size must be a power of two "
				       "between %i and 65535",
				       VLIB_FRAME_SIZE);
      goto err0;
    }

  dev_list = ibv_get_device_list (&n_devs);
  if (n_devs == 0)
    {
      args->error =
	clib_error_return_unix (0,
				"no RDMA devices available. Is the ib_uverbs module loaded?");
      goto err0;
    }

  /* get PCI address */
  s = format (0, "/sys/class/net/%s/device%c", args->ifname, 0);
  if (sysfs_path_to_pci_addr ((char *) s, &pci_addr) == 0)
    {
      args->error =
	clib_error_return (0, "cannot find PCI address for device");
      goto err1;
    }

  pool_get_zero (rm->devices, rd);
  rd->dev_instance = rd - rm->devices;
  rd->per_interface_next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  rd->linux_ifname = format (0, "%s", args->ifname);

  if (!args->name || 0 == args->name[0])
    rd->name = format (0, "%s/%d", args->ifname, rd->dev_instance);
  else
    rd->name = format (0, "%s", args->name);

  rd->pci = vlib_pci_get_device_info (vm, &pci_addr, &args->error);
  if (!rd->pci)
    goto err2;

  /* if we failed to parse NUMA node, default to 0 */
  if (-1 == rd->pci->numa_node)
    rd->pci->numa_node = 0;

  rd->pool = vlib_buffer_pool_get_default_for_numa (vm, rd->pci->numa_node);

  if (strncmp ((char *) rd->pci->driver_name, "mlx5_core", 9))
    {
      args->error =
	clib_error_return (0,
			   "invalid interface (only mlx5 supported for now)");
      goto err2;
    }

  for (i = 0; i < n_devs; i++)
    {
      vlib_pci_addr_t addr;

      vec_reset_length (s);
      s = format (s, "%s/device%c", dev_list[i]->dev_path, 0);

      if (sysfs_path_to_pci_addr ((char *) s, &addr) == 0)
	continue;

      if (addr.as_u32 != rd->pci->addr.as_u32)
	continue;

      if ((rd->ctx = ibv_open_device (dev_list[i])))
	break;
    }

  if (args->mode != RDMA_MODE_IBV)
    {
      struct mlx5dv_context mlx5dv_attrs = { };
      mlx5dv_attrs.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;

      if (mlx5dv_query_device (rd->ctx, &mlx5dv_attrs) == 0)
	{
	  uword data_seg_log2_sz =
	    min_log2 (vlib_buffer_get_default_data_size (vm));

	  if ((mlx5dv_attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1))
	    rd->flags |= RDMA_DEVICE_F_MLX5DV;

	  /* Enable striding RQ if neither multiseg nor striding rq
	     are explicitly disabled, and if the interface supports it. */
	  if (!args->no_multi_seg && !args->disable_striding_rq
	      && data_seg_log2_sz <=
	      mlx5dv_attrs.striding_rq_caps.max_single_stride_log_num_of_bytes
	      && data_seg_log2_sz >=
	      mlx5dv_attrs.striding_rq_caps.min_single_stride_log_num_of_bytes
	      && RDMA_RXQ_MAX_CHAIN_LOG_SZ >=
	      mlx5dv_attrs.striding_rq_caps.min_single_wqe_log_num_of_strides
	      && RDMA_RXQ_MAX_CHAIN_LOG_SZ <=
	      mlx5dv_attrs.striding_rq_caps.max_single_wqe_log_num_of_strides)
	    rd->flags |= RDMA_DEVICE_F_STRIDING_RQ;
	}
      else
	{
	  if (args->mode == RDMA_MODE_DV)
	    {
	      args->error = clib_error_return (0, "Direct Verbs mode not "
					       "supported on this interface");
	      goto err2;
	    }
	}
    }

  if ((args->error = rdma_dev_init (vm, rd, args)))
    goto err2;

  if ((args->error = rdma_register_interface (vnm, rd)))
    goto err2;

  if ((args->error = rdma_async_event_init (rd)))
    goto err3;

  rdma_update_state (vnm, rd, 1);

  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, rd->hw_if_index);
  args->sw_if_index = rd->sw_if_index = sw->sw_if_index;
  /*
   * FIXME: add support for interrupt mode
   * vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, rd->hw_if_index);
   * hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
   */
  vnet_hw_interface_set_input_node (vnm, rd->hw_if_index,
				    rdma_input_node.index);
  vec_foreach_index (qid, rd->rxqs)
    vnet_hw_interface_assign_rx_thread (vnm, rd->hw_if_index, qid, ~0);

  vec_free (s);
  return;

err3:
  rdma_unregister_interface (vnm, rd);
err2:
  rdma_dev_cleanup (rd);
err1:
  ibv_free_device_list (dev_list);
  vec_free (s);
  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
err0:
  vlib_log_err (rm->log_class, "%U", format_clib_error, args->error);
}

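/* Teardown mirrors creation in reverse: async event file first, then the
 * vnet interface, then the verbs resources. */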
void
rdma_delete_if (vlib_main_t * vm, rdma_device_t * rd)
{
  rdma_async_event_cleanup (rd);
  rdma_unregister_interface (vnet_get_main (), rd);
  rdma_dev_cleanup (rd);
}

static clib_error_t *
rdma_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hi->dev_instance);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  if (rd->flags & RDMA_DEVICE_F_ERROR)
    return clib_error_return (0, "device is in error state");

  if (is_up)
    {
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      rd->flags |= RDMA_DEVICE_F_ADMIN_UP;
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      rd->flags &= ~RDMA_DEVICE_F_ADMIN_UP;
    }
  return 0;
}

static void
rdma_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  rdma_main_t *rm = &rdma_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  rdma_device_t *rd = pool_elt_at_index (rm->devices, hw->dev_instance);
  rd->per_interface_next_index =
    ~0 ==
    node_index ? VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT :
    vlib_node_add_next (vlib_get_main (), rdma_input_node.index, node_index);
}

static char *rdma_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_rdma_tx_func_error
#undef _
};

VNET_DEVICE_CLASS (rdma_device_class) =
{
  .name = "RDMA interface",
  .format_device = format_rdma_device,
  .format_device_name = format_rdma_device_name,
  .admin_up_down_function = rdma_interface_admin_up_down,
  .rx_redirect_to_node = rdma_set_interface_next_node,
  .tx_function_n_errors = RDMA_TX_N_ERROR,
  .tx_function_error_strings = rdma_tx_func_error_strings,
  .mac_addr_change_function = rdma_mac_change,
};

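/*
 * Interfaces are typically created from the debug CLI implemented in
 * cli.c, e.g.:
 *   create interface rdma host-if enp94s0f0 name rdma-0
 */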
static clib_error_t *
rdma_init (vlib_main_t * vm)
{
  rdma_main_t *rm = &rdma_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  rm->log_class = vlib_log_register_class ("rdma", 0);

  /* vlib_buffer_t template */
  vec_validate_aligned (rm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  for (int i = 0; i < tm->n_vlib_mains; i++)
    {
      rdma_per_thread_data_t *ptd = vec_elt_at_index (rm->per_thread_data, i);
      clib_memset (&ptd->buffer_template, 0, sizeof (vlib_buffer_t));
      ptd->buffer_template.flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
      ptd->buffer_template.ref_count = 1;
      vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_TX] = (u32) ~ 0;
    }

  return 0;
}

VLIB_INIT_FUNCTION (rdma_init) =
{
  .runs_after = VLIB_INITS ("pci_bus_init"),
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */