/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <unistd.h>
#include <fcntl.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>

#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>

#include <rdma/rdma.h>

/* Default RSS hash key (from DPDK MLX driver) */
static u8 rdma_rss_hash_key[] = {
  0x2c, 0xc6, 0x81, 0xd1,
  0x5b, 0xdb, 0xf4, 0xf7,
  0xfc, 0xa2, 0x83, 0x19,
  0xdb, 0x1a, 0x3e, 0x94,
  0x6b, 0x9e, 0x38, 0xd9,
  0x2c, 0x9c, 0x03, 0xd1,
  0xad, 0x99, 0x44, 0xa7,
  0xd9, 0x56, 0x3d, 0x59,
  0x06, 0x3c, 0x25, 0xf3,
  0xfc, 0x1f, 0xdc, 0x2a,
};

rdma_main_t rdma_main;

/* (dev) is of type (rdma_device_t *) */
#define rdma_log__(lvl, dev, f, ...) \
  do \
    { \
      vlib_log ((lvl), rdma_main.log_class, "%s: " f, (dev)->name, \
		##__VA_ARGS__); \
    } \
  while (0)

#define rdma_log(lvl, dev, f, ...) \
   rdma_log__((lvl), (dev), "%s (%d): " f, strerror(errno), errno, ##__VA_ARGS__)

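/*
 * Usage sketch (hypothetical call site): rdma_log (VLIB_LOG_LEVEL_ERR, rd,
 * "ibv_foo() failed") logs "<dev-name>: <strerror(errno)> (<errno>):
 * ibv_foo() failed", so call sites only name the failing operation and the
 * macros add the device name, errno string and errno value.
 */
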
static struct ibv_flow *
rdma_rxq_init_flow (const rdma_device_t * rd, struct ibv_qp *qp,
		    const mac_address_t * mac, const mac_address_t * mask,
		    u16 ether_type, u32 flags)
{
  struct ibv_flow *flow;
  struct raw_eth_flow_attr
  {
    struct ibv_flow_attr attr;
    struct ibv_flow_spec_eth spec_eth;
  } __attribute__ ((packed)) fa;

  memset (&fa, 0, sizeof (fa));
  fa.attr.num_of_specs = 1;
  fa.attr.port = 1;
  fa.attr.flags = flags;
  fa.spec_eth.type = IBV_FLOW_SPEC_ETH;
  fa.spec_eth.size = sizeof (struct ibv_flow_spec_eth);

  memcpy (fa.spec_eth.val.dst_mac, mac, sizeof (fa.spec_eth.val.dst_mac));
  memcpy (fa.spec_eth.mask.dst_mac, mask, sizeof (fa.spec_eth.mask.dst_mac));

  if (ether_type)
    {
      fa.spec_eth.val.ether_type = ether_type;
      fa.spec_eth.mask.ether_type = 0xffff;
    }

  flow = ibv_create_flow (qp, &fa.attr);
  if (!flow)
    rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_create_flow() failed");
  return flow;
}

static int
rdma_rxq_destroy_flow (const rdma_device_t * rd, struct ibv_flow **flow)
{
  if (!*flow)
    return 0;

  if (ibv_destroy_flow (*flow))
    {
      rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_destroy_flow() failed");
      return -1;
    }

  *flow = 0;
  return 0;
}

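/* Promiscuous mode: a zero MAC matched with a zero mask accepts any
 * destination MAC, so a single catch-all flow per QP replaces the
 * unicast/multicast flows. */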
static int
rdma_dev_set_promisc (rdma_device_t * rd)
{
  const mac_address_t all = {.bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0} };
  int err;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4);
  if (err)
    return ~0;

  rd->flow_ucast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &all, &all, ntohs (ETH_P_IPV6), 0);
  rd->flow_ucast4 = rdma_rxq_init_flow (rd, rd->rx_qp4, &all, &all, 0, 0);
  if (!rd->flow_ucast6 || !rd->flow_ucast4)
    return ~0;

  rd->flags |= RDMA_DEVICE_F_PROMISC;
  return 0;
}

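/* L3 (non-promiscuous) mode: accept frames matching our MAC exactly, plus
 * multicast frames (lowest bit of the first address byte set). */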
static int
rdma_dev_set_ucast (rdma_device_t * rd)
{
  const mac_address_t ucast = {.bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
  };
  const mac_address_t mcast = {.bytes = {0x1, 0x0, 0x0, 0x0, 0x0, 0x0} };
  int err;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4);
  if (err)
    return ~0;

  rd->flow_ucast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &rd->hwaddr, &ucast,
			ntohs (ETH_P_IPV6), 0);
  rd->flow_mcast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &mcast, &mcast, ntohs (ETH_P_IPV6),
			IBV_FLOW_ATTR_FLAGS_DONT_TRAP
			/* let others receive mcast packets too (eg. Linux) */
    );
  rd->flow_ucast4 =
    rdma_rxq_init_flow (rd, rd->rx_qp4, &rd->hwaddr, &ucast, 0, 0);
  rd->flow_mcast4 =
    rdma_rxq_init_flow (rd, rd->rx_qp4, &mcast, &mcast, 0,
			IBV_FLOW_ATTR_FLAGS_DONT_TRAP
			/* let others receive mcast packets too (eg. Linux) */
    );
  if (!rd->flow_ucast6 || !rd->flow_mcast6 || !rd->flow_ucast4
      || !rd->flow_mcast4)
    return ~0;

  rd->flags &= ~RDMA_DEVICE_F_PROMISC;
  return 0;
}

static clib_error_t *
rdma_mac_change (vnet_hw_interface_t * hw, const u8 * old, const u8 * new)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);
  mac_address_from_bytes (&rd->hwaddr, new);
  if (!(rd->flags & RDMA_DEVICE_F_PROMISC) && rdma_dev_set_ucast (rd))
    {
      mac_address_from_bytes (&rd->hwaddr, old);
      return clib_error_return_unix (0, "MAC update failed");
    }
  return 0;
}

static u32
rdma_dev_change_mtu (rdma_device_t * rd)
{
  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "MTU change not supported");
  return ~0;
}

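/* Dispatch ethernet-layer flag changes (L3 filtering, promiscuous, MTU) to
 * the handlers above. */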
static u32
rdma_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);

  switch (flags)
    {
    case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
      return rdma_dev_set_ucast (rd);
    case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
      return rdma_dev_set_promisc (rd);
    case ETHERNET_INTERFACE_FLAG_MTU:
      return rdma_dev_change_mtu (rd);
    }

  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unknown flag %x requested", flags);
  return ~0;
}

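/* Refresh link state and speed from ibv_query_port(). active_width and
 * active_speed are IB verbs bitfield encodings; the link speed reported to
 * VPP is the lane count times the per-lane speed, in Kbps. */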
static void
rdma_update_state (vnet_main_t * vnm, rdma_device_t * rd, int port)
{
  struct ibv_port_attr attr;
  u32 width = 0;
  u32 speed = 0;

  if (ibv_query_port (rd->ctx, port, &attr))
    {
      vnet_hw_interface_set_link_speed (vnm, rd->hw_if_index, 0);
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      return;
    }

  /* update state */
  switch (attr.state)
    {
    case IBV_PORT_ACTIVE:	/* fallthrough */
    case IBV_PORT_ACTIVE_DEFER:
      rd->flags |= RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      break;
    default:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      break;
    }

  /* update speed */
  switch (attr.active_width)
    {
    case 1:
      width = 1;
      break;
    case 2:
      width = 4;
      break;
    case 4:
      width = 8;
      break;
    case 8:
      width = 12;
      break;
    }
  switch (attr.active_speed)
    {
    case 1:
      speed = 2500000;
      break;
    case 2:
      speed = 5000000;
      break;
    case 4:			/* fallthrough */
    case 8:
      speed = 10000000;
      break;
    case 16:
      speed = 14000000;
      break;
    case 32:
      speed = 25000000;
      break;
    }
  vnet_hw_interface_set_link_speed (vnm, rd->hw_if_index, width * speed);
}

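/* The ibv async event fd is registered with the VPP main loop: read-ready
 * drains one event at a time, updates link state, and acks every event. */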
static clib_error_t *
rdma_async_event_error_ready (clib_file_t * f)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, f->private_data);
  return clib_error_return (0, "RDMA: %s: async event error", rd->name);
}

static clib_error_t *
rdma_async_event_read_ready (clib_file_t * f)
{
  vnet_main_t *vnm = vnet_get_main ();
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, f->private_data);
  int ret;
  struct ibv_async_event event;
  ret = ibv_get_async_event (rd->ctx, &event);
  if (ret < 0)
    return clib_error_return_unix (0, "ibv_get_async_event() failed");

  switch (event.event_type)
    {
    case IBV_EVENT_PORT_ACTIVE:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_PORT_ERR:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_DEVICE_FATAL:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      vlib_log_emerg (rm->log_class, "%s: fatal error", rd->name);
      break;
    default:
      rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unhandled RDMA async event %i",
		  event.event_type);
      break;
    }

  ibv_ack_async_event (&event);
  return 0;
}

static clib_error_t *
rdma_async_event_init (rdma_device_t * rd)
{
  clib_file_t t = { 0 };
  int ret;

  /* make RDMA async event fd non-blocking */
  ret = fcntl (rd->ctx->async_fd, F_GETFL);
  if (ret < 0)
    return clib_error_return_unix (0, "fcntl(F_GETFL) failed");

  ret = fcntl (rd->ctx->async_fd, F_SETFL, ret | O_NONBLOCK);
  if (ret < 0)
    return clib_error_return_unix (0, "fcntl(F_SETFL, O_NONBLOCK) failed");

  /* register RDMA async event fd */
  t.read_function = rdma_async_event_read_ready;
  t.file_descriptor = rd->ctx->async_fd;
  t.error_function = rdma_async_event_error_ready;
  t.private_data = rd->dev_instance;
  t.description = format (0, "%v async event", rd->name);

  rd->async_event_clib_file_index = clib_file_add (&file_main, &t);
  return 0;
}

static void
rdma_async_event_cleanup (rdma_device_t * rd)
{
  clib_file_del_by_index (&file_main, rd->async_event_clib_file_index);
}

static clib_error_t *
rdma_register_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
  clib_error_t *err =
    ethernet_register_interface (vnm, rdma_device_class.index,
				 rd->dev_instance, rd->hwaddr.bytes,
				 &rd->hw_if_index, rdma_flag_change);

  /* Indicate ability to support L3 DMAC filtering and
   * initialize interface to L3 non-promiscuous mode */
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rd->hw_if_index);
  hi->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_MAC_FILTER;
  ethernet_set_flags (vnm, rd->hw_if_index,
		      ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
  return err;
}

static void
rdma_unregister_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
  vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
  ethernet_delete_interface (vnm, rd->hw_if_index);
}

static void
rdma_dev_cleanup (rdma_device_t * rd)
{
  rdma_main_t *rm = &rdma_main;
  rdma_rxq_t *rxq;
  rdma_txq_t *txq;

#define _(fn, arg) if (arg) \
  { \
    int rv; \
    if ((rv = fn (arg))) \
      rdma_log (VLIB_LOG_LEVEL_DEBUG, rd, #fn "() failed (rv = %d)", rv); \
  }

  _(ibv_destroy_flow, rd->flow_mcast6);
  _(ibv_destroy_flow, rd->flow_ucast6);
  _(ibv_destroy_flow, rd->flow_mcast4);
  _(ibv_destroy_flow, rd->flow_ucast4);
  _(ibv_dereg_mr, rd->mr);
  vec_foreach (txq, rd->txqs)
  {
    _(ibv_destroy_qp, txq->qp);
    _(ibv_destroy_cq, txq->cq);
  }
  vec_foreach (rxq, rd->rxqs)
  {
    _(ibv_destroy_wq, rxq->wq);
    _(ibv_destroy_cq, rxq->cq);
  }
  _(ibv_destroy_rwq_ind_table, rd->rx_rwq_ind_tbl);
  _(ibv_destroy_qp, rd->rx_qp6);
  _(ibv_destroy_qp, rd->rx_qp4);
  _(ibv_dealloc_pd, rd->pd);
  _(ibv_close_device, rd->ctx);
#undef _

  clib_error_free (rd->error);

  vec_free (rd->rxqs);
  vec_free (rd->txqs);
  vec_free (rd->name);
  vec_free (rd->linux_ifname);
  vlib_pci_free_device_info (rd->pci);
  pool_put (rm->devices, rd);
}

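/* RX queue initialization. Two mlx5 layouts are supported:
 * - striding RQ: a WQE owns a power-of-two number of fixed-size strides and
 *   a packet can span several contiguous strides, so one descriptor maps to
 *   one stride;
 * - legacy mode: a WQE is a scatter-gather list of data segments, each
 *   pointing to one vlib_buffer (a single segment per WQE when multiseg is
 *   disabled). */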
static clib_error_t *
rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc,
	       u8 no_multi_seg, u16 max_pktlen)
{
  rdma_rxq_t *rxq;
  struct ibv_wq_init_attr wqia;
  struct ibv_cq_init_attr_ex cqa = { };
  struct ibv_wq_attr wqa;
  struct ibv_cq_ex *cqex;
  struct mlx5dv_wq_init_attr dv_wqia = { };
  int is_mlx5dv = ! !(rd->flags & RDMA_DEVICE_F_MLX5DV);
  int is_striding = ! !(rd->flags & RDMA_DEVICE_F_STRIDING_RQ);

  vec_validate_aligned (rd->rxqs, qid, CLIB_CACHE_LINE_BYTES);
  rxq = vec_elt_at_index (rd->rxqs, qid);
  rxq->size = n_desc;
  rxq->log_wqe_sz = 0;
  rxq->buf_sz = vlib_buffer_get_default_data_size (vm);
  vec_validate_aligned (rxq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);

  cqa.cqe = n_desc;
  if (is_mlx5dv)
    {
      struct mlx5dv_cq_init_attr dvcq = { };
      dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
      dvcq.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;

      if ((cqex = mlx5dv_create_cq (rd->ctx, &cqa, &dvcq)) == 0)
	return clib_error_return_unix (0, "Create mlx5dv rx CQ Failed");
    }
  else
    {
      if ((cqex = ibv_create_cq_ex (rd->ctx, &cqa)) == 0)
	return clib_error_return_unix (0, "Create CQ Failed");
    }

  rxq->cq = ibv_cq_ex_to_cq (cqex);

  memset (&wqia, 0, sizeof (wqia));
  wqia.wq_type = IBV_WQT_RQ;
  wqia.max_wr = n_desc;
  wqia.max_sge = 1;
  wqia.pd = rd->pd;
  wqia.cq = rxq->cq;

  if (is_mlx5dv)
    {
      if (is_striding)
	{
	  /* In STRIDING_RQ mode, map a descriptor to a stride, not a full WQE buffer */
	  uword data_seg_log2_sz =
	    min_log2 (vlib_buffer_get_default_data_size (vm));
	  rxq->buf_sz = 1 << data_seg_log2_sz;
	  /* The trick is also to map a descriptor to a data segment in the WQE SG list.
	     The number of strides per WQE and the size of a WQE (in 16-bytes words) both
	     must be powers of two.
	     Moreover, in striding RQ mode, WQEs must include the SRQ header, which occupies
	     one 16-bytes word. That is why WQEs have 2*RDMA_RXQ_MAX_CHAIN_SZ 16-bytes words:
	     - One for the SRQ header
	     - RDMA_RXQ_MAX_CHAIN_SZ for the different data segments (each mapped to
	       a stride, and a vlib_buffer)
	     - RDMA_RXQ_MAX_CHAIN_SZ-1 null data segments
	   */
	  int max_chain_log_sz =
	    max_pktlen ? max_log2 ((max_pktlen /
				    (rxq->buf_sz)) +
				   1) : RDMA_RXQ_MAX_CHAIN_LOG_SZ;
	  max_chain_log_sz = clib_max (max_chain_log_sz, 3);
	  wqia.max_sge = 1 << max_chain_log_sz;
	  dv_wqia.comp_mask = MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
	  dv_wqia.striding_rq_attrs.two_byte_shift_en = 0;
	  dv_wqia.striding_rq_attrs.single_wqe_log_num_of_strides =
	    max_chain_log_sz;
	  dv_wqia.striding_rq_attrs.single_stride_log_num_of_bytes =
	    data_seg_log2_sz;
	  wqia.max_wr >>= max_chain_log_sz;
	  rxq->log_wqe_sz = max_chain_log_sz + 1;
	  rxq->log_stride_per_wqe = max_chain_log_sz;
	}
      else
	{
	  /* In non STRIDING_RQ mode and if multiseg is not disabled, each WQE is a SG list of data
	     segments, each pointing to a vlib_buffer. */
	  if (no_multi_seg)
	    {
	      wqia.max_sge = 1;
	      rxq->log_wqe_sz = 0;
	      rxq->n_ds_per_wqe = 1;
	    }
	  else
	    {
	      int max_chain_sz =
		max_pktlen ? (max_pktlen /
			      (rxq->buf_sz)) +
		1 : RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ;
	      int max_chain_log_sz = max_log2 (max_chain_sz);
	      wqia.max_sge = 1 << max_chain_log_sz;
	      rxq->log_wqe_sz = max_chain_log_sz;
	      rxq->n_ds_per_wqe = max_chain_sz;
	    }
	}

      if ((rxq->wq = mlx5dv_create_wq (rd->ctx, &wqia, &dv_wqia)))
	{
	  rxq->wq->events_completed = 0;
	  pthread_mutex_init (&rxq->wq->mutex, NULL);
	  pthread_cond_init (&rxq->wq->cond, NULL);
	}
      else
	return clib_error_return_unix (0, "Create WQ Failed");
    }
  else if ((rxq->wq = ibv_create_wq (rd->ctx, &wqia)) == 0)
    return clib_error_return_unix (0, "Create WQ Failed");

  memset (&wqa, 0, sizeof (wqa));
  wqa.attr_mask = IBV_WQ_ATTR_STATE;
  wqa.wq_state = IBV_WQS_RDY;
  if (ibv_modify_wq (rxq->wq, &wqa) != 0)
    return clib_error_return_unix (0, "Modify WQ (RDY) Failed");

  if (is_mlx5dv)
    {
      struct mlx5dv_obj obj = { };
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_rwq dv_rwq;
      u64 qw0;
      u64 qw0_nullseg;
      u32 wqe_sz_mask = (1 << rxq->log_wqe_sz) - 1;

      obj.cq.in = rxq->cq;
      obj.cq.out = &dv_cq;
      obj.rwq.in = rxq->wq;
      obj.rwq.out = &dv_rwq;

      if ((mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ)))
	return clib_error_return_unix (0, "mlx5dv: failed to init rx obj");

      if (dv_cq.cqe_size != sizeof (mlx5dv_cqe_t))
	return clib_error_return_unix (0, "mlx5dv: incompatible rx CQE size");

      rxq->log2_cq_size = max_log2 (dv_cq.cqe_cnt);
      rxq->cqes = (mlx5dv_cqe_t *) dv_cq.buf;
      rxq->cq_db = (volatile u32 *) dv_cq.dbrec;
      rxq->cqn = dv_cq.cqn;

      rxq->wqes = (mlx5dv_wqe_ds_t *) dv_rwq.buf;
      rxq->wq_db = (volatile u32 *) dv_rwq.dbrec;
      rxq->wq_stride = dv_rwq.stride;
      rxq->wqe_cnt = dv_rwq.wqe_cnt;

      qw0 = clib_host_to_net_u32 (rxq->buf_sz);
      qw0_nullseg = 0;
      qw0 |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;
      qw0_nullseg |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;

      /* Prefill the different 16-bytes words of the WQ.
         - If not in striding RQ mode, for each WQE, init the first
         RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ words with qw0, and init the rest
         of the WQE with null segments.
         - If in striding RQ mode, for each WQE, the RDMA_RXQ_MAX_CHAIN_SZ + 1
         first 16-bytes words are initialised with qw0, the rest are null
         segments */
      for (int i = 0; i < rxq->wqe_cnt << rxq->log_wqe_sz; i++)
	if ((!is_striding
	     && ((i & wqe_sz_mask) < rxq->n_ds_per_wqe))
	    || (is_striding
		&& ((i == 0)
		    || !(((i - 1) >> rxq->log_stride_per_wqe) & 0x1))))
	  rxq->wqes[i].dsz_and_lkey = qw0;
	else
	  rxq->wqes[i].dsz_and_lkey = qw0_nullseg;

      for (int i = 0; i < (1 << rxq->log2_cq_size); i++)
	rxq->cqes[i].opcode_cqefmt_se_owner = 0xff;

      if (!is_striding)
	{
	  vec_validate_aligned (rxq->second_bufs, n_desc - 1,
				CLIB_CACHE_LINE_BYTES);
	  vec_validate_aligned (rxq->n_used_per_chain, n_desc - 1,
				CLIB_CACHE_LINE_BYTES);
	  rxq->n_total_additional_segs = n_desc * (rxq->n_ds_per_wqe - 1);
	  for (int i = 0; i < n_desc; i++)
	    rxq->n_used_per_chain[i] = rxq->n_ds_per_wqe - 1;
	}
    }

  return 0;
}

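/* RSS setup: group all RX WQs in an indirection table and create two hash
 * QPs (IPv4 and IPv6) that spread flows with a Toeplitz hash over addresses
 * and TCP ports, using the default key defined above. */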
static clib_error_t *
rdma_rxq_finalize (vlib_main_t * vm, rdma_device_t * rd)
{
  struct ibv_rwq_ind_table_init_attr rwqia;
  struct ibv_qp_init_attr_ex qpia;
  struct ibv_wq **ind_tbl;
  u32 i;

  ASSERT (is_pow2 (vec_len (rd->rxqs))
	  && "rxq number should be a power of 2");

  ind_tbl = vec_new (struct ibv_wq *, vec_len (rd->rxqs));
  vec_foreach_index (i, rd->rxqs)
    ind_tbl[i] = vec_elt_at_index (rd->rxqs, i)->wq;
  memset (&rwqia, 0, sizeof (rwqia));
  rwqia.log_ind_tbl_size = min_log2 (vec_len (ind_tbl));
  rwqia.ind_tbl = ind_tbl;
  if ((rd->rx_rwq_ind_tbl = ibv_create_rwq_ind_table (rd->ctx, &rwqia)) == 0)
    return clib_error_return_unix (0, "RWQ indirection table create failed");
  vec_free (ind_tbl);

  memset (&qpia, 0, sizeof (qpia));
  qpia.qp_type = IBV_QPT_RAW_PACKET;
  qpia.comp_mask =
    IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_IND_TABLE |
    IBV_QP_INIT_ATTR_RX_HASH;
  qpia.pd = rd->pd;
  qpia.rwq_ind_tbl = rd->rx_rwq_ind_tbl;
  STATIC_ASSERT_SIZEOF (rdma_rss_hash_key, 40);
  qpia.rx_hash_conf.rx_hash_key_len = sizeof (rdma_rss_hash_key);
  qpia.rx_hash_conf.rx_hash_key = rdma_rss_hash_key;
  qpia.rx_hash_conf.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ;

  qpia.rx_hash_conf.rx_hash_fields_mask =
    IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_SRC_PORT_TCP |
    IBV_RX_HASH_DST_PORT_TCP;
  if ((rd->rx_qp4 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
    return clib_error_return_unix (0, "IPv4 Queue Pair create failed");

  qpia.rx_hash_conf.rx_hash_fields_mask =
    IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 | IBV_RX_HASH_SRC_PORT_TCP |
    IBV_RX_HASH_DST_PORT_TCP;
  if ((rd->rx_qp6 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
    return clib_error_return_unix (0, "IPv6 Queue Pair create failed");

  if (rdma_dev_set_ucast (rd))
    return clib_error_return_unix (0, "Set unicast mode failed");

  return 0;
}

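/* TX queue initialization. A raw-packet QP must be walked through the verbs
 * state machine (RESET -> INIT -> RTR -> RTS) before it can send. */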
static clib_error_t *
rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
{
  rdma_txq_t *txq;
  struct ibv_qp_init_attr qpia;
  struct ibv_qp_attr qpa;
  int qp_flags;

  vec_validate_aligned (rd->txqs, qid, CLIB_CACHE_LINE_BYTES);
  txq = vec_elt_at_index (rd->txqs, qid);
  ASSERT (is_pow2 (n_desc));
  txq->bufs_log2sz = min_log2 (n_desc);
  vec_validate_aligned (txq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);

  if ((txq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0)
    return clib_error_return_unix (0, "Create CQ Failed");

  memset (&qpia, 0, sizeof (qpia));
  qpia.send_cq = txq->cq;
  qpia.recv_cq = txq->cq;
  qpia.cap.max_send_wr = n_desc;
  qpia.cap.max_send_sge = 1;
  qpia.qp_type = IBV_QPT_RAW_PACKET;

  if ((txq->qp = ibv_create_qp (rd->pd, &qpia)) == 0)
    return clib_error_return_unix (0, "Queue Pair create failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE | IBV_QP_PORT;
  qpa.qp_state = IBV_QPS_INIT;
  qpa.port_num = 1;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (init) Failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTR;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (receive) Failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTS;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (send) Failed");

  txq->ibv_cq = txq->cq;
  txq->ibv_qp = txq->qp;

  if (rd->flags & RDMA_DEVICE_F_MLX5DV)
    {
      rdma_mlx5_wqe_t *tmpl = (void *) txq->dv_wqe_tmpl;
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_qp dv_qp;
      struct mlx5dv_obj obj = { };

      obj.cq.in = txq->cq;
      obj.cq.out = &dv_cq;
      obj.qp.in = txq->qp;
      obj.qp.out = &dv_qp;

      if (mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP))
	return clib_error_return_unix (0, "DV init obj failed");

      if (RDMA_TXQ_BUF_SZ (txq) > dv_qp.sq.wqe_cnt
	  || !is_pow2 (dv_qp.sq.wqe_cnt)
	  || sizeof (rdma_mlx5_wqe_t) != dv_qp.sq.stride
	  || (uword) dv_qp.sq.buf % sizeof (rdma_mlx5_wqe_t))
	return clib_error_return (0, "Unsupported DV SQ parameters");

      if (RDMA_TXQ_BUF_SZ (txq) > dv_cq.cqe_cnt
	  || !is_pow2 (dv_cq.cqe_cnt)
	  || sizeof (struct mlx5_cqe64) != dv_cq.cqe_size
	  || (uword) dv_cq.buf % sizeof (struct mlx5_cqe64))
	return clib_error_return (0, "Unsupported DV CQ parameters");

      /* get SQ and doorbell addresses */
      txq->dv_sq_wqes = dv_qp.sq.buf;
      txq->dv_sq_dbrec = dv_qp.dbrec;
      txq->dv_sq_db = dv_qp.bf.reg;
      txq->dv_sq_log2sz = min_log2 (dv_qp.sq.wqe_cnt);

      /* get CQ and doorbell addresses */
      txq->dv_cq_cqes = dv_cq.buf;
      txq->dv_cq_dbrec = dv_cq.dbrec;
      txq->dv_cq_log2sz = min_log2 (dv_cq.cqe_cnt);

      /* init tx desc template */
      STATIC_ASSERT_SIZEOF (txq->dv_wqe_tmpl, sizeof (*tmpl));
      mlx5dv_set_ctrl_seg (&tmpl->ctrl, 0, MLX5_OPCODE_SEND, 0,
			   txq->qp->qp_num, 0, RDMA_MLX5_WQE_DS, 0,
			   RDMA_TXQ_DV_INVALID_ID);
      tmpl->eseg.inline_hdr_sz = htobe16 (MLX5_ETH_L2_INLINE_HEADER_SIZE);
      mlx5dv_set_data_seg (&tmpl->dseg, 0, rd->lkey, 0);
    }

  return 0;
}

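/* Device-level init: allocate the protection domain, register the whole VPP
 * buffer memory as a single memory region (the lkey is cached to avoid an
 * indirection in the datapath), then create the queues. */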
static clib_error_t *
rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd,
	       rdma_create_if_args_t * args)
{
  clib_error_t *err;
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 rxq_num = args->rxq_num;
  u32 rxq_size = args->rxq_size;
  u32 txq_size = args->txq_size;
  u32 i;

  if (rd->ctx == 0)
    return clib_error_return_unix (0, "Device Open Failed");

  if ((rd->pd = ibv_alloc_pd (rd->ctx)) == 0)
    return clib_error_return_unix (0, "PD Alloc Failed");

  if ((rd->mr = ibv_reg_mr (rd->pd, (void *) bm->buffer_mem_start,
			    bm->buffer_mem_size,
			    IBV_ACCESS_LOCAL_WRITE)) == 0)
    return clib_error_return_unix (0, "Register MR Failed");

  rd->lkey = rd->mr->lkey;	/* avoid indirection in datapath */

  ethernet_mac_address_generate (rd->hwaddr.bytes);

  /*
   * /!\ WARNING /!\ creation order is important
   * We *must* create TX queues *before* RX queues, otherwise we will receive
   * the broadcast packets we sent
   */
  for (i = 0; i < tm->n_vlib_mains; i++)
    if ((err = rdma_txq_init (vm, rd, i, txq_size)))
      return err;

  for (i = 0; i < rxq_num; i++)
    if ((err =
	 rdma_rxq_init (vm, rd, i, rxq_size,
			args->no_multi_seg, args->max_pktlen)))
      return err;
  if ((err = rdma_rxq_finalize (vm, rd)))
    return err;

  return 0;
}

static uword
sysfs_path_to_pci_addr (char *path, vlib_pci_addr_t * addr)
{
  uword rv;
  unformat_input_t in;
  u8 *s;

  s = clib_sysfs_link_to_name (path);
  if (!s)
    return 0;
  unformat_init_string (&in, (char *) s, strlen ((char *) s));
  rv = unformat (&in, "%U", unformat_vlib_pci_addr, addr);
  unformat_free (&in);
  vec_free (s);
  return rv;
}

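/* Interface creation: validate queue parameters, match the Linux netdev to
 * its ibv device via the PCI address found in sysfs, probe mlx5dv and
 * striding RQ capabilities, then init the device and register it with VPP.
 * Typically reached through the plugin's CLI/API, e.g. (illustrative):
 * "create interface rdma host-if <ifname> name <name>". */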
void
rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args)
{
  vnet_main_t *vnm = vnet_get_main ();
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd;
  vlib_pci_addr_t pci_addr;
  struct ibv_device **dev_list;
  int n_devs;
  u8 *s;
  u16 qid;
  int i;

  args->rxq_size = args->rxq_size ? args->rxq_size : 1024;
  args->txq_size = args->txq_size ? args->txq_size : 1024;
  args->rxq_num = args->rxq_num ? args->rxq_num : 2;

  if (!is_pow2 (args->rxq_num))
    {
      args->rv = VNET_API_ERROR_INVALID_VALUE;
      args->error =
	clib_error_return (0, "rx queue number must be a power of two");
      goto err0;
    }

  if (args->rxq_size < VLIB_FRAME_SIZE || args->txq_size < VLIB_FRAME_SIZE ||
      args->rxq_size > 65535 || args->txq_size > 65535 ||
      !is_pow2 (args->rxq_size) || !is_pow2 (args->txq_size))
    {
      args->rv = VNET_API_ERROR_INVALID_VALUE;
      args->error = clib_error_return (0, "queue size must be a power of two "
				       "between %i and 65535",
				       VLIB_FRAME_SIZE);
      goto err0;
    }

  dev_list = ibv_get_device_list (&n_devs);
  if (n_devs == 0)
    {
      args->error =
	clib_error_return_unix (0,
				"no RDMA devices available. Is the ib_uverbs module loaded?");
      goto err0;
    }

  /* get PCI address */
  s = format (0, "/sys/class/net/%s/device%c", args->ifname, 0);
  if (sysfs_path_to_pci_addr ((char *) s, &pci_addr) == 0)
    {
      args->error =
	clib_error_return (0, "cannot find PCI address for device ");
      goto err1;
    }

  pool_get_zero (rm->devices, rd);
  rd->dev_instance = rd - rm->devices;
  rd->per_interface_next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  rd->linux_ifname = format (0, "%s", args->ifname);

  if (!args->name || 0 == args->name[0])
    rd->name = format (0, "%s/%d", args->ifname, rd->dev_instance);
  else
    rd->name = format (0, "%s", args->name);

  rd->pci = vlib_pci_get_device_info (vm, &pci_addr, &args->error);
  if (!rd->pci)
    goto err2;

  /* if we failed to parse NUMA node, default to 0 */
  if (-1 == rd->pci->numa_node)
    rd->pci->numa_node = 0;

  rd->pool = vlib_buffer_pool_get_default_for_numa (vm, rd->pci->numa_node);

  if (strncmp ((char *) rd->pci->driver_name, "mlx5_core", 9))
    {
      args->error =
	clib_error_return (0,
			   "invalid interface (only mlx5 supported for now)");
      goto err2;
    }

  for (i = 0; i < n_devs; i++)
    {
      vlib_pci_addr_t addr;

      vec_reset_length (s);
      s = format (s, "%s/device%c", dev_list[i]->dev_path, 0);

      if (sysfs_path_to_pci_addr ((char *) s, &addr) == 0)
	continue;

      if (addr.as_u32 != rd->pci->addr.as_u32)
	continue;

      if ((rd->ctx = ibv_open_device (dev_list[i])))
	break;
    }

  if (args->mode != RDMA_MODE_IBV)
    {
      struct mlx5dv_context mlx5dv_attrs = { };
      mlx5dv_attrs.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;

      if (mlx5dv_query_device (rd->ctx, &mlx5dv_attrs) == 0)
	{
	  uword data_seg_log2_sz =
	    min_log2 (vlib_buffer_get_default_data_size (vm));

	  if ((mlx5dv_attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1))
	    rd->flags |= RDMA_DEVICE_F_MLX5DV;

	  /* Enable striding RQ if neither multiseg nor striding rq
	     are explicitly disabled, and if the interface supports it. */
	  if (!args->no_multi_seg && !args->disable_striding_rq
	      && data_seg_log2_sz <=
	      mlx5dv_attrs.striding_rq_caps.max_single_stride_log_num_of_bytes
	      && data_seg_log2_sz >=
	      mlx5dv_attrs.striding_rq_caps.min_single_stride_log_num_of_bytes
	      && RDMA_RXQ_MAX_CHAIN_LOG_SZ >=
	      mlx5dv_attrs.striding_rq_caps.min_single_wqe_log_num_of_strides
	      && RDMA_RXQ_MAX_CHAIN_LOG_SZ <=
	      mlx5dv_attrs.striding_rq_caps.max_single_wqe_log_num_of_strides)
	    rd->flags |= RDMA_DEVICE_F_STRIDING_RQ;
	}
      else
	{
	  if (args->mode == RDMA_MODE_DV)
	    {
	      args->error = clib_error_return (0, "Direct Verbs mode not "
					       "supported on this interface");
	      goto err2;
	    }
	}
    }

  if ((args->error = rdma_dev_init (vm, rd, args)))
    goto err2;

  if ((args->error = rdma_register_interface (vnm, rd)))
    goto err2;

  if ((args->error = rdma_async_event_init (rd)))
    goto err3;

  rdma_update_state (vnm, rd, 1);

  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, rd->hw_if_index);
  args->sw_if_index = rd->sw_if_index = sw->sw_if_index;
  /*
   * FIXME: add support for interrupt mode
   * vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, rd->hw_if_index);
   * hw->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE;
   */
  vnet_hw_if_set_input_node (vnm, rd->hw_if_index, rdma_input_node.index);

  vec_foreach_index (qid, rd->rxqs)
    {
      u32 queue_index = vnet_hw_if_register_rx_queue (
	vnm, rd->hw_if_index, qid, VNET_HW_IF_RXQ_THREAD_ANY);
      rd->rxqs[qid].queue_index = queue_index;
    }
  vnet_hw_if_update_runtime_data (vnm, rd->hw_if_index);

  ibv_free_device_list (dev_list);
  vec_free (s);
  return;

err3:
  rdma_unregister_interface (vnm, rd);
err2:
  rdma_dev_cleanup (rd);
err1:
  ibv_free_device_list (dev_list);
  vec_free (s);
err0:
  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
  vlib_log_err (rm->log_class, "%U", format_clib_error, args->error);
}

void
rdma_delete_if (vlib_main_t * vm, rdma_device_t * rd)
{
  rdma_async_event_cleanup (rd);
  rdma_unregister_interface (vnet_get_main (), rd);
  rdma_dev_cleanup (rd);
}

static clib_error_t *
rdma_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hi->dev_instance);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  if (rd->flags & RDMA_DEVICE_F_ERROR)
    return clib_error_return (0, "device is in error state");

  if (is_up)
    {
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      rd->flags |= RDMA_DEVICE_F_ADMIN_UP;
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      rd->flags &= ~RDMA_DEVICE_F_ADMIN_UP;
    }

  return 0;
}

static void
rdma_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  rdma_main_t *rm = &rdma_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  rdma_device_t *rd = pool_elt_at_index (rm->devices, hw->dev_instance);
  rd->per_interface_next_index =
    ~0 ==
    node_index ? VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT :
    vlib_node_add_next (vlib_get_main (), rdma_input_node.index, node_index);
}

static char *rdma_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_rdma_tx_func_error
#undef _
};

VNET_DEVICE_CLASS (rdma_device_class) =
{
  .name = "RDMA interface",
  .format_device = format_rdma_device,
  .format_device_name = format_rdma_device_name,
  .admin_up_down_function = rdma_interface_admin_up_down,
  .rx_redirect_to_node = rdma_set_interface_next_node,
  .tx_function_n_errors = RDMA_TX_N_ERROR,
  .tx_function_error_strings = rdma_tx_func_error_strings,
  .mac_addr_change_function = rdma_mac_change,
};

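/* Plugin init: register the log class and prepare the per-thread
 * vlib_buffer_t template used by the RX datapath. */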
static clib_error_t *
rdma_init (vlib_main_t * vm)
{
  rdma_main_t *rm = &rdma_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  rm->log_class = vlib_log_register_class ("rdma", 0);

  /* vlib_buffer_t template */
  vec_validate_aligned (rm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  for (int i = 0; i < tm->n_vlib_mains; i++)
    {
      rdma_per_thread_data_t *ptd = vec_elt_at_index (rm->per_thread_data, i);
      clib_memset (&ptd->buffer_template, 0, sizeof (vlib_buffer_t));
      ptd->buffer_template.flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
      ptd->buffer_template.ref_count = 1;
      vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_TX] = (u32) ~ 0;
    }

  return 0;
}

VLIB_INIT_FUNCTION (rdma_init) =
{
  .runs_after = VLIB_INITS ("pci_bus_init"),
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */