/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
18 #include <vlib/vlib.h>
19 #include <vlib/unix/unix.h>
20 #include <vlib/pci/pci.h>
21 #include <vppinfra/ring.h>
22 #include <vnet/ethernet/ethernet.h>
23 #include <vnet/devices/devices.h>
25 #include <rdma/rdma.h>
27 static_always_inline void
28 rdma_device_output_free (vlib_main_t * vm, rdma_txq_t * txq)
30 struct ibv_wc wc[VLIB_FRAME_SIZE];
31 u32 to_free[VLIB_FRAME_SIZE];
35 n_free = ibv_poll_cq (txq->cq, VLIB_FRAME_SIZE, wc);
39 for (i = 0; i < n_free; i++)
40 to_free[i] = wc[i].wr_id;
42 vlib_buffer_free (vm, to_free, n_free);
45 VNET_DEVICE_CLASS_TX_FN (rdma_device_class) (vlib_main_t * vm,
46 vlib_node_runtime_t * node,
49 rdma_main_t *rm = &rdma_main;
50 vnet_interface_output_runtime_t *ord = (void *) node->runtime_data;
51 rdma_device_t *rd = pool_elt_at_index (rm->devices, ord->dev_instance);
52 u32 thread_index = vm->thread_index;
54 vec_elt_at_index (rd->txqs, thread_index % vec_len (rd->txqs));
55 u32 *from, *f, n_left_from;
56 u32 n_tx_packets, n_tx_failed;
57 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
58 struct ibv_send_wr wr[VLIB_FRAME_SIZE], *w = wr;
59 struct ibv_sge sge[VLIB_FRAME_SIZE], *s = sge;
62 f = from = vlib_frame_vector_args (frame);
63 n_left_from = frame->n_vectors;
64 vlib_get_buffers (vm, from, bufs, n_left_from);
66 memset (w, 0, n_left_from * sizeof (w[0]));
68 while (n_left_from >= 2)
70 if (PREDICT_TRUE (n_left_from >= 4))
72 vlib_prefetch_buffer_header (b[2 + 0], LOAD);
73 vlib_prefetch_buffer_header (b[2 + 1], LOAD);
74 CLIB_PREFETCH (&s[2 + 0], sizeof (s[0]), STORE);
75 CLIB_PREFETCH (&s[2 + 1], sizeof (s[0]), STORE);
76 CLIB_PREFETCH (&w[2 + 0], sizeof (w[0]), STORE);
77 CLIB_PREFETCH (&w[2 + 1], sizeof (w[0]), STORE);
80 s[0].addr = vlib_buffer_get_current_va (b[0]);
81 s[0].length = b[0]->current_length;
82 s[0].lkey = rd->mr->lkey;
84 s[1].addr = vlib_buffer_get_current_va (b[1]);
85 s[1].length = b[1]->current_length;
86 s[1].lkey = rd->mr->lkey;
89 w[0].next = &w[1 + 0];
92 w[0].opcode = IBV_WR_SEND;
93 w[0].send_flags = IBV_SEND_SIGNALED;
96 w[1].next = &w[1 + 1];
99 w[1].opcode = IBV_WR_SEND;
100 w[1].send_flags = IBV_SEND_SIGNALED;
109 while (n_left_from >= 1)
111 s[0].addr = vlib_buffer_get_current_va (b[0]);
112 s[0].length = b[0]->current_length;
113 s[0].lkey = rd->mr->lkey;
116 w[0].next = &w[1 + 0];
117 w[0].sg_list = &s[0];
119 w[0].opcode = IBV_WR_SEND;
120 w[0].send_flags = IBV_SEND_SIGNALED;
129 w[-1].next = 0; /* fix next pointer in WR linked-list last item */
132 clib_spinlock_lock_if_init (&txq->lock);
133 for (i = 0; i < 5; i++)
135 rdma_device_output_free (vm, txq);
136 if (0 == ibv_post_send (txq->qp, w, &w))
139 clib_spinlock_unlock_if_init (&txq->lock);
141 n_tx_packets = w == wr ? frame->n_vectors : w - wr;
142 n_tx_failed = frame->n_vectors - n_tx_packets;
144 if (PREDICT_FALSE (n_tx_failed))
146 vlib_buffer_free (vm, &from[n_tx_packets], n_tx_failed);
147 vlib_error_count (vm, node->node_index,
148 RDMA_TX_ERROR_NO_FREE_SLOTS, n_tx_failed);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */