/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vppinfra/ring.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <rdma/rdma.h>
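
/*
 * Poll the TX completion queue and release buffers whose send work requests
 * have completed. Only the last WR of each posted burst is signaled (see
 * IBV_SEND_SIGNALED below), so one CQE covers a whole batch of buffers.
 */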
static_always_inline void
rdma_device_output_free (vlib_main_t * vm, rdma_txq_t * txq)
{
  struct ibv_wc wc[VLIB_FRAME_SIZE];
  u32 tail, slot;
  int n;

  n = ibv_poll_cq (txq->cq, VLIB_FRAME_SIZE, wc);
  if (n <= 0)
    return;

  tail = wc[n - 1].wr_id;
  slot = txq->head & (txq->size - 1);
  vlib_buffer_free_from_ring (vm, txq->bufs, slot, txq->size,
                              tail - txq->head);
  txq->head = tail;
}
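
/*
 * Enqueue up to n_left_from buffers on the TX ring: build one scatter-gather
 * element and one send work request per packet, chain the WRs together and
 * post them to the queue pair with a single ibv_post_send() call.
 * Returns the number of packets actually enqueued.
 */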
static_always_inline u32
rmda_device_output_tx (vlib_main_t * vm, const rdma_device_t * rd,
                       rdma_txq_t * txq, u32 n_left_from, u32 * bi)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  struct ibv_send_wr wr[VLIB_FRAME_SIZE], *w = wr;
  struct ibv_sge sge[VLIB_FRAME_SIZE], *s = sge;
  u32 n, slot = txq->tail & (txq->size - 1);
  u32 *tx = &txq->bufs[slot];

  /* do not enqueue more packets than ring space */
  n_left_from = clib_min (n_left_from, txq->size - (txq->tail - txq->head));
  /* avoid wrap-around logic in core loop */
  n = n_left_from = clib_min (n_left_from, txq->size - slot);

  /* if ring is full, do nothing */
  if (PREDICT_FALSE (0 == n_left_from))
    return 0;

  vlib_get_buffers (vm, bi, bufs, n_left_from);
  memset (w, 0, n_left_from * sizeof (w[0]));
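
  /*
   * Main loop: build 4 SGEs and 4 send WRs per iteration, prefetching the
   * next 4 buffer headers and the next SGE/WR slots to keep the unrolled
   * body fed.
   */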
  while (n >= 4)
    {
      if (PREDICT_TRUE (n >= 8))
        {
          vlib_prefetch_buffer_header (b[4 + 0], LOAD);
          vlib_prefetch_buffer_header (b[4 + 1], LOAD);
          vlib_prefetch_buffer_header (b[4 + 2], LOAD);
          vlib_prefetch_buffer_header (b[4 + 3], LOAD);

          CLIB_PREFETCH (&s[4 + 0], 4 * sizeof (s[0]), STORE);

          CLIB_PREFETCH (&w[4 + 0], CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&w[4 + 1], CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&w[4 + 2], CLIB_CACHE_LINE_BYTES, STORE);
          CLIB_PREFETCH (&w[4 + 3], CLIB_CACHE_LINE_BYTES, STORE);
        }

      vlib_buffer_copy_indices (tx, bi, 4);

      s[0].addr = vlib_buffer_get_current_va (b[0]);
      s[0].length = b[0]->current_length;
      s[0].lkey = rd->lkey;

      s[1].addr = vlib_buffer_get_current_va (b[1]);
      s[1].length = b[1]->current_length;
      s[1].lkey = rd->lkey;

      s[2].addr = vlib_buffer_get_current_va (b[2]);
      s[2].length = b[2]->current_length;
      s[2].lkey = rd->lkey;

      s[3].addr = vlib_buffer_get_current_va (b[3]);
      s[3].length = b[3]->current_length;
      s[3].lkey = rd->lkey;

      w[0].next = &w[0] + 1;
      w[0].sg_list = &s[0];
      w[0].num_sge = 1;
      w[0].opcode = IBV_WR_SEND;

      w[1].next = &w[1] + 1;
      w[1].sg_list = &s[1];
      w[1].num_sge = 1;
      w[1].opcode = IBV_WR_SEND;

      w[2].next = &w[2] + 1;
      w[2].sg_list = &s[2];
      w[2].num_sge = 1;
      w[2].opcode = IBV_WR_SEND;

      w[3].next = &w[3] + 1;
      w[3].sg_list = &s[3];
      w[3].num_sge = 1;
      w[3].opcode = IBV_WR_SEND;

      s += 4;
      w += 4;
      b += 4;
      bi += 4;
      tx += 4;
      n -= 4;
    }

  while (n >= 1)
    {
      vlib_buffer_copy_indices (tx, bi, 1);

      s[0].addr = vlib_buffer_get_current_va (b[0]);
      s[0].length = b[0]->current_length;
      s[0].lkey = rd->lkey;

      w[0].next = &w[0] + 1;
      w[0].sg_list = &s[0];
      w[0].num_sge = 1;
      w[0].opcode = IBV_WR_SEND;

      s += 1;
      w += 1;
      b += 1;
      bi += 1;
      tx += 1;
      n -= 1;
    }

  w[-1].wr_id = txq->tail + n_left_from;	/* register item to free */
  w[-1].next = 0;		/* fix next pointer in WR linked-list */
  w[-1].send_flags = IBV_SEND_SIGNALED;	/* generate a CQE so we can free buffers */
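
  /*
   * Post the whole chained WR list with one verbs call. On failure, the
   * bad-WR output pointer tells us how many requests were actually accepted.
   */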
  if (PREDICT_FALSE (0 != ibv_post_send (txq->qp, wr, &w)))
    n_left_from = w - wr;
  txq->tail += n_left_from;
  return n_left_from;
}
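
/*
 * Per-interface TX node function: pick a TX queue for this thread, then
 * alternate between freeing completed buffers and enqueueing new ones (up to
 * 5 rounds). Anything still left after that is dropped and counted.
 */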
VNET_DEVICE_CLASS_TX_FN (rdma_device_class) (vlib_main_t * vm,
                                             vlib_node_runtime_t * node,
                                             vlib_frame_t * frame)
{
  rdma_main_t *rm = &rdma_main;
  vnet_interface_output_runtime_t *ord = (void *) node->runtime_data;
  rdma_device_t *rd = pool_elt_at_index (rm->devices, ord->dev_instance);
  u32 thread_index = vm->thread_index;
  rdma_txq_t *txq =
    vec_elt_at_index (rd->txqs, thread_index % vec_len (rd->txqs));
  u32 *from, n_left_from;
  int i;

  ASSERT (txq->size >= VLIB_FRAME_SIZE && is_pow2 (txq->size));
  ASSERT (txq->tail - txq->head <= txq->size);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
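
  /* a TX queue may be shared by several worker threads; the spinlock is
   * only taken if it was initialized for that case */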
  clib_spinlock_lock_if_init (&txq->lock);

  for (i = 0; i < 5 && n_left_from > 0; i++)
    {
      u32 n_enq;
      rdma_device_output_free (vm, txq);
      n_enq = rmda_device_output_tx (vm, rd, txq, n_left_from, from);
      n_left_from -= n_enq;
      from += n_enq;
    }

  clib_spinlock_unlock_if_init (&txq->lock);
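
  /* not everything fit on the TX ring: drop the remainder and count the
   * error against this interface's TX node */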
  if (PREDICT_FALSE (n_left_from))
    {
      vlib_buffer_free (vm, from, n_left_from);
      vlib_error_count (vm, node->node_index,
                        RDMA_TX_ERROR_NO_FREE_SLOTS, n_left_from);
    }

  return frame->n_vectors - n_left_from;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */