/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <rdma/rdma.h>
#define foreach_rdma_input_error \
  _(BUFFER_ALLOC, "buffer alloc error")

typedef enum
{
#define _(f,s) RDMA_INPUT_ERROR_##f,
  foreach_rdma_input_error
#undef _
    RDMA_INPUT_N_ERROR,
} rdma_input_error_t;

static __clib_unused char *rdma_input_error_strings[] = {
#define _(n,s) s,
  foreach_rdma_input_error
#undef _
};
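/*
 * Refill the RX ring: allocate vlib buffers and post them to the ibverbs
 * work queue as receive work requests.
 *
 * rxq->head and rxq->tail are free-running u32 counters: tail counts
 * buffers posted to the NIC, head counts completions consumed, so
 * tail - head is the number of buffers owned by the NIC and
 * tail & (size - 1) is the next ring slot (size is a power of 2).
 * For example, with size = 1024, head = 2040 and tail = 3000, 960 buffers
 * are in flight and the next free slot is 3000 & 1023 = 952.
 */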
static_always_inline void
rdma_device_input_refill (vlib_main_t * vm, rdma_device_t * rd,
			  rdma_rxq_t * rxq)
{
  u32 n_alloc, n;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  struct ibv_recv_wr wr[VLIB_FRAME_SIZE], *w = wr;
  struct ibv_sge sge[VLIB_FRAME_SIZE], *s = sge;
  u32 slot = rxq->tail & (rxq->size - 1);

  /* do not enqueue more packets than ring space */
  n_alloc = clib_min (VLIB_FRAME_SIZE, rxq->size - (rxq->tail - rxq->head));

  /* do not bother to allocate if too small */
  if (n_alloc < 16)
    return;
  /* avoid wrap-around logic in core loop */
  n_alloc = clib_min (n_alloc, rxq->size - slot);

  n_alloc = n =
    vlib_buffer_alloc_to_ring_from_pool (vm, rxq->bufs, slot, rxq->size,
					 n_alloc, rd->pool);

  /* if ring is full or allocation error, do nothing */
  if (PREDICT_FALSE (0 == n_alloc))
    return;

  vlib_get_buffers (vm, &rxq->bufs[slot], bufs, n_alloc);
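  /*
   * Build one single-SGE receive work request per buffer, 4 per iteration.
   * The WRs are chained through their next pointers so the whole batch can
   * be posted to the verbs layer in a single call below.
   */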
  while (n >= 4)
    {
      if (PREDICT_TRUE (n >= 8))
	{
	  CLIB_PREFETCH (&s[4 + 0], 4 * sizeof (s[0]), STORE);
	  CLIB_PREFETCH (&w[4 + 0], 4 * sizeof (w[0]), STORE);
	}

      s[0].addr = vlib_buffer_get_va (b[0]);
      s[0].length = vlib_buffer_get_default_data_size (vm);
      s[0].lkey = rd->lkey;

      s[1].addr = vlib_buffer_get_va (b[1]);
      s[1].length = vlib_buffer_get_default_data_size (vm);
      s[1].lkey = rd->lkey;

      s[2].addr = vlib_buffer_get_va (b[2]);
      s[2].length = vlib_buffer_get_default_data_size (vm);
      s[2].lkey = rd->lkey;

      s[3].addr = vlib_buffer_get_va (b[3]);
      s[3].length = vlib_buffer_get_default_data_size (vm);
      s[3].lkey = rd->lkey;

      w[0].next = &w[0] + 1;
      w[0].sg_list = &s[0];
      w[0].num_sge = 1;

      w[1].next = &w[1] + 1;
      w[1].sg_list = &s[1];
      w[1].num_sge = 1;

      w[2].next = &w[2] + 1;
      w[2].sg_list = &s[2];
      w[2].num_sge = 1;

      w[3].next = &w[3] + 1;
      w[3].sg_list = &s[3];
      w[3].num_sge = 1;

      s += 4;
      w += 4;
      b += 4;
      n -= 4;
    }
  while (n >= 1)
    {
      s[0].addr = vlib_buffer_get_va (b[0]);
      s[0].length = vlib_buffer_get_default_data_size (vm);
      s[0].lkey = rd->lkey;

      w[0].next = &w[0] + 1;
      w[0].sg_list = &s[0];
      w[0].num_sge = 1;

      s += 1;
      w += 1;
      b += 1;
      n -= 1;
    }

  w[-1].next = 0;		/* fix next pointer in WR linked-list last item */
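  /*
   * Post the whole WR chain in one call. On failure, ibv_post_wq_recv()
   * sets w to the first WR that could not be posted, so w - wr WRs were
   * accepted and the buffers of the rejected tail must be freed.
   */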
  n = n_alloc;
  if (ibv_post_wq_recv (rxq->wq, wr, &w) != 0)
    {
      n = w - wr;
      vlib_buffer_free_from_ring (vm, rxq->bufs, slot + n, rxq->size,
				  n_alloc - n);
    }

  rxq->tail += n;
}
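/*
 * Add packet-tracer records for up to vlib_get_trace_count() packets of
 * this frame when tracing is enabled on the node.
 */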
static_always_inline void
rdma_device_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
			 const rdma_device_t * rd, u32 n_left, const u32 * bi,
			 u32 next_index)
{
  u32 n_trace;

  if (PREDICT_TRUE (0 == (n_trace = vlib_get_trace_count (vm, node))))
    return;

  while (n_trace && n_left)
    {
      vlib_buffer_t *b;
      rdma_input_trace_t *tr;
      b = vlib_get_buffer (vm, bi[0]);
      vlib_trace_buffer (vm, node, next_index, b,
			 /* follow_chain */ 0);
      tr = vlib_add_trace (vm, node, b, sizeof (*tr));
      tr->next_index = next_index;
      tr->hw_if_index = rd->hw_if_index;

      /* next */
      n_trace--;
      n_left--;
      bi++;
    }

  vlib_set_trace_count (vm, node, n_trace);
}
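/*
 * If packets go straight to ethernet-input, mark the frame as coming from
 * a single interface: ethernet-input can then skip the per-packet
 * sw_if_index lookup and take its fast path.
 */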
static_always_inline void
rdma_device_input_ethernet (vlib_main_t * vm, vlib_node_runtime_t * node,
			    const rdma_device_t * rd, u32 next_index)
{
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  ethernet_input_frame_t *ef;

  if (PREDICT_FALSE (VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT != next_index))
    return;

  nf =
    vlib_node_runtime_get_next_frame (vm, node,
				      VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT);
  f = vlib_get_frame (vm, nf->frame);
  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
  /* FIXME: f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK; */

  ef = vlib_frame_scalar_args (f);
  ef->sw_if_index = rd->sw_if_index;
  ef->hw_if_index = rd->hw_if_index;
}
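/*
 * Translate a batch of ibverbs work completions into vlib buffers: copy
 * the buffer indices into the next frame, initialize buffer metadata from
 * the template and set packet lengths from the completion byte counts.
 * Returns the total number of bytes received.
 */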
static_always_inline u32
rdma_device_input_bufs (vlib_main_t * vm, const rdma_device_t * rd,
			u32 * next, u32 * bi, struct ibv_wc * wc,
			u32 n_left_from, vlib_buffer_t * bt)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u32 n_rx_bytes[4] = { 0 };

  vlib_get_buffers (vm, bi, bufs, n_left_from);
  ASSERT (bt->buffer_pool_index == bufs[0]->buffer_pool_index);
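  /* byte counters are kept in 4 independent lanes and summed on return,
     avoiding a serial dependency in the 4-wide loop below */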
  while (n_left_from >= 4)
    {
      if (PREDICT_TRUE (n_left_from >= 8))
	{
	  CLIB_PREFETCH (&wc[4 + 0], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&wc[4 + 1], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&wc[4 + 2], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&wc[4 + 3], CLIB_CACHE_LINE_BYTES, LOAD);
	  vlib_prefetch_buffer_header (b[4 + 0], STORE);
	  vlib_prefetch_buffer_header (b[4 + 1], STORE);
	  vlib_prefetch_buffer_header (b[4 + 2], STORE);
	  vlib_prefetch_buffer_header (b[4 + 3], STORE);
	}

      vlib_buffer_copy_indices (next, bi, 4);

      vlib_buffer_copy_template (b[0], bt);
      vlib_buffer_copy_template (b[1], bt);
      vlib_buffer_copy_template (b[2], bt);
      vlib_buffer_copy_template (b[3], bt);

      b[0]->current_length = wc[0].byte_len;
      b[1]->current_length = wc[1].byte_len;
      b[2]->current_length = wc[2].byte_len;
      b[3]->current_length = wc[3].byte_len;

      n_rx_bytes[0] += wc[0].byte_len;
      n_rx_bytes[1] += wc[1].byte_len;
      n_rx_bytes[2] += wc[2].byte_len;
      n_rx_bytes[3] += wc[3].byte_len;

      next += 4;
      bi += 4;
      b += 4;
      wc += 4;
      n_left_from -= 4;
    }
  while (n_left_from >= 1)
    {
      vlib_buffer_copy_indices (next, bi, 1);
      vlib_buffer_copy_template (b[0], bt);
      b[0]->current_length = wc[0].byte_len;
      n_rx_bytes[0] += wc[0].byte_len;

      next += 1;
      bi += 1;
      b += 1;
      wc += 1;
      n_left_from -= 1;
    }

  return n_rx_bytes[0] + n_rx_bytes[1] + n_rx_bytes[2] + n_rx_bytes[3];
}
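/*
 * Per-queue receive path: poll the completion queue for up to one frame
 * of packets, convert completions into vlib buffers, hand the frame to
 * the next node and refill the RX ring.
 */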
static_always_inline uword
rdma_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			  vlib_frame_t * frame, rdma_device_t * rd, u16 qid)
{
  vnet_main_t *vnm = vnet_get_main ();
  rdma_rxq_t *rxq = vec_elt_at_index (rd->rxqs, qid);
  struct ibv_wc wc[VLIB_FRAME_SIZE];
  vlib_buffer_t bt;
  u32 next_index, *to_next, n_left_to_next;
  u32 n_rx_packets, n_rx_bytes;
  u32 slot, n_tail;

  ASSERT (rxq->size >= VLIB_FRAME_SIZE && is_pow2 (rxq->size));
  ASSERT (rxq->tail - rxq->head <= rxq->size);

  n_rx_packets = ibv_poll_cq (rxq->cq, VLIB_FRAME_SIZE, wc);
  ASSERT (n_rx_packets <= rxq->tail - rxq->head);

  if (PREDICT_FALSE (n_rx_packets <= 0))
    {
      rdma_device_input_refill (vm, rd, rxq);
      return 0;
    }
  /* init buffer template */
  clib_memset_u64 (&bt, 0,
		   STRUCT_OFFSET_OF (vlib_buffer_t,
				     template_end) / sizeof (u64));
  vnet_buffer (&bt)->sw_if_index[VLIB_RX] = rd->sw_if_index;
  vnet_buffer (&bt)->sw_if_index[VLIB_TX] = ~0;
  bt.buffer_pool_index = rd->pool;
  bt.ref_count = 1;

  /* update buffer template for input feature arcs if any */
  next_index = rd->per_interface_next_index;
  if (PREDICT_FALSE (vnet_device_input_have_features (rd->sw_if_index)))
    vnet_feature_start_device_input_x1 (rd->sw_if_index, &next_index, &bt);

  vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
  ASSERT (n_rx_packets <= n_left_to_next);
  /*
   * avoid wrap-around logic in core loop
   * we requested VLIB_FRAME_SIZE packets and rxq->size >= VLIB_FRAME_SIZE
   * => we can process all packets in 2 iterations max
   */
  slot = rxq->head & (rxq->size - 1);
  n_tail = clib_min (n_rx_packets, rxq->size - slot);
  n_rx_bytes =
    rdma_device_input_bufs (vm, rd, &to_next[0], &rxq->bufs[slot], wc, n_tail,
			    &bt);
  if (n_tail < n_rx_packets)
    n_rx_bytes +=
      rdma_device_input_bufs (vm, rd, &to_next[n_tail], &rxq->bufs[0],
			      wc + n_tail, n_rx_packets - n_tail, &bt);
  rdma_device_input_ethernet (vm, node, rd, next_index);

  vlib_put_next_frame (vm, node, next_index, n_left_to_next - n_rx_packets);

  rxq->head += n_rx_packets;

  rdma_device_input_trace (vm, node, rd, n_rx_packets, to_next, next_index);
  vlib_increment_combined_counter
    (vnm->interface_main.combined_sw_if_counters +
     VNET_INTERFACE_COUNTER_RX, vm->thread_index,
     rd->hw_if_index, n_rx_packets, n_rx_bytes);

  rdma_device_input_refill (vm, rd, rxq);

  return n_rx_packets;
}
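/*
 * Input node entry point: service every (device, queue) pair assigned to
 * this thread, skipping devices which are not admin-up.
 */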
VLIB_NODE_FN (rdma_input_node) (vlib_main_t * vm,
				vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  u32 n_rx = 0;
  rdma_main_t *rm = &rdma_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
    {
      rdma_device_t *rd;
      rd = vec_elt_at_index (rm->devices, dq->dev_instance);
      if (PREDICT_TRUE (rd->flags & RDMA_DEVICE_F_ADMIN_UP))
	n_rx += rdma_device_input_inline (vm, node, frame, rd, dq->queue_id);
    }

  return n_rx;
}
VLIB_REGISTER_NODE (rdma_input_node) = {
  .name = "rdma-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .sibling_of = "device-input",
  .format_trace = format_rdma_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .n_errors = RDMA_INPUT_N_ERROR,
  .error_strings = rdma_input_error_strings,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */