/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
18 #include <vlib/vlib.h>
19 #include <vlib/unix/unix.h>
20 #include <vlib/pci/pci.h>
21 #include <vnet/ethernet/ethernet.h>
22 #include <vnet/devices/devices.h>
24 #include <rdma/rdma.h>
26 #define foreach_rdma_input_error \
27 _(BUFFER_ALLOC, "buffer alloc error")
31 #define _(f,s) RDMA_INPUT_ERROR_##f,
32 foreach_rdma_input_error
37 static __clib_unused char *rdma_input_error_strings[] = {
39 foreach_rdma_input_error
43 static_always_inline void
44 rdma_device_input_refill (vlib_main_t * vm, rdma_device_t * rd,
48 struct ibv_sge sg_entry;
49 struct ibv_recv_wr wr, *bad_wr;
50 u32 buffers[VLIB_FRAME_SIZE];
52 if (rxq->n_enq >= rxq->size)
55 n_alloc = clib_min (VLIB_FRAME_SIZE, rxq->size - rxq->n_enq);
56 n_alloc = vlib_buffer_alloc (vm, buffers, n_alloc);
58 sg_entry.length = vlib_buffer_get_default_data_size (vm);
59 sg_entry.lkey = rd->mr->lkey;
61 wr.sg_list = &sg_entry;
63 for (n = 0; n < n_alloc; n++)
65 vlib_buffer_t *b = vlib_get_buffer (vm, buffers[n]);
66 sg_entry.addr = vlib_buffer_get_va (b);
67 wr.wr_id = buffers[n];
68 if (ibv_post_recv (rxq->qp, &wr, &bad_wr) != 0)
69 vlib_buffer_free (vm, buffers + n, 1);
75 static_always_inline uword
76 rdma_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
77 vlib_frame_t * frame, rdma_device_t * rd, u16 qid)
79 vnet_main_t *vnm = vnet_get_main ();
80 rdma_rxq_t *rxq = vec_elt_at_index (rd->rxqs, qid);
82 struct ibv_wc wc[VLIB_FRAME_SIZE];
83 u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
84 u32 *bi, *to_next, n_left_to_next;
86 u32 n_rx_packets = 0, n_rx_bytes = 0;
88 n_rx_packets = ibv_poll_cq (rxq->cq, VLIB_FRAME_SIZE, wc);
90 if (n_rx_packets <= 0)
91 rdma_device_input_refill (vm, rd, rxq);
93 if (PREDICT_FALSE (rd->per_interface_next_index != ~0))
94 next_index = rd->per_interface_next_index;
96 vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
98 for (i = 0; i < n_rx_packets; i++)
100 u32 bi = wc[i].wr_id;
101 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
102 b->current_length = wc[i].byte_len;
103 vnet_buffer (b)->sw_if_index[VLIB_RX] = rd->sw_if_index;
104 vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;
106 n_rx_bytes += wc[i].byte_len;
109 if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
111 u32 n_left = n_rx_packets, i = 0;
114 while (n_trace && n_left)
117 rdma_input_trace_t *tr;
118 b = vlib_get_buffer (vm, bi[0]);
119 vlib_trace_buffer (vm, node, next_index, b, /* follow_chain */ 0);
120 tr = vlib_add_trace (vm, node, b, sizeof (*tr));
121 tr->next_index = next_index;
122 tr->hw_if_index = rd->hw_if_index;
130 vlib_set_trace_count (vm, node, n_trace);
133 if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
135 vlib_next_frame_t *nf;
137 ethernet_input_frame_t *ef;
138 nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
139 f = vlib_get_frame (vm, nf->frame_index);
140 f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
142 ef = vlib_frame_scalar_args (f);
143 ef->sw_if_index = rd->sw_if_index;
144 ef->hw_if_index = rd->hw_if_index;
145 //f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
148 n_left_to_next -= n_rx_packets;
149 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
151 vlib_increment_combined_counter
152 (vnm->interface_main.combined_sw_if_counters +
153 VNET_INTERFACE_COUNTER_RX, vm->thread_index,
154 rd->hw_if_index, n_rx_packets, n_rx_bytes);
156 rxq->n_enq -= n_rx_packets;
157 rdma_device_input_refill (vm, rd, rxq);
162 VLIB_NODE_FN (rdma_input_node) (vlib_main_t * vm,
163 vlib_node_runtime_t * node,
164 vlib_frame_t * frame)
167 rdma_main_t *rm = &rdma_main;
168 vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
169 vnet_device_and_queue_t *dq;
171 foreach_device_and_queue (dq, rt->devices_and_queues)
174 rd = vec_elt_at_index (rm->devices, dq->dev_instance);
175 if ((rd->flags & RDMA_DEVICE_F_ADMIN_UP) == 0)
177 n_rx += rdma_device_input_inline (vm, node, frame, rd, dq->queue_id);
183 VLIB_REGISTER_NODE (rdma_input_node) = {
184 .name = "rdma-input",
185 .sibling_of = "device-input",
186 .format_trace = format_rdma_input_trace,
187 .type = VLIB_NODE_TYPE_INPUT,
188 .state = VLIB_NODE_STATE_DISABLED,
189 .n_errors = RDMA_INPUT_N_ERROR,
190 .error_strings = rdma_input_error_strings,
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */