/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vppinfra/ring.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <avf/avf.h>

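/*
 * An AVF transmit descriptor is two 64-bit words: qword[0] carries the DMA
 * address of a buffer segment, qword[1] packs the descriptor type (DTYP,
 * bits 3:0), the command flags (EOP, RS, ...) and the segment length
 * (shifted up to bit 34).  On write-back the device sets DTYP to 0xF
 * ("descriptor done"), which is what the helper below and the buffer
 * release loop in the tx function test for.
 */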
static_always_inline u8
avf_tx_desc_get_dtyp (avf_tx_desc_t * d)
{
  return d->qword[1] & 0x0f;
}

static_always_inline u16
avf_tx_enqueue (vlib_main_t * vm, vlib_node_runtime_t * node, avf_txq_t * txq,
                u32 * buffers, u32 n_packets, int use_va_dma)
{
  u16 next = txq->next;
  u64 bits = AVF_TXD_CMD_EOP | AVF_TXD_CMD_RSV;
  u16 n_desc = 0;
  u16 *slot, n_desc_left, n_packets_left = n_packets;
  u16 mask = txq->size - 1;
  vlib_buffer_t *b[4];
  avf_tx_desc_t *d = txq->descs + next;
  u16 n_desc_needed;
  vlib_buffer_t *b0;

  /* avoid ring wrap and always leave 8 descriptors of headroom */
  n_desc_left = txq->size - clib_max (txq->next, txq->n_enqueued + 8);

  if (n_desc_left == 0)
    return 0;

  /* Fast path, no ring wrap */
  while (n_packets_left && n_desc_left)
    {
      u32 or_flags;
      if (n_packets_left < 8 || n_desc_left < 4)
        goto one_by_one;

      vlib_prefetch_buffer_with_index (vm, buffers[4], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[5], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[6], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[7], LOAD);

      b[0] = vlib_get_buffer (vm, buffers[0]);
      b[1] = vlib_get_buffer (vm, buffers[1]);
      b[2] = vlib_get_buffer (vm, buffers[2]);
      b[3] = vlib_get_buffer (vm, buffers[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
        goto one_by_one;

      vlib_buffer_copy_indices (txq->bufs + next, buffers, 4);

      if (use_va_dma)
        {
          d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
          d[1].qword[0] = vlib_buffer_get_current_va (b[1]);
          d[2].qword[0] = vlib_buffer_get_current_va (b[2]);
          d[3].qword[0] = vlib_buffer_get_current_va (b[3]);
        }
      else
        {
          d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);
          d[1].qword[0] = vlib_buffer_get_current_pa (vm, b[1]);
          d[2].qword[0] = vlib_buffer_get_current_pa (vm, b[2]);
          d[3].qword[0] = vlib_buffer_get_current_pa (vm, b[3]);
        }

      d[0].qword[1] = ((u64) b[0]->current_length) << 34 | bits;
      d[1].qword[1] = ((u64) b[1]->current_length) << 34 | bits;
      d[2].qword[1] = ((u64) b[2]->current_length) << 34 | bits;
      d[3].qword[1] = ((u64) b[3]->current_length) << 34 | bits;

      next += 4;
      n_desc += 4;
      buffers += 4;
      n_packets_left -= 4;
      n_desc_left -= 4;
      d += 4;
      continue;

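      /*
       * Single-packet path: also used for chained buffers, which consume
       * one descriptor per segment.
       */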
    one_by_one:
      txq->bufs[next] = buffers[0];
      b[0] = vlib_get_buffer (vm, buffers[0]);

      /* Deal with chain buffer if present */
      if (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          n_desc_needed = 1;
          b0 = b[0];

          /* Wish there were a buffer count for chain buffer */
          while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              b0 = vlib_get_buffer (vm, b0->next_buffer);
              n_desc_needed++;
            }

          /* spec says data descriptor is limited to 8 segments */
          if (PREDICT_FALSE (n_desc_needed > 8))
            {
              vlib_buffer_free_one (vm, buffers[0]);
              vlib_error_count (vm, node->node_index,
                                AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
              n_packets_left -= 1;
              buffers += 1;
              continue;
            }

          if (PREDICT_FALSE (n_desc_left < n_desc_needed))
            /*
             * Slow path may be able to deal with this since it can handle
             * ring wrap
             */
            break;

          /* all but the last segment: no EOP, the packet continues */
          while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              if (use_va_dma)
                d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
              else
                d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);

              d[0].qword[1] = (((u64) b[0]->current_length) << 34) |
                AVF_TXD_CMD_RSV;

              next += 1;
              n_desc += 1;
              n_desc_left -= 1;
              d += 1;

              txq->bufs[next] = b[0]->next_buffer;
              b[0] = vlib_get_buffer (vm, b[0]->next_buffer);
            }
        }

      /* last (or only) segment: EOP marks the end of the packet */
      if (use_va_dma)
        d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
      else
        d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);

      d[0].qword[1] = (((u64) b[0]->current_length) << 34) | bits;

      next += 1;
      n_desc += 1;
      buffers += 1;
      n_packets_left -= 1;
      n_desc_left -= 1;
      d += 1;
    }

  /* Slow path to support ring wrap */
  if (PREDICT_FALSE (n_packets_left))
    {
      txq->n_enqueued += n_desc;

      n_desc = 0;
      d = txq->descs + (next & mask);

      /* +8 to be consistent with fast path */
      n_desc_left = txq->size - (txq->n_enqueued + 8);

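      /*
       * Unlike the fast path, every descriptor index is masked here, so
       * writes may wrap around the end of the ring.
       */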
      while (n_packets_left && n_desc_left)
        {
          txq->bufs[next & mask] = buffers[0];
          b[0] = vlib_get_buffer (vm, buffers[0]);

          /* Deal with chain buffer if present */
          if (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              n_desc_needed = 1;
              b0 = b[0];

              while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  b0 = vlib_get_buffer (vm, b0->next_buffer);
                  n_desc_needed++;
                }

              /* Spec says data descriptor is limited to 8 segments */
              if (PREDICT_FALSE (n_desc_needed > 8))
                {
                  vlib_buffer_free_one (vm, buffers[0]);
                  vlib_error_count (vm, node->node_index,
                                    AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
                  n_packets_left -= 1;
                  buffers += 1;
                  continue;
                }

              if (PREDICT_FALSE (n_desc_left < n_desc_needed))
                break;

              while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
                  if (use_va_dma)
                    d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
                  else
                    d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);

                  d[0].qword[1] = (((u64) b[0]->current_length) << 34) |
                    AVF_TXD_CMD_RSV;

                  next += 1;
                  n_desc += 1;
                  n_desc_left -= 1;
                  d = txq->descs + (next & mask);

                  txq->bufs[next & mask] = b[0]->next_buffer;
                  b[0] = vlib_get_buffer (vm, b[0]->next_buffer);
                }
            }

          if (use_va_dma)
            d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
          else
            d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);

          d[0].qword[1] = (((u64) b[0]->current_length) << 34) | bits;

          next += 1;
          n_desc += 1;
          buffers += 1;
          n_packets_left -= 1;
          n_desc_left -= 1;
          d = txq->descs + (next & mask);
        }
    }

  /* mark the last descriptor of this batch with RS (report status) and
     remember its slot so completed buffers can be freed later */
  if ((slot = clib_ring_enq (txq->rs_slots)))
    {
      u16 rs_slot = slot[0] = (next - 1) & mask;
      d = txq->descs + rs_slot;
      d[0].qword[1] |= AVF_TXD_CMD_RS;
    }

  /* ensure descriptor writes are visible before the tail register is bumped */
  CLIB_MEMORY_BARRIER ();
  *(txq->qtx_tail) = txq->next = next & mask;
  txq->n_enqueued += n_desc;
  return n_packets - n_packets_left;
}

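/*
 * The device only writes back descriptors that carry the RS bit.  Each
 * batch records the slot of its RS descriptor in the rs_slots ring; once
 * that descriptor reads back with DTYP == 0xF, every descriptor enqueued
 * before it is known to be done and the matching buffers can be released.
 */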
VNET_DEVICE_CLASS_TX_FN (avf_device_class) (vlib_main_t * vm,
                                            vlib_node_runtime_t * node,
                                            vlib_frame_t * frame)
{
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  avf_device_t *ad = avf_get_device (rd->dev_instance);
  u32 thread_index = vm->thread_index;
  u8 qid = thread_index;
  avf_txq_t *txq = vec_elt_at_index (ad->txqs, qid % ad->num_queue_pairs);
  u32 *buffers = vlib_frame_vector_args (frame);
  u16 n_enq, n_left;
  u16 n_retry = 2;

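  /* tx queues are picked per worker thread; with more threads than queue
     pairs a queue may be shared, in which case the spinlock (a no-op if it
     was never initialized) serializes access */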
  clib_spinlock_lock_if_init (&txq->lock);

  n_left = frame->n_vectors;

retry:
  /* release consumed bufs */
  if (txq->n_enqueued)
    {
      i32 complete_slot = -1;
      while (1)
        {
          u16 *slot = clib_ring_get_first (txq->rs_slots);

          if (slot == 0)
            break;

          if (avf_tx_desc_get_dtyp (txq->descs + slot[0]) != 0x0F)
            break;

          complete_slot = slot[0];

          clib_ring_deq (txq->rs_slots);
        }

      if (complete_slot >= 0)
        {
          u16 first, mask, n_free;
          mask = txq->size - 1;
          first = (txq->next - txq->n_enqueued) & mask;
          n_free = (complete_slot + 1 - first) & mask;

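          /* everything from the oldest outstanding descriptor (first) up to
             and including the completed RS slot can be freed in one pass */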
          txq->n_enqueued -= n_free;
          vlib_buffer_free_from_ring_no_next (vm, txq->bufs, first, txq->size,
                                              n_free);
        }
    }

  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    n_enq = avf_tx_enqueue (vm, node, txq, buffers, n_left, 1);
  else
    n_enq = avf_tx_enqueue (vm, node, txq, buffers, n_left, 0);

  n_left -= n_enq;

  if (n_left)
    {
      buffers += n_enq;

      if (n_retry--)
        goto retry;

      vlib_buffer_free (vm, buffers, n_left);
      vlib_error_count (vm, node->node_index,
                        AVF_TX_ERROR_NO_FREE_SLOTS, n_left);
    }

  clib_spinlock_unlock_if_init (&txq->lock);

  return frame->n_vectors - n_left;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */