2 *------------------------------------------------------------------
3 * Copyright (c) 2018 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
18 #include <vlib/vlib.h>
19 #include <vlib/unix/unix.h>
20 #include <vlib/pci/pci.h>
21 #include <vnet/ethernet/ethernet.h>
22 #include <vnet/devices/devices.h>
/* TX descriptor command bits live at bit 4 and up of qword[1]; command
 * index x maps to bit (x + 4).  The argument is parenthesized so callers
 * may pass arbitrary expressions without operator-precedence surprises
 * (e.g. AVF_TXQ_DESC_CMD (1 << 1) must expand to bit 6, not shift by 32). */
#define AVF_TXQ_DESC_CMD(x) (1 << ((x) + 4))
#define AVF_TXQ_DESC_CMD_EOP AVF_TXQ_DESC_CMD(0)	/* end of packet */
#define AVF_TXQ_DESC_CMD_RS AVF_TXQ_DESC_CMD(1)	/* report status */
#define AVF_TXQ_DESC_CMD_RSV AVF_TXQ_DESC_CMD(2)	/* reserved, must be set */
/* Return the DTYPE field (low 4 bits of qword[1]) of a TX descriptor.
 * The caller below treats DTYPE == 0xF as "descriptor consumed by the
 * device", so this is used to detect completed transmits.
 * NOTE(review): the function's surrounding braces fall outside this
 * visible chunk. */
static_always_inline u8
avf_tx_desc_get_dtyp (avf_tx_desc_t * d)
  return d->qword[1] & 0x0f;
/* TX node function for the AVF device class: enqueue one frame of packet
 * buffers onto this thread's TX descriptor ring, then kick the device by
 * writing the ring tail register.
 * NOTE(review): several structural lines (opening braces, the headers of
 * the 4-wide and scalar enqueue loops, the `frame' parameter line) are not
 * visible in this chunk — the comments below annotate only the statements
 * that are visible here. */
CLIB_MULTIARCH_FN (avf_interface_tx) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
  avf_main_t *am = &avf_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  avf_device_t *ad = pool_elt_at_index (am->devices, rd->dev_instance);
  u32 thread_index = vm->thread_index;
  /* map this thread onto one of the device's TX queue pairs */
  u8 qid = thread_index;
  avf_txq_t *txq = vec_elt_at_index (ad->txqs, qid % ad->num_queue_pairs);
  avf_tx_desc_t *d0, *d1, *d2, *d3;
  u32 *buffers = vlib_frame_args (frame);
  u32 bi0, bi1, bi2, bi3;
  u16 n_left = frame->n_vectors;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u16 mask = txq->size - 1;	/* ring indexing assumes size is a power of two */
  /* command bits OR'd into every descriptor's qword[1]:
     end-of-packet | report-status | reserved bit */
  u64 bits = (AVF_TXQ_DESC_CMD_EOP | AVF_TXQ_DESC_CMD_RS |
	      AVF_TXQ_DESC_CMD_RSV);

  /* lock only when the queue is shared between threads */
  clib_spinlock_lock_if_init (&txq->lock);

  /* release consumed buffers: walk forward from the oldest outstanding
     slot; the device sets DTYPE to 0xF in descriptors it has finished with */
  u16 first, slot, n_free = 0;
  first = slot = (txq->next - txq->n_enqueued) & mask;
  d0 = txq->descs + slot;
  while (n_free < txq->n_enqueued && avf_tx_desc_get_dtyp (d0) == 0x0F)
      slot = (slot + 1) & mask;
      d0 = txq->descs + slot;
  txq->n_enqueued -= n_free;
  /* hand the freed buffer indices (a contiguous ring span) back to vlib */
  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,

  /* 4-wide enqueue path */
  u16 slot0, slot1, slot2, slot3;

  /* prefetch the next four buffers while working on the current four */
  vlib_prefetch_buffer_with_index (vm, buffers[4], LOAD);
  vlib_prefetch_buffer_with_index (vm, buffers[5], LOAD);
  vlib_prefetch_buffer_with_index (vm, buffers[6], LOAD);
  vlib_prefetch_buffer_with_index (vm, buffers[7], LOAD);

  /* compute the four ring slots for this iteration (slot0 line not
     visible in this chunk) */
  slot1 = (txq->next + 1) & mask;
  slot2 = (txq->next + 2) & mask;
  slot3 = (txq->next + 3) & mask;

  d0 = txq->descs + slot0;
  d1 = txq->descs + slot1;
  d2 = txq->descs + slot2;
  d3 = txq->descs + slot3;

  /* remember the buffer index per slot so it can be freed on completion */
  txq->bufs[slot0] = bi0;
  txq->bufs[slot1] = bi1;
  txq->bufs[slot2] = bi2;
  txq->bufs[slot3] = bi3;
  b0 = vlib_get_buffer (vm, bi0);
  b1 = vlib_get_buffer (vm, bi1);
  b2 = vlib_get_buffer (vm, bi2);
  b3 = vlib_get_buffer (vm, bi3);

  /* NOTE(review): the next line looks like a remnant of commented-out
     physical-address code in the full file — confirm against the
     original source */
  d->qword[0] = vlib_get_buffer_data_physical_address (vm, bi0) +

  /* qword[0] = address of the packet data for each descriptor */
  d0->qword[0] = pointer_to_uword (b0->data) + b0->current_data;
  d1->qword[0] = pointer_to_uword (b1->data) + b1->current_data;
  d2->qword[0] = pointer_to_uword (b2->data) + b2->current_data;
  d3->qword[0] = pointer_to_uword (b3->data) + b3->current_data;

  /* qword[1] = packet length in bits 34.. plus the command bits */
  d0->qword[1] = ((u64) b0->current_length) << 34 | bits;
  d1->qword[1] = ((u64) b1->current_length) << 34 | bits;
  d2->qword[1] = ((u64) b2->current_length) << 34 | bits;
  d3->qword[1] = ((u64) b3->current_length) << 34 | bits;

  /* advance the ring and account for the four outstanding descriptors */
  txq->next = (txq->next + 4) & mask;
  txq->n_enqueued += 4;

  /* scalar (one packet at a time) enqueue path */
  d0 = txq->descs + txq->next;

  txq->bufs[txq->next] = bi0;
  b0 = vlib_get_buffer (vm, bi0);

  /* NOTE(review): remnant of commented-out physical-address code — see
     the note in the 4-wide path above */
  d->qword[0] = vlib_get_buffer_data_physical_address (vm, bi0) +

  d0->qword[0] = pointer_to_uword (b0->data) + b0->current_data;

  d0->qword[1] = (((u64) b0->current_length) << 34) | bits;

  txq->next = (txq->next + 1) & mask;

  /* make sure the descriptor writes are globally visible before the
     device sees the new tail */
  CLIB_MEMORY_BARRIER ();
  *(txq->qtx_tail) = txq->next;	/* kick the device */

  clib_spinlock_unlock_if_init (&txq->lock);

  /* number of packets actually enqueued */
  return frame->n_vectors - n_left;
#ifndef CLIB_MARCH_VARIANT
/* Weak references to the arch-optimized TX variants; they resolve to
 * non-NULL only if the corresponding march-specific object was built
 * and linked in. */
vlib_node_function_t __clib_weak avf_interface_tx_avx512;
vlib_node_function_t __clib_weak avf_interface_tx_avx2;
/* Load-time constructor: pick the best TX function this CPU supports.
 * NOTE(review): the function braces and the matching #endif fall outside
 * this visible chunk. */
static void __clib_constructor
avf_interface_tx_multiarch_select (void)
  if (avf_interface_tx_avx512 && clib_cpu_supports_avx512f ())
    avf_device_class.tx_function = avf_interface_tx_avx512;
  else if (avf_interface_tx_avx2 && clib_cpu_supports_avx2 ())
    avf_device_class.tx_function = avf_interface_tx_avx2;
178 * fd.io coding-style-patch-verification: ON
181 * eval: (c-set-style "gnu")