/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
18 #include <vlib/vlib.h>
19 #include <vlib/unix/unix.h>
20 #include <vlib/pci/pci.h>
21 #include <vnet/ethernet/ethernet.h>
22 #include <vnet/devices/devices.h>
24 #include <vmxnet3/vmxnet3.h>
26 static_always_inline void
27 vmxnet3_tx_comp_ring_advance_next (vmxnet3_txq_t * txq)
29 vmxnet3_tx_comp_ring *comp_ring = &txq->tx_comp_ring;
32 if (PREDICT_FALSE (comp_ring->next == txq->size))
35 comp_ring->gen ^= VMXNET3_TXCF_GEN;
39 static_always_inline void
40 vmxnet3_tx_ring_advance_produce (vmxnet3_txq_t * txq)
42 txq->tx_ring.produce++;
43 if (PREDICT_FALSE (txq->tx_ring.produce == txq->size))
45 txq->tx_ring.produce = 0;
46 txq->tx_ring.gen ^= VMXNET3_TXF_GEN;
50 static_always_inline void
51 vmxnet3_tx_ring_advance_consume (vmxnet3_txq_t * txq)
53 txq->tx_ring.consume++;
54 txq->tx_ring.consume &= txq->size - 1;
57 static_always_inline void
58 vmxnet3_txq_release (vlib_main_t * vm, vmxnet3_device_t * vd,
61 vmxnet3_tx_comp *tx_comp;
62 vmxnet3_tx_comp_ring *comp_ring;
64 comp_ring = &txq->tx_comp_ring;
65 tx_comp = &txq->tx_comp[comp_ring->next];
67 while ((tx_comp->flags & VMXNET3_TXCF_GEN) == comp_ring->gen)
69 u16 eop_idx = tx_comp->index & VMXNET3_TXC_INDEX;
70 u32 bi0 = txq->tx_ring.bufs[txq->tx_ring.consume];
72 vlib_buffer_free_one (vm, bi0);
73 while (txq->tx_ring.consume != eop_idx)
75 vmxnet3_tx_ring_advance_consume (txq);
77 vmxnet3_tx_ring_advance_consume (txq);
79 vmxnet3_tx_comp_ring_advance_next (txq);
80 tx_comp = &txq->tx_comp[comp_ring->next];
84 static_always_inline u16
85 vmxnet3_tx_ring_space_left (vmxnet3_txq_t * txq)
89 count = (txq->tx_ring.consume - txq->tx_ring.produce - 1);
91 if (txq->tx_ring.produce >= txq->tx_ring.consume)
96 VNET_DEVICE_CLASS_TX_FN (vmxnet3_device_class) (vlib_main_t * vm,
97 vlib_node_runtime_t * node,
100 vmxnet3_main_t *vmxm = &vmxnet3_main;
101 vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
102 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, rd->dev_instance);
103 u32 *buffers = vlib_frame_vector_args (frame);
106 vmxnet3_tx_desc *txd = 0;
107 u32 desc_idx, generation, first_idx;
109 u16 n_left = frame->n_vectors;
111 u16 qid = vm->thread_index % vd->num_tx_queues, produce;
113 if (PREDICT_FALSE (!(vd->flags & VMXNET3_DEVICE_F_LINK_UP)))
115 vlib_buffer_free (vm, buffers, n_left);
116 vlib_error_count (vm, node->node_index, VMXNET3_TX_ERROR_LINK_DOWN,
121 txq = vec_elt_at_index (vd->txqs, qid);
122 clib_spinlock_lock_if_init (&txq->lock);
124 vmxnet3_txq_release (vm, vd, txq);
126 produce = txq->tx_ring.produce;
127 while (PREDICT_TRUE (n_left))
129 u16 space_needed = 1, i;
133 b0 = vlib_get_buffer (vm, bi0);
136 space_left = vmxnet3_tx_ring_space_left (txq);
137 while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
139 u32 next_buffer = b->next_buffer;
141 b = vlib_get_buffer (vm, next_buffer);
144 if (PREDICT_FALSE (space_left < space_needed))
146 vmxnet3_txq_release (vm, vd, txq);
147 space_left = vmxnet3_tx_ring_space_left (txq);
149 if (PREDICT_FALSE (space_left < space_needed))
151 vlib_buffer_free_one (vm, bi0);
152 vlib_error_count (vm, node->node_index,
153 VMXNET3_TX_ERROR_NO_FREE_SLOTS, 1);
157 * Drop this packet. But we may have enough room for the next
165 * Toggle the generation bit for SOP fragment to avoid device starts
166 * reading incomplete packet
168 generation = txq->tx_ring.gen ^ VMXNET3_TXF_GEN;
169 first_idx = txq->tx_ring.produce;
170 for (i = 0; i < space_needed; i++)
172 b0 = vlib_get_buffer (vm, bi0);
173 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
175 desc_idx = txq->tx_ring.produce;
177 vmxnet3_tx_ring_advance_produce (txq);
178 txq->tx_ring.bufs[desc_idx] = bi0;
180 txd = &txq->tx_desc[desc_idx];
181 txd->address = vlib_buffer_get_current_pa (vm, b0);
183 txd->flags[0] = generation | b0->current_length;
185 generation = txq->tx_ring.gen;
188 bi0 = b0->next_buffer;
191 txd->flags[1] = VMXNET3_TXF_CQ | VMXNET3_TXF_EOP;
192 asm volatile ("":::"memory");
194 * Now toggle back the generation bit for the first segment.
195 * Device can start reading the packet
197 txq->tx_desc[first_idx].flags[0] ^= VMXNET3_TXF_GEN;
203 if (PREDICT_TRUE (produce != txq->tx_ring.produce))
204 vmxnet3_reg_write_inline (vd, 0, txq->reg_txprod, txq->tx_ring.produce);
206 clib_spinlock_unlock_if_init (&txq->lock);
208 return (frame->n_vectors - n_left);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */