/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vppinfra/ring.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp_packet.h>

#include <vnet/devices/devices.h>

#include <avf/avf.h>
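/*
 * AVF transmit path: builds data and context descriptors from vlib
 * buffers, prepares checksum/TSO offloads, and reclaims buffers the
 * device has completed.
 */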
static_always_inline u8
avf_tx_desc_get_dtyp (avf_tx_desc_t * d)
{
  return d->qword[1] & 0x0f;
}
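/*
 * TCP/UDP pseudo-headers used to seed the L4 checksum when offload is
 * requested: the device checksums the payload but expects the checksum
 * field to be pre-filled with the pseudo-header sum.
 */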
struct avf_ip4_psh
{
  u32 src;
  u32 dst;
  u8 zero;
  u8 proto;
  u16 l4len;
};

struct avf_ip6_psh
{
  ip6_address_t src;
  ip6_address_t dst;
  u32 l4len;
  u32 proto;
};

static_always_inline u64
avf_tx_prepare_cksum (vlib_buffer_t * b, u8 is_tso)
{
  u64 flags = 0;

  /* No TSO and no checksum offload requested, nothing to prepare */
  if (!is_tso && !(b->flags & (VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
			       VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
			       VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)))
    return 0;

  u32 is_tcp = is_tso || b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
  u32 is_udp = !is_tso && b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
  u32 is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  u32 is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;
  ASSERT (!is_tcp || !is_udp);
  ASSERT (is_ip4 || is_ip6);
  i16 l2_hdr_offset = vnet_buffer (b)->l2_hdr_offset;
  i16 l3_hdr_offset = vnet_buffer (b)->l3_hdr_offset;
  i16 l4_hdr_offset = vnet_buffer (b)->l4_hdr_offset;
  u16 l2_len = l3_hdr_offset - l2_hdr_offset;
  u16 l3_len = l4_hdr_offset - l3_hdr_offset;
  ip4_header_t *ip4 = (void *) (b->data + l3_hdr_offset);
  ip6_header_t *ip6 = (void *) (b->data + l3_hdr_offset);
  tcp_header_t *tcp = (void *) (b->data + l4_hdr_offset);
  udp_header_t *udp = (void *) (b->data + l4_hdr_offset);
  u16 l4_len =
    is_tcp ? tcp_header_bytes (tcp) : is_udp ? sizeof (udp_header_t) : 0;
  u16 sum = 0;

  flags |= AVF_TXD_OFFSET_MACLEN (l2_len) |
    AVF_TXD_OFFSET_IPLEN (l3_len) | AVF_TXD_OFFSET_L4LEN (l4_len);
  flags |= is_ip4 ? AVF_TXD_CMD_IIPT_IPV4 : AVF_TXD_CMD_IIPT_IPV6;
  flags |= is_tcp ? AVF_TXD_CMD_L4T_TCP : is_udp ? AVF_TXD_CMD_L4T_UDP : 0;
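  /*
   * The descriptor flags built above tell the device the header lengths
   * (MACLEN/IPLEN/L4LEN offsets), the IP version and the L4 protocol, so
   * it knows which fields to update when it computes the checksums.
   */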
  if (is_ip4)
    ip4->checksum = 0;

  /* For TSO, the device recomputes per-segment lengths, so zero them */
  if (is_tso)
    {
      if (is_ip4)
	ip4->length = 0;
      else
	ip6->payload_length = 0;
    }

  if (is_tcp || is_udp)
    {
      if (is_ip4)
	{
	  struct avf_ip4_psh psh = { 0 };
	  psh.src = ip4->src_address.as_u32;
	  psh.dst = ip4->dst_address.as_u32;
	  psh.proto = ip4->protocol;
	  psh.l4len =
	    is_tso ? 0 :
	    clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
				  (l4_hdr_offset - l3_hdr_offset));
	  sum = ~ip_csum (&psh, sizeof (psh));
	}
      else
	{
	  struct avf_ip6_psh psh = { 0 };
	  clib_memcpy_fast (&psh.src, &ip6->src_address, 16);
	  clib_memcpy_fast (&psh.dst, &ip6->dst_address, 16);
	  psh.proto = clib_host_to_net_u32 ((u32) ip6->protocol);
	  psh.l4len = is_tso ? 0 : ip6->payload_length;
	  sum = ~ip_csum (&psh, sizeof (psh));
	}
    }

  /* ip_csum does a byte swap for some reason... */
  sum = clib_net_to_host_u16 (sum);
  if (is_tcp)
    tcp->checksum = sum;
  else
    udp->checksum = sum;
  return flags;
}
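/*
 * TSO needs a context descriptor in addition to the data descriptors.
 * Every ring slot must carry a buffer index for completion accounting, so
 * a shared placeholder buffer is reference-counted into each context
 * descriptor slot; since vlib reference counts are 8-bit, the placeholder
 * is swapped for a fresh buffer once its refcount saturates at 255.
 */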
static_always_inline int
avf_tx_fill_ctx_desc (vlib_main_t * vm, avf_txq_t * txq, avf_tx_desc_t * d,
		      vlib_buffer_t * b)
{
  vlib_buffer_t *ctx_ph = vlib_get_buffer (vm, txq->ctx_desc_placeholder_bi);
  if (PREDICT_FALSE (ctx_ph->ref_count == 255))
    {
      /* We need a new placeholder buffer */
      u32 new_bi;
      u8 bpi = vlib_buffer_pool_get_default_for_numa (vm, vm->numa_node);
      if (PREDICT_TRUE
	  (vlib_buffer_alloc_from_pool (vm, &new_bi, 1, bpi) == 1))
	{
	  /* Remove our own reference on the current placeholder buffer */
	  ctx_ph->ref_count--;
	  /* Replace with the new placeholder buffer */
	  txq->ctx_desc_placeholder_bi = new_bi;
	  ctx_ph = vlib_get_buffer (vm, new_bi);
	}
      else
	/* Impossible to enqueue a ctx descriptor, fail */
	return 1;
    }

  /* Acquire a reference on the placeholder buffer */
  ctx_ph->ref_count++;

  u16 l234hdr_sz = vnet_buffer (b)->l4_hdr_offset -
    vnet_buffer (b)->l2_hdr_offset + vnet_buffer2 (b)->gso_l4_hdr_sz;
  u16 tlen = vlib_buffer_length_in_chain (vm, b) - l234hdr_sz;
  d[0].qword[0] = 0;
  d[0].qword[1] = AVF_TXD_DTYP_CTX | AVF_TXD_CTX_CMD_TSO
    | AVF_TXD_CTX_SEG_MSS (vnet_buffer2 (b)->gso_size) |
    AVF_TXD_CTX_SEG_TLEN (tlen);
  return 0;
}
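/*
 * Enqueue packets on the TX ring. The fast path writes 4 descriptors at a
 * time for simple packets (single segment, no offload) while staying clear
 * of the ring wrap; everything else takes the one-by-one path, and a slow
 * path at the end handles ring wrap by masking every ring index.
 */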
static_always_inline u16
avf_tx_enqueue (vlib_main_t * vm, vlib_node_runtime_t * node, avf_txq_t * txq,
		u32 * buffers, u32 n_packets, int use_va_dma)
{
  u16 next = txq->next;
  u64 bits = AVF_TXD_CMD_EOP | AVF_TXD_CMD_RSV;
  const u32 offload_mask = VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
    VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
    VNET_BUFFER_F_GSO;
  u64 one_by_one_offload_flags = 0;
  int is_tso;
  u16 n_desc = 0;
  u16 *slot, n_desc_left, n_packets_left = n_packets;
  u16 mask = txq->size - 1;
  u16 n_desc_needed;
  vlib_buffer_t *b[4];
  vlib_buffer_t *b0;
  avf_tx_desc_t *d = txq->descs + next;

  /* avoid ring wrap */
  n_desc_left = txq->size - clib_max (txq->next, txq->n_enqueued + 8);

  if (n_desc_left == 0)
    return 0;
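  /*
   * Note: the 8-descriptor headroom above presumably keeps the ring from
   * ever filling completely and matches the 8-packet prefetch lookahead
   * in the fast path; the slow path applies the same offset ("+8 to be
   * consistent with fast path" below) so the accounting stays aligned.
   */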
  /* Fast path, no ring wrap */
  while (n_packets_left && n_desc_left)
    {
      u32 or_flags;
      if (n_packets_left < 8 || n_desc_left < 4)
	goto one_by_one;

      vlib_prefetch_buffer_with_index (vm, buffers[4], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[5], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[6], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[7], LOAD);

      b[0] = vlib_get_buffer (vm, buffers[0]);
      b[1] = vlib_get_buffer (vm, buffers[1]);
      b[2] = vlib_get_buffer (vm, buffers[2]);
      b[3] = vlib_get_buffer (vm, buffers[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;

      if (or_flags & (VLIB_BUFFER_NEXT_PRESENT | offload_mask))
	goto one_by_one;

      vlib_buffer_copy_indices (txq->bufs + next, buffers, 4);

      if (use_va_dma)
	{
	  d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
	  d[1].qword[0] = vlib_buffer_get_current_va (b[1]);
	  d[2].qword[0] = vlib_buffer_get_current_va (b[2]);
	  d[3].qword[0] = vlib_buffer_get_current_va (b[3]);
	}
      else
	{
	  d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);
	  d[1].qword[0] = vlib_buffer_get_current_pa (vm, b[1]);
	  d[2].qword[0] = vlib_buffer_get_current_pa (vm, b[2]);
	  d[3].qword[0] = vlib_buffer_get_current_pa (vm, b[3]);
	}

      /* buffer length lives in bits 34:47 of the second qword */
      d[0].qword[1] = ((u64) b[0]->current_length) << 34 | bits;
      d[1].qword[1] = ((u64) b[1]->current_length) << 34 | bits;
      d[2].qword[1] = ((u64) b[2]->current_length) << 34 | bits;
      d[3].qword[1] = ((u64) b[3]->current_length) << 34 | bits;

      next += 4;
      n_desc += 4;
      buffers += 4;
      n_packets_left -= 4;
      n_desc_left -= 4;
      d += 4;
      continue;
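      /*
       * One-by-one path: packets with offload requests, TSO, or buffer
       * chains need per-packet descriptor flags and possibly several
       * descriptors (one per segment, plus a context descriptor for TSO).
       */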
    one_by_one:
      one_by_one_offload_flags = 0;
      txq->bufs[next] = buffers[0];
      b[0] = vlib_get_buffer (vm, buffers[0]);
      is_tso = ! !(b[0]->flags & VNET_BUFFER_F_GSO);
      if (PREDICT_FALSE (is_tso || b[0]->flags & offload_mask))
	one_by_one_offload_flags |= avf_tx_prepare_cksum (b[0], is_tso);

      /* Deal with chain buffer if present */
      if (is_tso || b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  n_desc_needed = 1 + is_tso;
	  b0 = b[0];

	  /* Wish there were a buffer count for chain buffer */
	  while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      b0 = vlib_get_buffer (vm, b0->next_buffer);
	      n_desc_needed++;
	    }

	  /* spec says data descriptor is limited to 8 segments */
	  if (PREDICT_FALSE (!is_tso && n_desc_needed > 8))
	    {
	      vlib_buffer_free_one (vm, buffers[0]);
	      vlib_error_count (vm, node->node_index,
				AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
	      n_packets_left -= 1;
	      buffers += 1;
	      continue;
	    }

	  if (PREDICT_FALSE (n_desc_left < n_desc_needed))
	    /*
	     * Slow path may be able to deal with this since it can handle
	     * ring wrap
	     */
	    break;

	  /* Enqueue a context descriptor if needed */
	  if (PREDICT_FALSE (is_tso))
	    {
	      if (avf_tx_fill_ctx_desc (vm, txq, d, b[0]))
		/* Failure to acquire ref on ctx placeholder */
		break;
	      txq->bufs[next + 1] = txq->bufs[next];
	      txq->bufs[next] = txq->ctx_desc_placeholder_bi;
	      next += 1;
	      n_desc += 1;
	      n_desc_left -= 1;
	      d += 1;
	    }
	  while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      if (use_va_dma)
		d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
	      else
		d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);

	      d[0].qword[1] = (((u64) b[0]->current_length) << 34) |
		AVF_TXD_CMD_RSV | one_by_one_offload_flags;

	      next += 1;
	      n_desc += 1;
	      n_desc_left -= 1;
	      d += 1;

	      txq->bufs[next] = b[0]->next_buffer;
	      b[0] = vlib_get_buffer (vm, b[0]->next_buffer);
	    }
	}

      if (use_va_dma)
	d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
      else
	d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);

      d[0].qword[1] =
	(((u64) b[0]->current_length) << 34) | bits |
	one_by_one_offload_flags;

      next += 1;
      n_desc += 1;
      buffers += 1;
      n_packets_left -= 1;
      n_desc_left -= 1;
      d += 1;
    }
  /* Slow path to support ring wrap */
  if (PREDICT_FALSE (n_packets_left))
    {
      txq->n_enqueued += n_desc;

      n_desc = 0;
      d = txq->descs + (next & mask);

      /* +8 to be consistent with fast path */
      n_desc_left = txq->size - (txq->n_enqueued + 8);

      while (n_packets_left && n_desc_left)
	{
	  txq->bufs[next & mask] = buffers[0];
	  b[0] = vlib_get_buffer (vm, buffers[0]);

	  one_by_one_offload_flags = 0;
	  is_tso = ! !(b[0]->flags & VNET_BUFFER_F_GSO);
	  if (PREDICT_FALSE (is_tso || b[0]->flags & offload_mask))
	    one_by_one_offload_flags |= avf_tx_prepare_cksum (b[0], is_tso);

	  /* Deal with chain buffer if present */
	  if (is_tso || b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      n_desc_needed = 1 + is_tso;
	      b0 = b[0];

	      while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
		{
		  b0 = vlib_get_buffer (vm, b0->next_buffer);
		  n_desc_needed++;
		}

	      /* Spec says data descriptor is limited to 8 segments */
	      if (PREDICT_FALSE (!is_tso && n_desc_needed > 8))
		{
		  vlib_buffer_free_one (vm, buffers[0]);
		  vlib_error_count (vm, node->node_index,
				    AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
		  n_packets_left -= 1;
		  buffers += 1;
		  continue;
		}

	      if (PREDICT_FALSE (n_desc_left < n_desc_needed))
		break;

	      /* Enqueue a context descriptor if needed */
	      if (PREDICT_FALSE (is_tso))
		{
		  if (avf_tx_fill_ctx_desc (vm, txq, d, b[0]))
		    /* Failure to acquire ref on ctx placeholder */
		    break;
		  txq->bufs[(next + 1) & mask] = txq->bufs[next & mask];
		  txq->bufs[next & mask] = txq->ctx_desc_placeholder_bi;
		  next += 1;
		  n_desc += 1;
		  n_desc_left -= 1;
		  d = txq->descs + (next & mask);
		}
	      while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
		{
		  if (use_va_dma)
		    d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
		  else
		    d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);

		  d[0].qword[1] = (((u64) b[0]->current_length) << 34) |
		    AVF_TXD_CMD_RSV | one_by_one_offload_flags;

		  next += 1;
		  n_desc += 1;
		  n_desc_left -= 1;
		  d = txq->descs + (next & mask);

		  txq->bufs[next & mask] = b[0]->next_buffer;
		  b[0] = vlib_get_buffer (vm, b[0]->next_buffer);
		}
	    }

	  if (use_va_dma)
	    d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
	  else
	    d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);

	  d[0].qword[1] =
	    (((u64) b[0]->current_length) << 34) | bits |
	    one_by_one_offload_flags;

	  next += 1;
	  n_desc += 1;
	  buffers += 1;
	  n_packets_left -= 1;
	  n_desc_left -= 1;
	  d = txq->descs + (next & mask);
	}
    }
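  /*
   * Request a completion report on the last descriptor written: the RS
   * (report status) bit makes the device write the descriptor back once
   * transmission is done, and the slot index is queued so the free path
   * knows which descriptor to poll.
   */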
  if ((slot = clib_ring_enq (txq->rs_slots)))
    {
      u16 rs_slot = slot[0] = (next - 1) & mask;
      d = txq->descs + rs_slot;
      d[0].qword[1] |= AVF_TXD_CMD_RS;
    }

  txq->next = next & mask;
  clib_atomic_store_rel_n (txq->qtx_tail, txq->next);
  txq->n_enqueued += n_desc;
  return n_packets - n_packets_left;
}
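/*
 * Device-class TX function: one TX queue per worker thread when enough
 * queues are available, otherwise queues are shared across threads and
 * serialized with a per-queue spinlock (hence the _if_init lock calls).
 */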
VNET_DEVICE_CLASS_TX_FN (avf_device_class) (vlib_main_t * vm,
					    vlib_node_runtime_t * node,
					    vlib_frame_t * frame)
{
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  avf_device_t *ad = avf_get_device (rd->dev_instance);
  u32 thread_index = vm->thread_index;
  u8 qid = thread_index;
  avf_txq_t *txq = vec_elt_at_index (ad->txqs, qid % ad->num_queue_pairs);
  u32 *buffers = vlib_frame_vector_args (frame);
  u16 n_enq, n_left;
  u16 n_retry = 2;

  clib_spinlock_lock_if_init (&txq->lock);

  n_left = frame->n_vectors;
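  /*
   * The device overwrites the DTYP field of a completed descriptor with
   * 0xF, so walking the RS slot ring until the first still-pending
   * descriptor yields the range of buffers that can be freed.
   */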
retry:
  /* release consumed bufs */
  if (txq->n_enqueued)
    {
      i32 complete_slot = -1;
      while (1)
	{
	  u16 *slot = clib_ring_get_first (txq->rs_slots);

	  if (slot == 0)
	    break;

	  if (avf_tx_desc_get_dtyp (txq->descs + slot[0]) != 0x0F)
	    break;

	  complete_slot = slot[0];

	  clib_ring_deq (txq->rs_slots);
	}

      if (complete_slot >= 0)
	{
	  u16 first, mask, n_free;
	  mask = txq->size - 1;
	  first = (txq->next - txq->n_enqueued) & mask;
	  n_free = (complete_slot + 1 - first) & mask;

	  txq->n_enqueued -= n_free;
	  vlib_buffer_free_from_ring_no_next (vm, txq->bufs, first,
					      txq->size, n_free);
	}
    }

  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    n_enq = avf_tx_enqueue (vm, node, txq, buffers, n_left, 1);
  else
    n_enq = avf_tx_enqueue (vm, node, txq, buffers, n_left, 0);

  n_left -= n_enq;

  if (n_left)
    {
      buffers += n_enq;

      if (n_retry--)
	goto retry;

      vlib_buffer_free (vm, buffers, n_left);
      vlib_error_count (vm, node->node_index,
			AVF_TX_ERROR_NO_FREE_SLOTS, n_left);
    }

  clib_spinlock_unlock_if_init (&txq->lock);

  return frame->n_vectors - n_left;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */