/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vppinfra/ring.h>
#include <vppinfra/vector/ip_csum.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp_packet.h>

#include <vnet/devices/devices.h>

#include <avf/avf.h>

static_always_inline u8
avf_tx_desc_get_dtyp (avf_tx_desc_t * d)
{
  return d->qword[1] & 0x0f;
}

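/* Pseudo headers used to seed the TCP/UDP checksum in
 * avf_tx_prepare_cksum () below; layout follows the standard IPv4/IPv6
 * pseudo header. */
struct avf_ip4_psh
{
  u32 src;
  u32 dst;
  u8 zero;
  u8 proto;
  u16 l4len;
};

struct avf_ip6_psh
{
  ip6_address_t src;
  ip6_address_t dst;
  u32 l4len;
  u32 proto;
};
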
static_always_inline u64
avf_tx_prepare_cksum (vlib_buffer_t * b, u8 is_tso)
{
  u64 flags = 0;
  if (!is_tso && !(b->flags & VNET_BUFFER_F_OFFLOAD))
    return 0;

  vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;
  u32 is_tcp = is_tso || oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
  u32 is_udp = !is_tso && oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;

  if (!is_tcp && !is_udp)
    return 0;

  u32 is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  u32 is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;

  ASSERT (!(is_tcp && is_udp));
  ASSERT (is_ip4 || is_ip6);
  i16 l2_hdr_offset = b->current_data;
  i16 l3_hdr_offset = vnet_buffer (b)->l3_hdr_offset;
  i16 l4_hdr_offset = vnet_buffer (b)->l4_hdr_offset;
  u16 l2_len = l3_hdr_offset - l2_hdr_offset;
  u16 l3_len = l4_hdr_offset - l3_hdr_offset;
  ip4_header_t *ip4 = (void *) (b->data + l3_hdr_offset);
  ip6_header_t *ip6 = (void *) (b->data + l3_hdr_offset);
  tcp_header_t *tcp = (void *) (b->data + l4_hdr_offset);
  udp_header_t *udp = (void *) (b->data + l4_hdr_offset);
  u16 l4_len = is_tcp ? tcp_header_bytes (tcp) : sizeof (udp_header_t);
  u16 sum = 0;

  flags |= AVF_TXD_OFFSET_MACLEN (l2_len) |
    AVF_TXD_OFFSET_IPLEN (l3_len) | AVF_TXD_OFFSET_L4LEN (l4_len);
  flags |= is_ip4 ? AVF_TXD_CMD_IIPT_IPV4 : AVF_TXD_CMD_IIPT_IPV6;
  flags |= is_tcp ? AVF_TXD_CMD_L4T_TCP : AVF_TXD_CMD_L4T_UDP;

  if (is_tso)
    {
      if (is_ip4)
	{
	  ip4->length = 0;
	  ip4->checksum = 0;
	}
      else
	ip6->payload_length = 0;
    }

  if (is_ip4)
    {
      struct avf_ip4_psh psh = { 0 };
      psh.src = ip4->src_address.as_u32;
      psh.dst = ip4->dst_address.as_u32;
      psh.proto = ip4->protocol;
      psh.l4len =
	is_tso ? 0 :
	clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
			      (l4_hdr_offset - l3_hdr_offset));
      sum = ~clib_ip_csum ((u8 *) &psh, sizeof (psh));
    }
  else
    {
      struct avf_ip6_psh psh = { 0 };
      psh.src = ip6->src_address;
      psh.dst = ip6->dst_address;
      psh.proto = clib_host_to_net_u32 ((u32) ip6->protocol);
      psh.l4len = is_tso ? 0 : ip6->payload_length;
      sum = ~clib_ip_csum ((u8 *) &psh, sizeof (psh));
    }

  if (is_tcp)
    tcp->checksum = sum;
  else
    udp->checksum = sum;
  return flags;
}

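/* Build the TSO context descriptor for a GSO buffer and return the buffer
 * index of a reference-counted placeholder buffer which accounts for the
 * extra ring slot taken by the context descriptor. */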
static_always_inline u32
avf_tx_fill_ctx_desc (vlib_main_t *vm, avf_txq_t *txq, avf_tx_desc_t *d,
		      vlib_buffer_t *b)
{
  vlib_buffer_t *ctx_ph;
  u32 *bi = txq->ph_bufs;

next:
  ctx_ph = vlib_get_buffer (vm, bi[0]);
  if (PREDICT_FALSE (ctx_ph->ref_count == 255))
    {
      bi++;
      goto next;
    }

  /* Acquire a reference on the placeholder buffer */
  ctx_ph->ref_count++;

  u16 l234hdr_sz = vnet_buffer (b)->l4_hdr_offset - b->current_data +
		   vnet_buffer2 (b)->gso_l4_hdr_sz;
  u16 tlen = vlib_buffer_length_in_chain (vm, b) - l234hdr_sz;
  d[0].qword[0] = 0;
  d[0].qword[1] = AVF_TXD_DTYP_CTX | AVF_TXD_CTX_CMD_TSO
    | AVF_TXD_CTX_SEG_MSS (vnet_buffer2 (b)->gso_size) |
    AVF_TXD_CTX_SEG_TLEN (tlen);
  return bi[0];
}

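/* Copy staged descriptors into the TX ring using the widest vector
 * registers available (512/256/128-bit), with a scalar tail loop. */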
static_always_inline void
avf_tx_copy_desc (avf_tx_desc_t *d, avf_tx_desc_t *s, u32 n_descs)
{
#if defined CLIB_HAVE_VEC512
  while (n_descs >= 8)
    {
      u64x8u *dv = (u64x8u *) d;
      u64x8u *sv = (u64x8u *) s;
      dv[0] = sv[0];
      dv[1] = sv[1];
      d += 8;
      s += 8;
      n_descs -= 8;
    }
#elif defined CLIB_HAVE_VEC256
  while (n_descs >= 4)
    {
      u64x4u *dv = (u64x4u *) d;
      u64x4u *sv = (u64x4u *) s;
      dv[0] = sv[0];
      dv[1] = sv[1];
      d += 4;
      s += 4;
      n_descs -= 4;
    }
#elif defined CLIB_HAVE_VEC128
  while (n_descs >= 2)
    {
      u64x2u *dv = (u64x2u *) d;
      u64x2u *sv = (u64x2u *) s;
      dv[0] = sv[0];
      dv[1] = sv[1];
      d += 2;
      s += 2;
      n_descs -= 2;
    }
#endif
  while (n_descs)
    {
      d[0].qword[0] = s[0].qword[0];
      d[0].qword[1] = s[0].qword[1];
      d += 1;
      s += 1;
      n_descs -= 1;
    }
}

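/* Fill one data descriptor: buffer address (virtual or physical, depending
 * on use_va_dma) goes into qword[0], length and command flags into
 * qword[1]. */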
static_always_inline void
avf_tx_fill_data_desc (vlib_main_t *vm, avf_tx_desc_t *d, vlib_buffer_t *b,
		       u64 cmd, int use_va_dma)
{
  if (use_va_dma)
    d->qword[0] = vlib_buffer_get_current_va (b);
  else
    d->qword[0] = vlib_buffer_get_current_pa (vm, b);
  d->qword[1] = (((u64) b->current_length) << 34 | cmd | AVF_TXD_CMD_RSV);
}

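/* Stage descriptors and buffer indices into txq->tmp_descs / txq->tmp_bufs.
 * Returns the number of packets consumed; the number of descriptors used is
 * returned via n_enq_descs. */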
static_always_inline u16
avf_tx_prepare (vlib_main_t *vm, vlib_node_runtime_t *node, avf_txq_t *txq,
		u32 *buffers, u32 n_packets, u16 *n_enq_descs, int use_va_dma)
{
  const u64 cmd_eop = AVF_TXD_CMD_EOP;
  u16 n_free_desc, n_desc_left, n_packets_left = n_packets;
#if defined CLIB_HAVE_VEC512
  vlib_buffer_t *b[8];
#else
  vlib_buffer_t *b[4];
#endif
  avf_tx_desc_t *d = txq->tmp_descs;
  u32 *tb = txq->tmp_bufs;

  n_free_desc = n_desc_left = txq->size - txq->n_enqueued - 8;

  if (n_desc_left == 0)
    return 0;

  while (n_packets_left && n_desc_left)
    {
#if defined CLIB_HAVE_VEC512
      u32 flags;
      u64x8 or_flags_vec512;
      u64x8 flags_mask_vec512;
#else
      u32 flags, or_flags;
#endif

#if defined CLIB_HAVE_VEC512
      if (n_packets_left < 8 || n_desc_left < 8)
#else
      if (n_packets_left < 8 || n_desc_left < 4)
#endif
	goto one_by_one;

#if defined CLIB_HAVE_VEC512
      u64x8 base_ptr = u64x8_splat (vm->buffer_main->buffer_mem_start);
      u32x8 buf_indices = u32x8_load_unaligned (buffers);
      *(u64x8 *) &b = base_ptr + u64x8_from_u32x8 (
			buf_indices << CLIB_LOG2_CACHE_LINE_BYTES);
      or_flags_vec512 = u64x8_i64gather (u64x8_load_unaligned (b), 0, 1);
#else
      vlib_prefetch_buffer_with_index (vm, buffers[4], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[5], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[6], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[7], LOAD);

      b[0] = vlib_get_buffer (vm, buffers[0]);
      b[1] = vlib_get_buffer (vm, buffers[1]);
      b[2] = vlib_get_buffer (vm, buffers[2]);
      b[3] = vlib_get_buffer (vm, buffers[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
#endif

#if defined CLIB_HAVE_VEC512
      flags_mask_vec512 = u64x8_splat (
	VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD | VNET_BUFFER_F_GSO);
      if (PREDICT_FALSE (
	    !u64x8_is_all_zero (or_flags_vec512 & flags_mask_vec512)))
#else
      if (PREDICT_FALSE (or_flags &
			 (VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD |
			  VNET_BUFFER_F_GSO)))
#endif
	goto one_by_one;

#if defined CLIB_HAVE_VEC512
      vlib_buffer_copy_indices (tb, buffers, 8);
      avf_tx_fill_data_desc (vm, d + 0, b[0], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 1, b[1], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 2, b[2], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 3, b[3], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 4, b[4], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 5, b[5], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 6, b[6], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 7, b[7], cmd_eop, use_va_dma);

      buffers += 8;
      n_packets_left -= 8;
      n_desc_left -= 8;
      d += 8;
      tb += 8;
#else
      vlib_buffer_copy_indices (tb, buffers, 4);
      avf_tx_fill_data_desc (vm, d + 0, b[0], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 1, b[1], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 2, b[2], cmd_eop, use_va_dma);
      avf_tx_fill_data_desc (vm, d + 3, b[3], cmd_eop, use_va_dma);

      buffers += 4;
      n_packets_left -= 4;
      n_desc_left -= 4;
      d += 4;
      tb += 4;
#endif
      continue;

    one_by_one:
      tb[0] = buffers[0];
      b[0] = vlib_get_buffer (vm, buffers[0]);
      flags = b[0]->flags;

      /* No chained buffers or TSO case */
      if (PREDICT_TRUE (
	    (flags & (VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_GSO)) == 0))
	{
	  u64 cmd = cmd_eop;
	  if (PREDICT_FALSE (flags & VNET_BUFFER_F_OFFLOAD))
	    cmd |= avf_tx_prepare_cksum (b[0], 0 /* is_tso */);
	  avf_tx_fill_data_desc (vm, d, b[0], cmd, use_va_dma);
	}
      else
	{
	  u16 n_desc_needed = 1;
	  u64 cmd = 0;

	  if (flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      vlib_buffer_t *next = vlib_get_buffer (vm, b[0]->next_buffer);
	      n_desc_needed = 2;
	      while (next->flags & VLIB_BUFFER_NEXT_PRESENT)
		{
		  next = vlib_get_buffer (vm, next->next_buffer);
		  n_desc_needed++;
		}
	    }

	  if (flags & VNET_BUFFER_F_GSO)
	    {
	      n_desc_needed++;
	    }
	  else if (PREDICT_FALSE (n_desc_needed > 8))
	    {
	      vlib_buffer_free_one (vm, buffers[0]);
	      vlib_error_count (vm, node->node_index,
				AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
	      n_packets_left -= 1;
	      buffers += 1;
	      continue;
	    }

	  if (PREDICT_FALSE (n_desc_left < n_desc_needed))
	    break;

	  if (flags & VNET_BUFFER_F_GSO)
	    {
	      /* Enqueue a context descriptor */
	      tb[1] = tb[0];
	      tb[0] = avf_tx_fill_ctx_desc (vm, txq, d, b[0]);
	      n_desc_left -= 1;
	      d += 1;
	      tb += 1;
	      cmd = avf_tx_prepare_cksum (b[0], 1 /* is_tso */);
	    }
	  else if (flags & VNET_BUFFER_F_OFFLOAD)
	    {
	      cmd = avf_tx_prepare_cksum (b[0], 0 /* is_tso */);
	    }

	  /* Deal with chain buffer if present */
	  while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      avf_tx_fill_data_desc (vm, d, b[0], cmd, use_va_dma);
	      n_desc_left -= 1;
	      d += 1;
	      tb += 1;

	      tb[0] = b[0]->next_buffer;
	      b[0] = vlib_get_buffer (vm, b[0]->next_buffer);
	    }

	  avf_tx_fill_data_desc (vm, d, b[0], cmd_eop | cmd, use_va_dma);
	}

      buffers += 1;
      n_packets_left -= 1;
      n_desc_left -= 1;
      d += 1;
      tb += 1;
    }

  *n_enq_descs = n_free_desc - n_desc_left;
  return n_packets - n_packets_left;
}

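/* TX node function: reclaim completed descriptors, stage new ones via
 * avf_tx_prepare (), copy them into the ring (handling wrap-around), set the
 * RS bit on the last descriptor and bump the tail pointer. */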
VNET_DEVICE_CLASS_TX_FN (avf_device_class) (vlib_main_t * vm,
					    vlib_node_runtime_t * node,
					    vlib_frame_t * frame)
{
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  avf_device_t *ad = avf_get_device (rd->dev_instance);
  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
  u8 qid = tf->queue_id;
  avf_txq_t *txq = vec_elt_at_index (ad->txqs, qid);
  u16 next;
  u16 mask = txq->size - 1;
  u32 *buffers = vlib_frame_vector_args (frame);
  u16 n_enq, n_left, n_desc, *slot;
  u16 n_retry = 2;

  if (tf->shared_queue)
    clib_spinlock_lock (&txq->lock);

  n_left = frame->n_vectors;

retry:
  next = txq->next;
  /* release consumed bufs */
  if (txq->n_enqueued)
    {
      i32 complete_slot = -1;
      while (1)
	{
	  u16 *slot = clib_ring_get_first (txq->rs_slots);

	  if (slot == 0)
	    break;

	  if (avf_tx_desc_get_dtyp (txq->descs + slot[0]) != 0x0F)
	    break;

	  complete_slot = slot[0];
	  clib_ring_deq (txq->rs_slots);
	}

      if (complete_slot >= 0)
	{
	  u16 first, mask, n_free;
	  mask = txq->size - 1;
	  first = (txq->next - txq->n_enqueued) & mask;
	  n_free = (complete_slot + 1 - first) & mask;

	  txq->n_enqueued -= n_free;
	  vlib_buffer_free_from_ring_no_next (vm, txq->bufs, first, txq->size,
					      n_free);
	}
    }

  n_desc = 0;
  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    n_enq = avf_tx_prepare (vm, node, txq, buffers, n_left, &n_desc, 1);
  else
    n_enq = avf_tx_prepare (vm, node, txq, buffers, n_left, &n_desc, 0);

  if (n_desc)
    {
      if (PREDICT_TRUE (next + n_desc <= txq->size))
	{
	  /* no wrap */
	  avf_tx_copy_desc (txq->descs + next, txq->tmp_descs, n_desc);
	  vlib_buffer_copy_indices (txq->bufs + next, txq->tmp_bufs, n_desc);
	  next += n_desc;
	}
      else
	{
	  /* wrap around the end of the ring */
	  u32 n_not_wrap = txq->size - next;
	  avf_tx_copy_desc (txq->descs + next, txq->tmp_descs, n_not_wrap);
	  avf_tx_copy_desc (txq->descs, txq->tmp_descs + n_not_wrap,
			    n_desc - n_not_wrap);
	  vlib_buffer_copy_indices (txq->bufs + next, txq->tmp_bufs,
				    n_not_wrap);
	  vlib_buffer_copy_indices (txq->bufs, txq->tmp_bufs + n_not_wrap,
				    n_desc - n_not_wrap);
	  next = n_desc - n_not_wrap;
	}

      /* report-status (RS) bit is set on the last descriptor enqueued */
      if ((slot = clib_ring_enq (txq->rs_slots)))
	{
	  u16 rs_slot = slot[0] = (next - 1) & mask;
	  txq->descs[rs_slot].qword[1] |= AVF_TXD_CMD_RS;
	}

      txq->next = next & mask;
      avf_tail_write (txq->qtx_tail, txq->next);
      txq->n_enqueued += n_desc;
      n_left -= n_enq;
    }

  if (n_left)
    {
      buffers += n_enq;

      if (n_retry--)
	goto retry;

      vlib_buffer_free (vm, buffers, n_left);
      vlib_error_count (vm, node->node_index,
			AVF_TX_ERROR_NO_FREE_SLOTS, n_left);
    }

  if (tf->shared_queue)
    clib_spinlock_unlock (&txq->lock);

  return frame->n_vectors - n_left;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */