#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vppinfra/ring.h>
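+/* for clib_ip_csum (), which replaces ip_csum () in the checksum code below */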
+#include <vppinfra/vector/ip_csum.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
if (!is_tso && !(b->flags & VNET_BUFFER_F_OFFLOAD))
return 0;
- u32 oflags = vnet_buffer2 (b)->oflags;
+ vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;
u32 is_tcp = is_tso || oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
u32 is_udp = !is_tso && oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
+
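+  /* neither a TCP nor a UDP checksum is needed, nothing to offload */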
+ if (!is_tcp && !is_udp)
+ return 0;
+
u32 is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
u32 is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;
- ASSERT (!is_tcp || !is_udp);
+
+ ASSERT (!(is_tcp && is_udp));
ASSERT (is_ip4 || is_ip6);
i16 l2_hdr_offset = b->current_data;
  i16 l3_hdr_offset = vnet_buffer (b)->l3_hdr_offset;
  i16 l4_hdr_offset = vnet_buffer (b)->l4_hdr_offset;
  u16 l2_len = l3_hdr_offset - l2_hdr_offset;
  u16 l3_len = l4_hdr_offset - l3_hdr_offset;
  ip4_header_t *ip4 = (void *) (b->data + l3_hdr_offset);
ip6_header_t *ip6 = (void *) (b->data + l3_hdr_offset);
tcp_header_t *tcp = (void *) (b->data + l4_hdr_offset);
udp_header_t *udp = (void *) (b->data + l4_hdr_offset);
- u16 l4_len =
- is_tcp ? tcp_header_bytes (tcp) : is_udp ? sizeof (udp_header_t) : 0;
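+  /* the early return above guarantees exactly one of is_tcp / is_udp is set */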
+ u16 l4_len = is_tcp ? tcp_header_bytes (tcp) : sizeof (udp_header_t);
u16 sum = 0;
flags |= AVF_TXD_OFFSET_MACLEN (l2_len) |
AVF_TXD_OFFSET_IPLEN (l3_len) | AVF_TXD_OFFSET_L4LEN (l4_len);
flags |= is_ip4 ? AVF_TXD_CMD_IIPT_IPV4 : AVF_TXD_CMD_IIPT_IPV6;
- flags |= is_tcp ? AVF_TXD_CMD_L4T_TCP : is_udp ? AVF_TXD_CMD_L4T_UDP : 0;
+ flags |= is_tcp ? AVF_TXD_CMD_L4T_TCP : AVF_TXD_CMD_L4T_UDP;
  if (is_ip4)
    ip4->checksum = 0;

  if (is_tso)
    {
      if (is_ip4)
	ip4->length = 0;
      else
	ip6->payload_length = 0;
    }
- if (is_tcp || is_udp)
- {
if (is_ip4)
{
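+	  /* IPv4 pseudo header for the L4 checksum; l4len stays 0 for TSO,
+	     where segment lengths vary */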
	  struct avf_ip4_psh psh = { 0 };
	  psh.src = ip4->src_address.as_u32;
	  psh.dst = ip4->dst_address.as_u32;
	  psh.proto = ip4->protocol;
	  psh.l4len =
	    is_tso ? 0 :
clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
(l4_hdr_offset - l3_hdr_offset));
- sum = ~ip_csum (&psh, sizeof (psh));
+ sum = ~clib_ip_csum ((u8 *) &psh, sizeof (psh));
}
else
{
	  struct avf_ip6_psh psh = { 0 };
	  psh.src = ip6->src_address;
	  psh.dst = ip6->dst_address;
psh.proto = clib_host_to_net_u32 ((u32) ip6->protocol);
psh.l4len = is_tso ? 0 : ip6->payload_length;
- sum = ~ip_csum (&psh, sizeof (psh));
+ sum = ~clib_ip_csum ((u8 *) &psh, sizeof (psh));
}
- }
- /* ip_csum does a byte swap for some reason... */
- sum = clib_net_to_host_u16 (sum);
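+  /* clib_ip_csum () already returns the sum in network byte order */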
+
if (is_tcp)
tcp->checksum = sum;
- else if (is_udp)
+ else
udp->checksum = sum;
return flags;
}
-static_always_inline int
-avf_tx_fill_ctx_desc (vlib_main_t * vm, avf_txq_t * txq, avf_tx_desc_t * d,
- vlib_buffer_t * b)
+static_always_inline u32
+avf_tx_fill_ctx_desc (vlib_main_t *vm, avf_txq_t *txq, avf_tx_desc_t *d,
+ vlib_buffer_t *b)
{
- vlib_buffer_t *ctx_ph = vlib_get_buffer (vm, txq->ctx_desc_placeholder_bi);
+ vlib_buffer_t *ctx_ph;
+ u32 *bi = txq->ph_bufs;
+
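+  /* scan the placeholder buffers for one whose u8 ref_count still has room */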
+next:
+ ctx_ph = vlib_get_buffer (vm, bi[0]);
if (PREDICT_FALSE (ctx_ph->ref_count == 255))
{
- /* We need a new placeholder buffer */
- u32 new_bi;
- u8 bpi = vlib_buffer_pool_get_default_for_numa (vm, vm->numa_node);
- if (PREDICT_TRUE
- (vlib_buffer_alloc_from_pool (vm, &new_bi, 1, bpi) == 1))
- {
- /* Remove our own reference on the current placeholder buffer */
- ctx_ph->ref_count--;
- /* Replace with the new placeholder buffer */
- txq->ctx_desc_placeholder_bi = new_bi;
- ctx_ph = vlib_get_buffer (vm, new_bi);
- }
- else
- /* Impossible to enqueue a ctx descriptor, fail */
- return 1;
+ bi++;
+ goto next;
}
  /* Acquire a reference on the placeholder buffer */
  ctx_ph->ref_count++;

  u16 l234hdr_sz = vnet_buffer (b)->l4_hdr_offset - b->current_data +
		   vnet_buffer2 (b)->gso_l4_hdr_sz;
  u16 tlen = vlib_buffer_length_in_chain (vm, b) - l234hdr_sz;
  d[0].qword[0] = 0;
d[0].qword[1] = AVF_TXD_DTYP_CTX | AVF_TXD_CTX_CMD_TSO
| AVF_TXD_CTX_SEG_MSS (vnet_buffer2 (b)->gso_size) |
AVF_TXD_CTX_SEG_TLEN (tlen);
- return 0;
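+  /* return the placeholder buffer index so the caller can track it */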
+ return bi[0];
}
static_always_inline void
}
}
+static_always_inline void
+avf_tx_fill_data_desc (vlib_main_t *vm, avf_tx_desc_t *d, vlib_buffer_t *b,
+ u64 cmd, int use_va_dma)
+{
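+  /* qword[0] holds the buffer address; qword[1] packs the length (bits
+     34..47) together with the command flags */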
+ if (use_va_dma)
+ d->qword[0] = vlib_buffer_get_current_va (b);
+ else
+ d->qword[0] = vlib_buffer_get_current_pa (vm, b);
+ d->qword[1] = (((u64) b->current_length) << 34 | cmd | AVF_TXD_CMD_RSV);
+}
static_always_inline u16
avf_tx_prepare (vlib_main_t *vm, vlib_node_runtime_t *node, avf_txq_t *txq,
u32 *buffers, u32 n_packets, u16 *n_enq_descs, int use_va_dma)
{
- u64 bits = AVF_TXD_CMD_EOP | AVF_TXD_CMD_RSV;
- const u32 offload_mask = VNET_BUFFER_F_OFFLOAD | VNET_BUFFER_F_GSO;
- u64 one_by_one_offload_flags = 0;
- int is_tso;
- u16 n_desc = 0;
- u16 n_desc_left, n_packets_left = n_packets;
+ const u64 cmd_eop = AVF_TXD_CMD_EOP;
+ u16 n_free_desc, n_desc_left, n_packets_left = n_packets;
vlib_buffer_t *b[4];
avf_tx_desc_t *d = txq->tmp_descs;
u32 *tb = txq->tmp_bufs;
- n_desc_left = txq->size - txq->n_enqueued - 8;
+ n_free_desc = n_desc_left = txq->size - txq->n_enqueued - 8;
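+  /* remember the free count; descriptors consumed are derived from it below */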
if (n_desc_left == 0)
return 0;
while (n_packets_left && n_desc_left)
{
- u32 or_flags;
+ u32 flags, or_flags;
+
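+      /* fast path: 4 single-segment packets with no offload requested */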
if (n_packets_left < 8 || n_desc_left < 4)
goto one_by_one;
      b[0] = vlib_get_buffer (vm, buffers[0]);
      b[1] = vlib_get_buffer (vm, buffers[1]);
      b[2] = vlib_get_buffer (vm, buffers[2]);
      b[3] = vlib_get_buffer (vm, buffers[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
- if (or_flags & (VLIB_BUFFER_NEXT_PRESENT | offload_mask))
+ if (PREDICT_FALSE (or_flags &
+ (VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD |
+ VNET_BUFFER_F_GSO)))
goto one_by_one;
vlib_buffer_copy_indices (tb, buffers, 4);
- if (use_va_dma)
- {
- d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
- d[1].qword[0] = vlib_buffer_get_current_va (b[1]);
- d[2].qword[0] = vlib_buffer_get_current_va (b[2]);
- d[3].qword[0] = vlib_buffer_get_current_va (b[3]);
- }
- else
- {
- d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);
- d[1].qword[0] = vlib_buffer_get_current_pa (vm, b[1]);
- d[2].qword[0] = vlib_buffer_get_current_pa (vm, b[2]);
- d[3].qword[0] = vlib_buffer_get_current_pa (vm, b[3]);
- }
-
- d[0].qword[1] = ((u64) b[0]->current_length) << 34 | bits;
- d[1].qword[1] = ((u64) b[1]->current_length) << 34 | bits;
- d[2].qword[1] = ((u64) b[2]->current_length) << 34 | bits;
- d[3].qword[1] = ((u64) b[3]->current_length) << 34 | bits;
+ avf_tx_fill_data_desc (vm, d + 0, b[0], cmd_eop, use_va_dma);
+ avf_tx_fill_data_desc (vm, d + 1, b[1], cmd_eop, use_va_dma);
+ avf_tx_fill_data_desc (vm, d + 2, b[2], cmd_eop, use_va_dma);
+ avf_tx_fill_data_desc (vm, d + 3, b[3], cmd_eop, use_va_dma);
- n_desc += 4;
buffers += 4;
n_packets_left -= 4;
n_desc_left -= 4;
continue;
one_by_one:
- one_by_one_offload_flags = 0;
tb[0] = buffers[0];
b[0] = vlib_get_buffer (vm, buffers[0]);
- is_tso = ! !(b[0]->flags & VNET_BUFFER_F_GSO);
- if (PREDICT_FALSE (is_tso || b[0]->flags & offload_mask))
- one_by_one_offload_flags |= avf_tx_prepare_cksum (b[0], is_tso);
+ flags = b[0]->flags;
+
+	  /* common case: no buffer chain and no TSO */
+ if (PREDICT_TRUE (
+ (flags & (VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_GSO)) == 0))
+ {
+ u64 cmd = cmd_eop;
- /* Deal with chain buffer if present */
- if (is_tso || b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
+ if (PREDICT_FALSE (flags & VNET_BUFFER_F_OFFLOAD))
+ cmd |= avf_tx_prepare_cksum (b[0], 0 /* is_tso */);
+
+ avf_tx_fill_data_desc (vm, d, b[0], cmd, use_va_dma);
+ }
+ else
{
- u16 n_desc_needed = 1 + is_tso;
- vlib_buffer_t *b0 = b[0];
+ u16 n_desc_needed = 1;
+ u64 cmd = 0;
- /* Wish there were a buffer count for chain buffer */
- while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
+ if (flags & VLIB_BUFFER_NEXT_PRESENT)
{
- b0 = vlib_get_buffer (vm, b0->next_buffer);
- n_desc_needed++;
+ vlib_buffer_t *next = vlib_get_buffer (vm, b[0]->next_buffer);
+ n_desc_needed = 2;
+ while (next->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ next = vlib_get_buffer (vm, next->next_buffer);
+ n_desc_needed++;
+ }
}
- /* spec says data descriptor is limited to 8 segments */
- if (PREDICT_FALSE (!is_tso && n_desc_needed > 8))
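+	  /* TSO consumes one extra slot for the context descriptor */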
+ if (flags & VNET_BUFFER_F_GSO)
+ {
+ n_desc_needed++;
+ }
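+	  /* spec limits a packet to 8 data descriptors */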
+ else if (PREDICT_FALSE (n_desc_needed > 8))
{
vlib_buffer_free_one (vm, buffers[0]);
	      vlib_error_count (vm, node->node_index,
				AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
	      n_packets_left -= 1;
	      buffers += 1;
	      continue;
	    }
if (PREDICT_FALSE (n_desc_left < n_desc_needed))
- /*
- * Slow path may be able to to deal with this since it can handle
- * ring wrap
- */
break;
- /* Enqueue a context descriptor if needed */
- if (PREDICT_FALSE (is_tso))
+ if (flags & VNET_BUFFER_F_GSO)
{
- if (avf_tx_fill_ctx_desc (vm, txq, d, b[0]))
- /* Failure to acquire ref on ctx placeholder */
- break;
+ /* Enqueue a context descriptor */
tb[1] = tb[0];
- tb[0] = txq->ctx_desc_placeholder_bi;
- n_desc += 1;
+ tb[0] = avf_tx_fill_ctx_desc (vm, txq, d, b[0]);
n_desc_left -= 1;
d += 1;
tb += 1;
+ cmd = avf_tx_prepare_cksum (b[0], 1 /* is_tso */);
}
- while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
+ else if (flags & VNET_BUFFER_F_OFFLOAD)
{
- if (use_va_dma)
- d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
- else
- d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);
+ cmd = avf_tx_prepare_cksum (b[0], 0 /* is_tso */);
+ }
- d[0].qword[1] = (((u64) b[0]->current_length) << 34) |
- AVF_TXD_CMD_RSV | one_by_one_offload_flags;
+ /* Deal with chain buffer if present */
+ while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ avf_tx_fill_data_desc (vm, d, b[0], cmd, use_va_dma);
- n_desc += 1;
n_desc_left -= 1;
d += 1;
tb += 1;
tb[0] = b[0]->next_buffer;
b[0] = vlib_get_buffer (vm, b[0]->next_buffer);
}
- }
- if (use_va_dma)
- d[0].qword[0] = vlib_buffer_get_current_va (b[0]);
- else
- d[0].qword[0] = vlib_buffer_get_current_pa (vm, b[0]);
-
- d[0].qword[1] =
- (((u64) b[0]->current_length) << 34) | bits | one_by_one_offload_flags;
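+	  /* the last (or only) descriptor of the packet carries EOP */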
+ avf_tx_fill_data_desc (vm, d, b[0], cmd_eop | cmd, use_va_dma);
+ }
- n_desc += 1;
buffers += 1;
n_packets_left -= 1;
n_desc_left -= 1;
tb += 1;
}
- *n_enq_descs = n_desc;
+ *n_enq_descs = n_free_desc - n_desc_left;
return n_packets - n_packets_left;
}
VNET_DEVICE_CLASS_TX_FN (avf_device_class) (vlib_main_t *vm,
					    vlib_node_runtime_t *node,
					    vlib_frame_t *frame)
{
vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
avf_device_t *ad = avf_get_device (rd->dev_instance);
- u32 thread_index = vm->thread_index;
- u8 qid = thread_index;
- avf_txq_t *txq = vec_elt_at_index (ad->txqs, qid % ad->num_queue_pairs);
- u16 next = txq->next;
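+  /* the tx frame supplies the queue id and the shared-queue flag */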
+ vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
+ u8 qid = tf->queue_id;
+ avf_txq_t *txq = vec_elt_at_index (ad->txqs, qid);
+ u16 next;
u16 mask = txq->size - 1;
u32 *buffers = vlib_frame_vector_args (frame);
u16 n_enq, n_left, n_desc, *slot;
u16 n_retry = 2;
- clib_spinlock_lock_if_init (&txq->lock);
+ if (tf->shared_queue)
+ clib_spinlock_lock (&txq->lock);
n_left = frame->n_vectors;
retry:
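+  /* re-read the ring position, it advances on each retry pass */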
+ next = txq->next;
/* release consumed bufs */
if (txq->n_enqueued)
{
AVF_TX_ERROR_NO_FREE_SLOTS, n_left);
}
- clib_spinlock_unlock_if_init (&txq->lock);
+ if (tf->shared_queue)
+ clib_spinlock_unlock (&txq->lock);
return frame->n_vectors - n_left;
}