{
dpdk_tx_queue_t *txq;
u32 n_retry;
- int n_sent = 0;
+ u32 n_sent = 0;
n_retry = 16;
txq = vec_elt_at_index (xd->tx_queues, queue_id);
vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (f);
u32 n_packets = f->n_vectors;
u32 n_left;
+ u32 n_prep;
u32 thread_index = vm->thread_index;
int queue_id = tf->queue_id;
u8 is_shared = tf->shared_queue;
+ u8 offload_enabled = 0;
u32 tx_pkts = 0;
dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
thread_index);
if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
(or_flags & VNET_BUFFER_F_OFFLOAD)))
{
+ offload_enabled = 1;
dpdk_buffer_tx_offload (xd, b[0], mb[0]);
dpdk_buffer_tx_offload (xd, b[1], mb[1]);
dpdk_buffer_tx_offload (xd, b[2], mb[2]);
if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
(or_flags & VNET_BUFFER_F_OFFLOAD)))
{
+ offload_enabled = 1;
dpdk_buffer_tx_offload (xd, b[0], mb[0]);
dpdk_buffer_tx_offload (xd, b[1], mb[1]);
}
b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
dpdk_validate_rte_mbuf (vm, b[0], 1);
- dpdk_buffer_tx_offload (xd, b[0], mb[0]);
+
+ if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
+ (b[0]->flags & VNET_BUFFER_F_OFFLOAD)))
+ {
+ offload_enabled = 1;
+ dpdk_buffer_tx_offload (xd, b[0], mb[0]);
+ }
if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
n_left--;
}
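/* Illustrative sketch, not part of this change: the kind of per-mbuf offload
 * metadata that dpdk_buffer_tx_offload () is expected to program and that
 * rte_eth_tx_prepare () later validates or fixes up for the PMD.  The helper
 * name is hypothetical and the flag names assume DPDK >= 21.11. */
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

static inline void
example_request_tcp_cksum_offload (struct rte_mbuf *mb)
{
  /* header lengths the PMD needs to locate the L3/L4 headers */
  mb->l2_len = sizeof (struct rte_ether_hdr);
  mb->l3_len = sizeof (struct rte_ipv4_hdr);
  mb->l4_len = sizeof (struct rte_tcp_hdr);
  /* request IPv4 header and TCP checksum offload for this packet */
  mb->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
		  RTE_MBUF_F_TX_TCP_CKSUM;
}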
- /* transmit as many packets as possible */
+ /* prepare and transmit as many packets as possible */
tx_pkts = n_packets = mb - ptd->mbufs;
- n_left = tx_burst_vector_internal (vm, xd, ptd->mbufs, n_packets, queue_id,
- is_shared);
+ n_prep = n_packets;
- {
- /* If there is no callback then drop any non-transmitted packets */
- if (PREDICT_FALSE (n_left))
- {
- tx_pkts -= n_left;
- vlib_simple_counter_main_t *cm;
- vnet_main_t *vnm = vnet_get_main ();
+ if (PREDICT_FALSE (offload_enabled &&
+ (xd->flags & DPDK_DEVICE_FLAG_TX_PREPARE)))
+ {
+ n_prep =
+ rte_eth_tx_prepare (xd->port_id, queue_id, ptd->mbufs, n_packets);
- cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
- VNET_INTERFACE_COUNTER_TX_ERROR);
+ /* If tx_prepare rejects some mbufs as malformed, drop the non-prepared packets */
+ if (PREDICT_FALSE (n_prep != n_packets))
+ {
+ n_left = n_packets - n_prep;
+ }
+ }
- vlib_increment_simple_counter (cm, thread_index, xd->sw_if_index,
- n_left);
+ n_left +=
+ tx_burst_vector_internal (vm, xd, ptd->mbufs, n_prep, queue_id, is_shared);
- vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
- n_left);
+ /* If there is no callback then drop any non-transmitted packets */
+ if (PREDICT_FALSE (n_left))
+ {
+ tx_pkts -= n_left;
+ vlib_simple_counter_main_t *cm;
+ vnet_main_t *vnm = vnet_get_main ();
- while (n_left--)
- rte_pktmbuf_free (ptd->mbufs[n_packets - n_left - 1]);
- }
- }
+ cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
+ VNET_INTERFACE_COUNTER_TX_ERROR);
+
+ vlib_increment_simple_counter (cm, thread_index, xd->sw_if_index,
+ n_left);
+
+ vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
+ n_left);
+
+ rte_pktmbuf_free_bulk (&ptd->mbufs[tx_pkts], n_left);
+ }
return tx_pkts;
}
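/* A minimal sketch, not the node's actual code path, of the prepare-then-burst
 * pattern this change introduces: call rte_eth_tx_prepare () only when
 * offloads are present and the driver asks for it, burst the prepared prefix,
 * then free whatever was neither prepared nor transmitted.  The function name
 * and the need_prepare argument are assumptions for illustration. */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static inline uint16_t
example_prepare_and_burst (uint16_t port_id, uint16_t queue_id,
			   struct rte_mbuf **mbufs, uint16_t n_pkts,
			   int need_prepare)
{
  uint16_t n_prep = n_pkts;
  uint16_t n_sent;

  if (need_prepare)
    /* validates/fixes offload metadata and returns the count of leading
     * mbufs that are safe to hand to rte_eth_tx_burst () */
    n_prep = rte_eth_tx_prepare (port_id, queue_id, mbufs, n_pkts);

  n_sent = rte_eth_tx_burst (port_id, queue_id, mbufs, n_prep);

  /* drop the rejected and the unsent tail in one call */
  if (n_sent < n_pkts)
    rte_pktmbuf_free_bulk (&mbufs[n_sent], (unsigned int) (n_pkts - n_sent));

  return n_sent;
}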
_ (11, RX_FLOW_OFFLOAD, "rx-flow-offload") \
_ (12, RX_IP4_CKSUM, "rx-ip4-cksum") \
_ (13, INT_SUPPORTED, "int-supported") \
- _ (14, INT_UNMASKABLE, "int-unmaskable")
+ _ (14, INT_UNMASKABLE, "int-unmaskable") \
+ _ (15, TX_PREPARE, "tx-prepare")
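/* Assuming the usual foreach expansion for these flags,
 *   #define _(a, b, c) DPDK_DEVICE_FLAG_##b = (1 << a),
 * the new entry becomes DPDK_DEVICE_FLAG_TX_PREPARE == (1 << 15), i.e. the
 * bit tested above via (xd->flags & DPDK_DEVICE_FLAG_TX_PREPARE). */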
typedef enum
{
u32 interface_number_from_port_id : 1;
u32 use_intel_phdr_cksum : 1;
u32 int_unmaskable : 1;
+ u32 need_tx_prepare : 1;
} dpdk_driver_t;
dpdk_driver_t *dpdk_driver_find (const char *name, const char **desc);
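/* A minimal sketch, assuming init-time code not shown in this change, of how
 * the new driver bit could be propagated to the per-device flag tested in the
 * tx path.  The function name is hypothetical, xd stands in for the device
 * being configured, and the rte_eth_dev_info_get () lookup is an assumption
 * for illustration. */
#include <rte_ethdev.h>

static inline void
example_set_tx_prepare_flag (dpdk_device_t *xd)
{
  struct rte_eth_dev_info di;
  const char *desc = 0;
  dpdk_driver_t *dr;

  if (rte_eth_dev_info_get (xd->port_id, &di) != 0)
    return;

  dr = dpdk_driver_find (di.driver_name, &desc);
  if (dr && dr->need_tx_prepare)
    xd->flags |= DPDK_DEVICE_FLAG_TX_PREPARE;
}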