n_packets = ring->tx_head - ring->tx_tail;
- tx_head = ring->tx_head % DPDK_TX_RING_SIZE;
+ tx_head = ring->tx_head % xd->nb_tx_desc;
/*
 * Ensure rte_eth_tx_burst is not called with 0 packets, which can lead to
 * unpredictable results.
 */
- ASSERT (n_packets < DPDK_TX_RING_SIZE);
+ ASSERT (n_packets < xd->nb_tx_desc);
/*
 * If there is no flowcontrol callback, there is only temporary buffering
 * on the tx_vector and so the tail should always be 0.
 */
do
{
/* start the burst at the tail */
- tx_tail = ring->tx_tail % DPDK_TX_RING_SIZE;
+ tx_tail = ring->tx_tail % xd->nb_tx_desc;
/*
* This device only supports one TX queue,
* and we're running multi-threaded...
*/
- if (PREDICT_FALSE (xd->dev_type != VNET_DPDK_DEV_VHOST_USER &&
+ if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_VHOST_USER) == 0 &&
xd->lockp != 0))
{
queue_id = queue_id % xd->tx_q_used;
queue_id = (queue_id + 1) % xd->tx_q_used;
}
- if (PREDICT_TRUE (xd->dev_type == VNET_DPDK_DEV_ETH))
+ if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_HQOS)) /* HQoS ON */
+ {
+ if (PREDICT_TRUE (tx_head > tx_tail))
+ {
+ /* no wrap, transmit in one burst */
+ dpdk_device_hqos_per_worker_thread_t *hqos =
+ &xd->hqos_wt[vm->cpu_index];
+
+ dpdk_hqos_metadata_set (hqos,
+ &tx_vector[tx_tail], tx_head - tx_tail);
+ rv = rte_ring_sp_enqueue_burst (hqos->swq,
+ (void **) &tx_vector[tx_tail],
+ (uint16_t) (tx_head - tx_tail));
+ }
+ else
+ {
+ /*
+ * This can only happen if there is a flowcontrol callback.
+ * We need to split the transmit into two calls: one for
+ * the packets up to the wrap point, and one to continue
+ * at the start of the ring.
+ * Transmit pkts up to the wrap point.
+ */
+ dpdk_device_hqos_per_worker_thread_t *hqos =
+ &xd->hqos_wt[vm->cpu_index];
+
+ dpdk_hqos_metadata_set (hqos,
+ &tx_vector[tx_tail],
+ xd->nb_tx_desc - tx_tail);
+ rv = rte_ring_sp_enqueue_burst (hqos->swq,
+ (void **) &tx_vector[tx_tail],
+ (uint16_t) (xd->nb_tx_desc -
+ tx_tail));
+ /*
+ * If we transmitted everything we wanted, then allow 1 retry
+ * so we can try to transmit the rest. If we didn't transmit
+ * everything, stop now.
+ */
+ n_retry = (rv == xd->nb_tx_desc - tx_tail) ? 1 : 0;
+ }
+ }
+ else if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
{
if (PREDICT_TRUE (tx_head > tx_tail))
{
rv = rte_eth_tx_burst (xd->device_index,
(uint16_t) queue_id,
&tx_vector[tx_tail],
- (uint16_t) (DPDK_TX_RING_SIZE -
- tx_tail));
+ (uint16_t) (xd->nb_tx_desc - tx_tail));
/*
* If we transmitted everything we wanted, then allow 1 retry
* so we can try to transmit the rest. If we didn't transmit
* everything, stop now.
*/
- n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
+ n_retry = (rv == xd->nb_tx_desc - tx_tail) ? 1 : 0;
}
}
#if DPDK_VHOST_USER
- else if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
+ else if (xd->flags & DPDK_DEVICE_FLAG_VHOST_USER)
{
u32 offset = 0;
if (xd->need_txlock)
int i;
u32 bytes = 0;
struct rte_mbuf **pkts = &tx_vector[tx_tail];
- for (i = 0; i < (DPDK_TX_RING_SIZE - tx_tail); i++)
+ for (i = 0; i < (xd->nb_tx_desc - tx_tail); i++)
{
struct rte_mbuf *buff = pkts[i];
bytes += rte_pktmbuf_data_len (buff);
rte_vhost_enqueue_burst (&xd->vu_vhost_dev,
offset + VIRTIO_RXQ,
&tx_vector[tx_tail],
- (uint16_t) (DPDK_TX_RING_SIZE -
+ (uint16_t) (xd->nb_tx_desc -
tx_tail));
if (PREDICT_TRUE (rv > 0))
rte_pktmbuf_free (tx_vector[tx_tail + c]);
}
- n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
+ n_retry = (rv == xd->nb_tx_desc - tx_tail) ? 1 : 0;
}
if (xd->need_txlock)
}
#endif
#if RTE_LIBRTE_KNI
- else if (xd->dev_type == VNET_DPDK_DEV_KNI)
+ else if (xd->flags & DPDK_DEVICE_FLAG_KNI)
{
if (PREDICT_TRUE (tx_head > tx_tail))
{
/* no wrap, transmit in one burst */
rv = rte_kni_tx_burst (xd->kni,
&tx_vector[tx_tail],
- (uint16_t) (DPDK_TX_RING_SIZE -
- tx_tail));
+ (uint16_t) (xd->nb_tx_desc - tx_tail));
/*
* If we transmitted everything we wanted, then allow 1 retry
* so we can try to transmit the rest. If we didn't transmit
* everything, stop now.
*/
- n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
+ n_retry = (rv == xd->nb_tx_desc - tx_tail) ? 1 : 0;
}
}
#endif
rv = 0;
}
- if (PREDICT_FALSE (xd->dev_type != VNET_DPDK_DEV_VHOST_USER &&
+ if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_VHOST_USER) == 0 &&
xd->lockp != 0))
*xd->lockp[queue_id] = 0;
ASSERT (n_packets <= VLIB_FRAME_SIZE);
- if (PREDICT_FALSE (n_on_ring + n_packets > DPDK_TX_RING_SIZE))
+ if (PREDICT_FALSE (n_on_ring + n_packets > xd->nb_tx_desc))
{
/*
 * Overflowing the ring should never happen.
 */
from = vlib_frame_vector_args (f);
n_left = n_packets;
- i = ring->tx_head % DPDK_TX_RING_SIZE;
+ i = ring->tx_head % xd->nb_tx_desc;
while (n_left >= 4)
{
if (PREDICT_TRUE (any_clone == 0))
{
- tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
+ tx_vector[i % xd->nb_tx_desc] = mb0;
i++;
- tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
+ tx_vector[i % xd->nb_tx_desc] = mb1;
i++;
}
else
/* cloning was done, need to check for failure */
if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
{
- tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
+ tx_vector[i % xd->nb_tx_desc] = mb0;
i++;
}
if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_REPL_FAIL) == 0))
{
- tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
+ tx_vector[i % xd->nb_tx_desc] = mb1;
i++;
}
}
if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
{
- tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
+ tx_vector[i % xd->nb_tx_desc] = mb0;
i++;
}
n_left--;
dpdk_main_t *dm = &dpdk_main;
dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);
- if (!xd || xd->dev_type != VNET_DPDK_DEV_VHOST_USER)
+ if (!xd || (xd->flags & DPDK_DEVICE_FLAG_VHOST_USER) == 0)
{
clib_warning
("cannot renumber non-vhost-user interface (sw_if_index: %d)",
sizeof (xd->last_cleared_xstats[0]));
#if DPDK_VHOST_USER
- if (PREDICT_FALSE (xd->dev_type == VNET_DPDK_DEV_VHOST_USER))
+ if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_VHOST_USER))
{
int i;
for (i = 0; i < xd->rx_q_used * VIRTIO_QNUM; i++)
int rv = 0;
#ifdef RTE_LIBRTE_KNI
- if (xd->dev_type == VNET_DPDK_DEV_KNI)
+ if (xd->flags & DPDK_DEVICE_FLAG_KNI)
{
if (is_up)
{
}
#endif
#if DPDK_VHOST_USER
- if (xd->dev_type == VNET_DPDK_DEV_VHOST_USER)
+ if (xd->flags & DPDK_DEVICE_FLAG_VHOST_USER)
{
if (is_up)
{
dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
int r, vlan_offload;
- u32 prev_subifs = xd->vlan_subifs;
+ u32 prev_subifs = xd->num_subifs;
+ clib_error_t *err = 0;
if (is_add)
- xd->vlan_subifs++;
- else if (xd->vlan_subifs)
- xd->vlan_subifs--;
+ xd->num_subifs++;
+ else if (xd->num_subifs)
+ xd->num_subifs--;
- if (xd->dev_type != VNET_DPDK_DEV_ETH)
- return 0;
+ if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
+ goto done;
/* currently we program VLANS only for IXGBE VF and I40E VF */
if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) && (xd->pmd != VNET_DPDK_PMD_I40EVF))
- return 0;
+ goto done;
if (t->sub.eth.flags.no_tags == 1)
- return 0;
+ goto done;
if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
{
- xd->vlan_subifs = prev_subifs;
- return clib_error_return (0, "unsupported VLAN setup");
+ xd->num_subifs = prev_subifs;
+ err = clib_error_return (0, "unsupported VLAN setup");
+ goto done;
}
vlan_offload = rte_eth_dev_get_vlan_offload (xd->device_index);
if ((r = rte_eth_dev_set_vlan_offload (xd->device_index, vlan_offload)))
{
- xd->vlan_subifs = prev_subifs;
- return clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
- xd->device_index, r);
+ xd->num_subifs = prev_subifs;
+ err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
+ xd->device_index, r);
+ goto done;
}
rte_eth_dev_vlan_filter (xd->device_index, t->sub.eth.outer_vlan_id,
is_add)))
{
- xd->vlan_subifs = prev_subifs;
- return clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
- xd->device_index, r);
+ xd->num_subifs = prev_subifs;
+ err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
+ xd->device_index, r);
+ goto done;
}
- return 0;
+done:
+ if (xd->num_subifs)
+ xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
+ else
+ xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;
+
+ return err;
}
/* *INDENT-OFF* */
.rx_redirect_to_node = dpdk_set_interface_next_node,
.no_flatten_output_chains = 1,
.name_renumber = dpdk_device_renumber,
+ .mac_addr_change_function = dpdk_set_mac_address,
};
VLIB_DEVICE_TX_FUNCTION_MULTIARCH (dpdk_device_class, dpdk_interface_tx)