#undef _
};
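+/*
+ * Set the hardware MAC address of a DPDK device.
+ * 'address' must point to the six bytes of the new MAC address.
+ */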
+clib_error_t *
+dpdk_set_mac_address (vnet_hw_interface_t * hi, char * address)
+{
+ int error;
+ dpdk_main_t * dm = &dpdk_main;
+ dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);
+
+ error = rte_eth_dev_default_mac_addr_set (xd->device_index,
+ (struct ether_addr *) address);
+
+ if (error) {
+ return clib_error_return (0, "mac address set failed: %d", error);
+ } else {
+ return NULL;
+ }
+}
+
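+/*
+ * Replace the device's multicast filter list with the 'naddr'
+ * addresses in mc_addr_vec.
+ */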
+clib_error_t *
+dpdk_set_mc_filter (vnet_hw_interface_t * hi,
+ struct ether_addr mc_addr_vec[], int naddr)
+{
+ int error;
+ dpdk_main_t * dm = &dpdk_main;
+ dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);
+
+ error = rte_eth_dev_set_mc_addr_list (xd->device_index, mc_addr_vec, naddr);
+
+ if (error) {
+ return clib_error_return (0, "mc addr list failed: %d", error);
+ } else {
+ return NULL;
+ }
+}
+
static struct rte_mbuf * dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
vlib_main_t * vm = vlib_get_main();
unsigned socket_id = rte_socket_id();
ASSERT (bm->pktmbuf_pools[socket_id]);
- pkt_mb = ((struct rte_mbuf *)b)-1;
+ pkt_mb = rte_mbuf_from_vlib_buffer(b);
nb_segs = pkt_mb->nb_segs;
for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
{
dpdk_tx_dma_trace_t * t0;
struct rte_mbuf * mb;
- mb = ((struct rte_mbuf *)buffer)-1;
+ mb = rte_mbuf_from_vlib_buffer(buffer);
t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
t0->queue_index = queue_id;
#endif
if (PREDICT_TRUE(tx_head > tx_tail))
{
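+ /* Sum the data lengths of the frames about to be sent so the
+ * vring byte counter can be credited if the burst succeeds. */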
+ int i; u32 bytes = 0;
+ struct rte_mbuf **pkts = &tx_vector[tx_tail];
+ for (i = 0; i < (tx_head - tx_tail); i++) {
+ struct rte_mbuf *buff = pkts[i];
+ bytes += rte_pktmbuf_data_len(buff);
+ }
+
/* no wrap, transmit in one burst */
rv = rte_vhost_enqueue_burst(&xd->vu_vhost_dev, offset + VIRTIO_RXQ,
&tx_vector[tx_tail],
(uint16_t) (tx_head-tx_tail));
if (PREDICT_TRUE(rv > 0))
{
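+ /* Credit the per-vring counters: the burst is enqueued to the
+ * guest's VIRTIO_RXQ, but the counters live on the VIRTIO_TXQ
+ * vring entry. */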
+ dpdk_vu_vring *vring = &(xd->vu_intf->vrings[offset + VIRTIO_TXQ]);
+ vring->packets += rv;
+ vring->bytes += bytes;
+
if (dpdk_vhost_user_want_interrupt(xd, offset + VIRTIO_RXQ)) {
- dpdk_vu_vring *vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
+ vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
vring->n_since_last_int += rv;
f64 now = vlib_time_now (vm);
* so we can try to transmit the rest. If we didn't transmit
* everything, stop now.
*/
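+ /* Same byte accounting for the segment of the ring up to the
+ * wrap point. */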
+ int i; u32 bytes = 0;
+ struct rte_mbuf **pkts = &tx_vector[tx_tail];
+ for (i = 0; i < (DPDK_TX_RING_SIZE - tx_tail); i++) {
+ struct rte_mbuf *buff = pkts[i];
+ bytes += rte_pktmbuf_data_len(buff);
+ }
rv = rte_vhost_enqueue_burst(&xd->vu_vhost_dev, offset + VIRTIO_RXQ,
&tx_vector[tx_tail],
(uint16_t) (DPDK_TX_RING_SIZE - tx_tail));
if (PREDICT_TRUE(rv > 0))
{
+ dpdk_vu_vring *vring = &(xd->vu_intf->vrings[offset + VIRTIO_TXQ]);
+ vring->packets += rv;
+ vring->bytes += bytes;
+
if (dpdk_vhost_user_want_interrupt(xd, offset + VIRTIO_RXQ)) {
- dpdk_vu_vring *vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
+ vring = &(xd->vu_intf->vrings[offset + VIRTIO_RXQ]);
vring->n_since_last_int += rv;
f64 now = vlib_time_now (vm);
{
u32 bi0 = from[n_packets];
vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
- struct rte_mbuf *mb0 = ((struct rte_mbuf *)b0) - 1;
+ struct rte_mbuf *mb0 = rte_mbuf_from_vlib_buffer(b0);
rte_pktmbuf_free (mb0);
}
return n_on_ring;
pref0 = vlib_get_buffer (vm, pi0);
pref1 = vlib_get_buffer (vm, pi1);
- prefmb0 = ((struct rte_mbuf *)pref0) - 1;
- prefmb1 = ((struct rte_mbuf *)pref1) - 1;
-
+ prefmb0 = rte_mbuf_from_vlib_buffer(pref0);
+ prefmb1 = rte_mbuf_from_vlib_buffer(pref1);
+
CLIB_PREFETCH(prefmb0, CLIB_CACHE_LINE_BYTES, LOAD);
CLIB_PREFETCH(pref0, CLIB_CACHE_LINE_BYTES, LOAD);
CLIB_PREFETCH(prefmb1, CLIB_CACHE_LINE_BYTES, LOAD);
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
- mb0 = ((struct rte_mbuf *)b0) - 1;
- mb1 = ((struct rte_mbuf *)b1) - 1;
+ mb0 = rte_mbuf_from_vlib_buffer(b0);
+ mb1 = rte_mbuf_from_vlib_buffer(b1);
any_clone = b0->clone_count | b1->clone_count;
if (PREDICT_FALSE(any_clone != 0))
{
if (PREDICT_FALSE(b0->clone_count != 0))
- {
- struct rte_mbuf * mb0_new = dpdk_replicate_packet_mb (b0);
- if (PREDICT_FALSE(mb0_new == 0))
- {
- vlib_error_count (vm, node->node_index,
- DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
- b0->flags |= VLIB_BUFFER_REPL_FAIL;
- }
- else
- mb0 = mb0_new;
- vec_add1 (dm->recycle[my_cpu], bi0);
- }
+ {
+ struct rte_mbuf * mb0_new = dpdk_replicate_packet_mb (b0);
+ if (PREDICT_FALSE(mb0_new == 0))
+ {
+ vlib_error_count (vm, node->node_index,
+ DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
+ b0->flags |= VLIB_BUFFER_REPL_FAIL;
+ }
+ else
+ mb0 = mb0_new;
+ vec_add1 (dm->recycle[my_cpu], bi0);
+ }
if (PREDICT_FALSE(b1->clone_count != 0))
- {
- struct rte_mbuf * mb1_new = dpdk_replicate_packet_mb (b1);
- if (PREDICT_FALSE(mb1_new == 0))
- {
- vlib_error_count (vm, node->node_index,
- DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
- b1->flags |= VLIB_BUFFER_REPL_FAIL;
- }
- else
- mb1 = mb1_new;
- vec_add1 (dm->recycle[my_cpu], bi1);
- }
- }
+ {
+ struct rte_mbuf * mb1_new = dpdk_replicate_packet_mb (b1);
+ if (PREDICT_FALSE(mb1_new == 0))
+ {
+ vlib_error_count (vm, node->node_index,
+ DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
+ b1->flags |= VLIB_BUFFER_REPL_FAIL;
+ }
+ else
+ mb1 = mb1_new;
+ vec_add1 (dm->recycle[my_cpu], bi1);
+ }
+ }
delta0 = PREDICT_FALSE(b0->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
- vlib_buffer_length_in_chain (vm, b0) - (i16) mb0->pkt_len;
+ vlib_buffer_length_in_chain (vm, b0) - (i16) mb0->pkt_len;
delta1 = PREDICT_FALSE(b1->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
- vlib_buffer_length_in_chain (vm, b1) - (i16) mb1->pkt_len;
+ vlib_buffer_length_in_chain (vm, b1) - (i16) mb1->pkt_len;
new_data_len0 = (u16)((i16) mb0->data_len + delta0);
new_data_len1 = (u16)((i16) mb1->data_len + delta1);
mb1->pkt_len = new_pkt_len1;
mb0->data_off = (PREDICT_FALSE(b0->flags & VLIB_BUFFER_REPL_FAIL)) ?
- mb0->data_off : (u16)(RTE_PKTMBUF_HEADROOM + b0->current_data);
+ mb0->data_off : (u16)(RTE_PKTMBUF_HEADROOM + b0->current_data);
mb1->data_off = (PREDICT_FALSE(b1->flags & VLIB_BUFFER_REPL_FAIL)) ?
- mb1->data_off : (u16)(RTE_PKTMBUF_HEADROOM + b1->current_data);
+ mb1->data_off : (u16)(RTE_PKTMBUF_HEADROOM + b1->current_data);
if (PREDICT_FALSE(node->flags & VLIB_NODE_FLAG_TRACE))
- {
+ {
if (b0->flags & VLIB_BUFFER_IS_TRACED)
- dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
+ dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
if (b1->flags & VLIB_BUFFER_IS_TRACED)
- dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi1, b1);
- }
+ dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi1, b1);
+ }
if (PREDICT_TRUE(any_clone == 0))
{
- tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
+ tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
i++;
- tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
+ tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
i++;
}
else
/* cloning was done, need to check for failure */
if (PREDICT_TRUE((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
{
- tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
+ tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
i++;
}
if (PREDICT_TRUE((b1->flags & VLIB_BUFFER_REPL_FAIL) == 0))
{
- tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
+ tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
i++;
}
}
b0 = vlib_get_buffer (vm, bi0);
- mb0 = ((struct rte_mbuf *)b0) - 1;
+ mb0 = rte_mbuf_from_vlib_buffer(b0);
if (PREDICT_FALSE(b0->clone_count != 0))
- {
- struct rte_mbuf * mb0_new = dpdk_replicate_packet_mb (b0);
- if (PREDICT_FALSE(mb0_new == 0))
- {
- vlib_error_count (vm, node->node_index,
- DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
- b0->flags |= VLIB_BUFFER_REPL_FAIL;
- }
- else
- mb0 = mb0_new;
- vec_add1 (dm->recycle[my_cpu], bi0);
- }
+ {
+ struct rte_mbuf * mb0_new = dpdk_replicate_packet_mb (b0);
+ if (PREDICT_FALSE(mb0_new == 0))
+ {
+ vlib_error_count (vm, node->node_index,
+ DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
+ b0->flags |= VLIB_BUFFER_REPL_FAIL;
+ }
+ else
+ mb0 = mb0_new;
+ vec_add1 (dm->recycle[my_cpu], bi0);
+ }
delta0 = PREDICT_FALSE(b0->flags & VLIB_BUFFER_REPL_FAIL) ? 0 :
- vlib_buffer_length_in_chain (vm, b0) - (i16) mb0->pkt_len;
+ vlib_buffer_length_in_chain (vm, b0) - (i16) mb0->pkt_len;
new_data_len0 = (u16)((i16) mb0->data_len + delta0);
new_pkt_len0 = (u16)((i16) mb0->pkt_len + delta0);
mb0->data_len = new_data_len0;
mb0->pkt_len = new_pkt_len0;
mb0->data_off = (PREDICT_FALSE(b0->flags & VLIB_BUFFER_REPL_FAIL)) ?
- mb0->data_off : (u16)(RTE_PKTMBUF_HEADROOM + b0->current_data);
+ mb0->data_off : (u16)(RTE_PKTMBUF_HEADROOM + b0->current_data);
if (PREDICT_FALSE(node->flags & VLIB_NODE_FLAG_TRACE))
- if (b0->flags & VLIB_BUFFER_IS_TRACED)
- dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
if (PREDICT_TRUE((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
{
- tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
+ tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
i++;
}
n_left--;
vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index, n_packets);
vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
- n_packets);
+ n_packets);
while (n_packets--)
rte_pktmbuf_free (tx_vector[ring->tx_tail + n_packets]);
}
/* Reset head/tail to avoid unnecessary wrap */
- ring->tx_head = 0;
- ring->tx_tail = 0;
+ ring->tx_head = 0;
+ ring->tx_tail = 0;
}
/* Recycle replicated buffers */
dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);
if (!xd || xd->dev_type != VNET_DPDK_DEV_VHOST_USER) {
- clib_warning("cannot renumber non-vhost-user interface (sw_if_index: %d)",
- hi->sw_if_index);
- return 0;
+ clib_warning("cannot renumber non-vhost-user interface (sw_if_index: %d)",
+ hi->sw_if_index);
+ return 0;
}
xd->vu_if_id = new_dev_instance;
*/
if (xd->admin_up != 0xff)
{
- rte_eth_stats_reset (xd->device_index);
- memset (&xd->last_stats, 0, sizeof (xd->last_stats));
+ /*
+ * Snapshot the current stats into "last_cleared_stats" so the
+ * counters appear cleared from a display perspective while the
+ * hardware counters keep running.
+ */
dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));
+
+ memcpy (&xd->last_cleared_stats, &xd->stats, sizeof(xd->stats));
+ memcpy (xd->last_cleared_xstats, xd->xstats,
+ vec_len(xd->last_cleared_xstats) *
+ sizeof(xd->last_cleared_xstats[0]));
}
else
{
- rte_eth_stats_reset (xd->device_index);
- memset(&xd->stats, 0, sizeof(xd->stats));
+ /*
+ * Internally, rte_eth_xstats_reset() calls rte_eth_stats_reset(),
+ * so calling xstats_reset() here clears both.
+ */
+ rte_eth_xstats_reset (xd->device_index);
+ memset (&xd->stats, 0, sizeof(xd->stats));
memset (&xd->last_stats, 0, sizeof (xd->last_stats));
}
- rte_eth_xstats_reset(xd->device_index);
+
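+ /* vhost-user interfaces also keep per-vring packet/byte
+ * counters; clear them too. */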
+ if (PREDICT_FALSE(xd->dev_type == VNET_DPDK_DEV_VHOST_USER)) {
+ int i;
+ for (i = 0; i < xd->rx_q_used * VIRTIO_QNUM; i++) {
+ xd->vu_intf->vrings[i].packets = 0;
+ xd->vu_intf->vrings[i].bytes = 0;
+ }
+ }
}
#ifdef RTE_LIBRTE_KNI
vlib_buffer_main_t * bm = vm->buffer_main;
memset(&conf, 0, sizeof(conf));
snprintf(conf.name, RTE_KNI_NAMESIZE, "vpp%u", xd->kni_port_id);
- conf.mbuf_size = MBUF_SIZE;
+ conf.mbuf_size = VLIB_BUFFER_DATA_SIZE;
memset(&ops, 0, sizeof(ops));
ops.port_id = xd->kni_port_id;
ops.change_mtu = kni_change_mtu;
if (xd->dev_type != VNET_DPDK_DEV_ETH)
return 0;
- /* currently we program VLANS only for IXGBE VF */
- if (xd->pmd != VNET_DPDK_PMD_IXGBEVF)
+
+ /* currently we program VLANs only for IXGBE VF and I40E VF */
+ if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) &&
+ (xd->pmd != VNET_DPDK_PMD_I40EVF))
return 0;
if (t->sub.eth.flags.no_tags == 1)
}
return 0; // no override
}
+
+/*
+ * Return a copy of the DPDK port stats in dest.
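+ *
+ * A minimal caller sketch (hw_if_index assumed valid):
+ *   struct rte_eth_stats stats;
+ *   clib_error_t * err = dpdk_get_hw_interface_stats (hw_if_index, &stats);
+ *   if (err) clib_error_report (err);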
+ */
+clib_error_t*
+dpdk_get_hw_interface_stats (u32 hw_if_index, struct rte_eth_stats* dest)
+{
+ dpdk_main_t * dm = &dpdk_main;
+ vnet_main_t * vnm = vnet_get_main();
+ vnet_hw_interface_t * hi = vnet_get_hw_interface (vnm, hw_if_index);
+ dpdk_device_t * xd = vec_elt_at_index (dm->devices, hi->dev_instance);
+
+ if (!dest) {
+ return clib_error_return (0, "Missing or NULL argument");
+ }
+ if (!xd) {
+ return clib_error_return (0, "Unable to get DPDK device from HW interface");
+ }
+
+ dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));
+
+ memcpy(dest, &xd->stats, sizeof(xd->stats));
+ return 0;
+}