X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fdpdk%2Fdevice%2Fdevice.c;h=c355edf6a4b788e183f9a74b54171f6076ed5a42;hb=548d70de6;hp=987596ead9b9f8f21072c762c7cdecd2c5fdf9a7;hpb=04f3db3847d242857b9d9d858bcdca538a1be7d7;p=vpp.git diff --git a/src/plugins/dpdk/device/device.c b/src/plugins/dpdk/device/device.c index 987596ead9b..c355edf6a4b 100644 --- a/src/plugins/dpdk/device/device.c +++ b/src/plugins/dpdk/device/device.c @@ -19,16 +19,14 @@ #include #include +#include #include - #include #include #define foreach_dpdk_tx_func_error \ _(BAD_RETVAL, "DPDK tx function returned an error") \ - _(RING_FULL, "Tx packet drops (ring full)") \ - _(PKT_DROP, "Tx packet drops (dpdk tx failure)") \ - _(REPL_FAIL, "Tx packet drops (replication failure)") + _(PKT_DROP, "Tx packet drops (dpdk tx failure)") typedef enum { @@ -38,7 +36,6 @@ typedef enum DPDK_TX_FUNC_N_ERROR, } dpdk_tx_func_error_t; -#ifndef CLIB_MULTIARCH_VARIANT static char *dpdk_tx_func_error_strings[] = { #define _(n,s) s, foreach_dpdk_tx_func_error @@ -46,78 +43,57 @@ static char *dpdk_tx_func_error_strings[] = { }; static clib_error_t * -dpdk_set_mac_address (vnet_hw_interface_t * hi, char *address) +dpdk_add_del_mac_address (vnet_hw_interface_t * hi, + const u8 * address, u8 is_add) { int error; dpdk_main_t *dm = &dpdk_main; dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance); - error = rte_eth_dev_default_mac_addr_set (xd->device_index, - (struct ether_addr *) address); + if (is_add) + error = rte_eth_dev_mac_addr_add (xd->port_id, + (struct rte_ether_addr *) address, 0); + else + error = rte_eth_dev_mac_addr_remove (xd->port_id, + (struct rte_ether_addr *) address); if (error) { - return clib_error_return (0, "mac address set failed: %d", error); - } - else - { - vec_reset_length (xd->default_mac_address); - vec_add (xd->default_mac_address, address, sizeof (address)); - return NULL; + return clib_error_return (0, "mac address add/del failed: %d", error); } + + return NULL; } -#endif -static struct rte_mbuf * -dpdk_replicate_packet_mb (vlib_buffer_t * b) +static clib_error_t * +dpdk_set_mac_address (vnet_hw_interface_t * hi, + const u8 * old_address, const u8 * address) { + int error; dpdk_main_t *dm = &dpdk_main; - struct rte_mbuf **mbufs = 0, *s, *d; - u8 nb_segs; - unsigned socket_id = rte_socket_id (); - int i; + dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance); - ASSERT (dm->pktmbuf_pools[socket_id]); - s = rte_mbuf_from_vlib_buffer (b); - nb_segs = s->nb_segs; - vec_validate (mbufs, nb_segs - 1); + error = rte_eth_dev_default_mac_addr_set (xd->port_id, (void *) address); - if (rte_pktmbuf_alloc_bulk (dm->pktmbuf_pools[socket_id], mbufs, nb_segs)) + if (error) { - vec_free (mbufs); - return 0; + return clib_error_return (0, "mac address set failed: %d", error); } - - d = mbufs[0]; - d->nb_segs = s->nb_segs; - d->data_len = s->data_len; - d->pkt_len = s->pkt_len; - d->data_off = s->data_off; - clib_memcpy (d->buf_addr, s->buf_addr, RTE_PKTMBUF_HEADROOM + s->data_len); - - for (i = 1; i < nb_segs; i++) + else { - d->next = mbufs[i]; - d = mbufs[i]; - s = s->next; - d->data_len = s->data_len; - clib_memcpy (d->buf_addr, s->buf_addr, - RTE_PKTMBUF_HEADROOM + s->data_len); + vec_reset_length (xd->default_mac_address); + vec_add (xd->default_mac_address, address, sizeof (address)); + return NULL; } - - d = mbufs[0]; - vec_free (mbufs); - return d; } static void -dpdk_tx_trace_buffer (dpdk_main_t * dm, - vlib_node_runtime_t * node, - dpdk_device_t * xd, - u16 queue_id, 
u32 buffer_index, vlib_buffer_t * buffer) +dpdk_tx_trace_buffer (dpdk_main_t * dm, vlib_node_runtime_t * node, + dpdk_device_t * xd, u16 queue_id, + vlib_buffer_t * buffer) { vlib_main_t *vm = vlib_get_main (); - dpdk_tx_dma_trace_t *t0; + dpdk_tx_trace_t *t0; struct rte_mbuf *mb; mb = rte_mbuf_from_vlib_buffer (buffer); @@ -125,12 +101,14 @@ dpdk_tx_trace_buffer (dpdk_main_t * dm, t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0])); t0->queue_index = queue_id; t0->device_index = xd->device_index; - t0->buffer_index = buffer_index; - clib_memcpy (&t0->mb, mb, sizeof (t0->mb)); - clib_memcpy (&t0->buffer, buffer, - sizeof (buffer[0]) - sizeof (buffer->pre_data)); - clib_memcpy (t0->buffer.pre_data, buffer->data + buffer->current_data, - sizeof (t0->buffer.pre_data)); + t0->buffer_index = vlib_get_buffer_index (vm, buffer); + clib_memcpy_fast (&t0->mb, mb, sizeof (t0->mb)); + clib_memcpy_fast (&t0->buffer, buffer, + sizeof (buffer[0]) - sizeof (buffer->pre_data)); + clib_memcpy_fast (t0->buffer.pre_data, buffer->data + buffer->current_data, + sizeof (t0->buffer.pre_data)); + clib_memcpy_fast (&t0->data, mb->buf_addr + mb->data_off, + sizeof (t0->data)); } static_always_inline void @@ -138,23 +116,13 @@ dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b, int maybe_multiseg) { struct rte_mbuf *mb, *first_mb, *last_mb; + last_mb = first_mb = mb = rte_mbuf_from_vlib_buffer (b); /* buffer is coming from non-dpdk source so we need to init rte_mbuf header */ if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0)) - { - vlib_buffer_t *b2 = b; - last_mb = mb = rte_mbuf_from_vlib_buffer (b2); - rte_pktmbuf_reset (mb); - while (maybe_multiseg && (b2->flags & VLIB_BUFFER_NEXT_PRESENT)) - { - b2 = vlib_get_buffer (vm, b2->next_buffer); - mb = rte_mbuf_from_vlib_buffer (b2); - rte_pktmbuf_reset (mb); - } - } + rte_pktmbuf_reset (mb); - last_mb = first_mb = mb = rte_mbuf_from_vlib_buffer (b); first_mb->nb_segs = 1; mb->data_len = b->current_length; mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) : @@ -165,73 +133,41 @@ dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b, { b = vlib_get_buffer (vm, b->next_buffer); mb = rte_mbuf_from_vlib_buffer (b); + if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0)) + rte_pktmbuf_reset (mb); last_mb->next = mb; last_mb = mb; mb->data_len = b->current_length; mb->pkt_len = b->current_length; mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data; first_mb->nb_segs++; - if (PREDICT_FALSE (b->n_add_refs)) - { - rte_mbuf_refcnt_update (mb, b->n_add_refs); - b->n_add_refs = 0; - } + if (PREDICT_FALSE (b->ref_count > 1)) + mb->pool = + dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index]; } } /* - * This function calls the dpdk's tx_burst function to transmit the packets - * on the tx_vector. It manages a lock per-device if the device does not + * This function calls the dpdk's tx_burst function to transmit the packets. + * It manages a lock per-device if the device does not * support multiple queues. It returns the number of packets untransmitted - * on the tx_vector. If all packets are transmitted (the normal case), the - * function returns 0. - * - * The function assumes there is at least one packet on the tx_vector. + * If all packets are transmitted (the normal case), the function returns 0. 
*/ static_always_inline u32 tx_burst_vector_internal (vlib_main_t * vm, dpdk_device_t * xd, - struct rte_mbuf **tx_vector) + struct rte_mbuf **mb, u32 n_left) { dpdk_main_t *dm = &dpdk_main; - u32 n_packets; - u32 tx_head; - u32 tx_tail; u32 n_retry; - int rv; + int n_sent = 0; int queue_id; - tx_ring_hdr_t *ring; - - ring = vec_header (tx_vector, sizeof (*ring)); - - n_packets = ring->tx_head - ring->tx_tail; - - tx_head = ring->tx_head % xd->nb_tx_desc; - - /* - * Ensure rte_eth_tx_burst is not called with 0 packets, which can lead to - * unpredictable results. - */ - ASSERT (n_packets > 0); - - /* - * Check for tx_vector overflow. If this fails it is a system configuration - * error. The ring should be sized big enough to handle the largest un-flowed - * off burst from a traffic manager. A larger size also helps performance - * a bit because it decreases the probability of having to issue two tx_burst - * calls due to a ring wrap. - */ - ASSERT (n_packets < xd->nb_tx_desc); - ASSERT (ring->tx_tail == 0); n_retry = 16; queue_id = vm->thread_index; do { - /* start the burst at the tail */ - tx_tail = ring->tx_tail % xd->nb_tx_desc; - /* * This device only supports one TX queue, * and we're running multi-threaded... @@ -239,43 +175,27 @@ static_always_inline if (PREDICT_FALSE (xd->lockp != 0)) { queue_id = queue_id % xd->tx_q_used; - while (__sync_lock_test_and_set (xd->lockp[queue_id], 1)) + while (clib_atomic_test_and_set (xd->lockp[queue_id])) /* zzzz */ queue_id = (queue_id + 1) % xd->tx_q_used; } - if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_HQOS)) /* HQoS ON */ + if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD)) { /* no wrap, transmit in one burst */ - dpdk_device_hqos_per_worker_thread_t *hqos = - &xd->hqos_wt[vm->thread_index]; - - ASSERT (hqos->swq != NULL); - - dpdk_hqos_metadata_set (hqos, - &tx_vector[tx_tail], tx_head - tx_tail); - rv = rte_ring_sp_enqueue_burst (hqos->swq, - (void **) &tx_vector[tx_tail], - (uint16_t) (tx_head - tx_tail), 0); - } - else if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD)) - { - /* no wrap, transmit in one burst */ - rv = rte_eth_tx_burst (xd->device_index, - (uint16_t) queue_id, - &tx_vector[tx_tail], - (uint16_t) (tx_head - tx_tail)); + n_sent = rte_eth_tx_burst (xd->port_id, queue_id, mb, n_left); + n_retry--; } else { ASSERT (0); - rv = 0; + n_sent = 0; } if (PREDICT_FALSE (xd->lockp != 0)) - *xd->lockp[queue_id] = 0; + clib_atomic_release (xd->lockp[queue_id]); - if (PREDICT_FALSE (rv < 0)) + if (PREDICT_FALSE (n_sent < 0)) { // emit non-fatal message, bump counter vnet_main_t *vnm = dm->vnet_main; @@ -286,53 +206,26 @@ static_always_inline xd->hw_if_index)->tx_node_index; vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1); - clib_warning ("rte_eth_tx_burst[%d]: error %d", xd->device_index, - rv); - return n_packets; // untransmitted packets + clib_warning ("rte_eth_tx_burst[%d]: error %d", + xd->port_id, n_sent); + return n_left; // untransmitted packets } - ring->tx_tail += (u16) rv; - n_packets -= (uint16_t) rv; + n_left -= n_sent; + mb += n_sent; } - while (rv && n_packets && (n_retry > 0)); + while (n_sent && n_left && (n_retry > 0)); - return n_packets; + return n_left; } static_always_inline void -dpdk_prefetch_buffer_by_index (vlib_main_t * vm, u32 bi) +dpdk_prefetch_buffer (vlib_main_t * vm, struct rte_mbuf *mb) { - vlib_buffer_t *b; - struct rte_mbuf *mb; - b = vlib_get_buffer (vm, bi); - mb = rte_mbuf_from_vlib_buffer (b); - CLIB_PREFETCH (mb, 2 * CLIB_CACHE_LINE_BYTES, STORE); + vlib_buffer_t *b = 
vlib_buffer_from_rte_mbuf (mb); + CLIB_PREFETCH (mb, sizeof (struct rte_mbuf), STORE); CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD); } -static_always_inline void -dpdk_buffer_recycle (vlib_main_t * vm, vlib_node_runtime_t * node, - vlib_buffer_t * b, u32 bi, struct rte_mbuf **mbp) -{ - dpdk_main_t *dm = &dpdk_main; - u32 my_cpu = vm->thread_index; - struct rte_mbuf *mb_new; - - if (PREDICT_FALSE (b->flags & VLIB_BUFFER_RECYCLE) == 0) - return; - - mb_new = dpdk_replicate_packet_mb (b); - if (PREDICT_FALSE (mb_new == 0)) - { - vlib_error_count (vm, node->node_index, - DPDK_TX_FUNC_ERROR_REPL_FAIL, 1); - b->flags |= VLIB_BUFFER_REPL_FAIL; - } - else - *mbp = mb_new; - - vec_add1 (dm->recycle[my_cpu], bi); -} - static_always_inline void dpdk_buffer_tx_offload (dpdk_device_t * xd, vlib_buffer_t * b, struct rte_mbuf *mb) @@ -341,10 +234,11 @@ dpdk_buffer_tx_offload (dpdk_device_t * xd, vlib_buffer_t * b, u32 tcp_cksum = b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM; u32 udp_cksum = b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM; int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4; + u32 tso = b->flags & VNET_BUFFER_F_GSO; u64 ol_flags; /* Is there any work for us? */ - if (PREDICT_TRUE ((ip_cksum | tcp_cksum | udp_cksum) == 0)) + if (PREDICT_TRUE ((ip_cksum | tcp_cksum | udp_cksum | tso) == 0)) return; mb->l2_len = vnet_buffer (b)->l3_hdr_offset - b->current_data; @@ -356,6 +250,14 @@ dpdk_buffer_tx_offload (dpdk_device_t * xd, vlib_buffer_t * b, ol_flags |= ip_cksum ? PKT_TX_IP_CKSUM : 0; ol_flags |= tcp_cksum ? PKT_TX_TCP_CKSUM : 0; ol_flags |= udp_cksum ? PKT_TX_UDP_CKSUM : 0; + ol_flags |= tso ? (tcp_cksum ? PKT_TX_TCP_SEG : PKT_TX_UDP_SEG) : 0; + + if (tso) + { + mb->l4_len = vnet_buffer2 (b)->gso_l4_hdr_sz; + mb->tso_segsz = vnet_buffer2 (b)->gso_size; + } + mb->ol_flags |= ol_flags; /* we are trying to help compiler here by using local ol_flags with known @@ -366,293 +268,211 @@ dpdk_buffer_tx_offload (dpdk_device_t * xd, vlib_buffer_t * b, /* * Transmits the packets on the frame to the interface associated with the - * node. It first copies packets on the frame to a tx_vector containing the - * rte_mbuf pointers. It then passes this vector to tx_burst_vector_internal - * which calls the dpdk tx_burst function. + * node. It first copies packets on the frame to a per-thread arrays + * containing the rte_mbuf pointers. 
*/ -uword -CLIB_MULTIARCH_FN (dpdk_interface_tx) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * f) +VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * f) { dpdk_main_t *dm = &dpdk_main; vnet_interface_output_runtime_t *rd = (void *) node->runtime_data; dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance); u32 n_packets = f->n_vectors; u32 n_left; - u32 *from; - struct rte_mbuf **tx_vector; - u16 i; - u16 nb_tx_desc = xd->nb_tx_desc; - int queue_id; - u32 my_cpu; - u32 tx_pkts = 0; - tx_ring_hdr_t *ring; - u32 n_on_ring; - - my_cpu = vm->thread_index; - - queue_id = my_cpu; - - tx_vector = xd->tx_vectors[queue_id]; - ring = vec_header (tx_vector, sizeof (*ring)); - - n_on_ring = ring->tx_head - ring->tx_tail; - from = vlib_frame_vector_args (f); + u32 thread_index = vm->thread_index; + int queue_id = thread_index; + u32 tx_pkts = 0, all_or_flags = 0; + dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data, + thread_index); + struct rte_mbuf **mb; + vlib_buffer_t *b[4]; ASSERT (n_packets <= VLIB_FRAME_SIZE); - if (PREDICT_FALSE (n_on_ring + n_packets > nb_tx_desc)) - { - /* - * Overflowing the ring should never happen. - * If it does then drop the whole frame. - */ - vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_RING_FULL, - n_packets); - - while (n_packets--) - { - u32 bi0 = from[n_packets]; - vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0); - struct rte_mbuf *mb0 = rte_mbuf_from_vlib_buffer (b0); - rte_pktmbuf_free (mb0); - } - return n_on_ring; - } - - if (PREDICT_FALSE (dm->tx_pcap_enable)) - { - n_left = n_packets; - while (n_left > 0) - { - u32 bi0 = from[0]; - vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0); - if (dm->pcap_sw_if_index == 0 || - dm->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_TX]) - pcap_add_buffer (&dm->pcap_main, vm, bi0, 512); - from++; - n_left--; - } - } + /* calculate rte_mbuf pointers out of buffer indices */ + vlib_get_buffers_with_offset (vm, vlib_frame_vector_args (f), + (void **) ptd->mbufs, n_packets, + -(i32) sizeof (struct rte_mbuf)); - from = vlib_frame_vector_args (f); n_left = n_packets; - i = ring->tx_head % nb_tx_desc; + mb = ptd->mbufs; +#if (CLIB_N_PREFETCHES >= 8) while (n_left >= 8) { - u32 bi0, bi1, bi2, bi3; - struct rte_mbuf *mb0, *mb1, *mb2, *mb3; - vlib_buffer_t *b0, *b1, *b2, *b3; u32 or_flags; - dpdk_prefetch_buffer_by_index (vm, from[4]); - dpdk_prefetch_buffer_by_index (vm, from[5]); - dpdk_prefetch_buffer_by_index (vm, from[6]); - dpdk_prefetch_buffer_by_index (vm, from[7]); + dpdk_prefetch_buffer (vm, mb[4]); + dpdk_prefetch_buffer (vm, mb[5]); + dpdk_prefetch_buffer (vm, mb[6]); + dpdk_prefetch_buffer (vm, mb[7]); - bi0 = from[0]; - bi1 = from[1]; - bi2 = from[2]; - bi3 = from[3]; - from += 4; + b[0] = vlib_buffer_from_rte_mbuf (mb[0]); + b[1] = vlib_buffer_from_rte_mbuf (mb[1]); + b[2] = vlib_buffer_from_rte_mbuf (mb[2]); + b[3] = vlib_buffer_from_rte_mbuf (mb[3]); - b0 = vlib_get_buffer (vm, bi0); - b1 = vlib_get_buffer (vm, bi1); - b2 = vlib_get_buffer (vm, bi2); - b3 = vlib_get_buffer (vm, bi3); + or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags; + all_or_flags |= or_flags; - or_flags = b0->flags | b1->flags | b2->flags | b3->flags; - - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3); + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]); + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]); + 
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[2]); + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[3]); if (or_flags & VLIB_BUFFER_NEXT_PRESENT) { - dpdk_validate_rte_mbuf (vm, b0, 1); - dpdk_validate_rte_mbuf (vm, b1, 1); - dpdk_validate_rte_mbuf (vm, b2, 1); - dpdk_validate_rte_mbuf (vm, b3, 1); + dpdk_validate_rte_mbuf (vm, b[0], 1); + dpdk_validate_rte_mbuf (vm, b[1], 1); + dpdk_validate_rte_mbuf (vm, b[2], 1); + dpdk_validate_rte_mbuf (vm, b[3], 1); } else { - dpdk_validate_rte_mbuf (vm, b0, 0); - dpdk_validate_rte_mbuf (vm, b1, 0); - dpdk_validate_rte_mbuf (vm, b2, 0); - dpdk_validate_rte_mbuf (vm, b3, 0); + dpdk_validate_rte_mbuf (vm, b[0], 0); + dpdk_validate_rte_mbuf (vm, b[1], 0); + dpdk_validate_rte_mbuf (vm, b[2], 0); + dpdk_validate_rte_mbuf (vm, b[3], 0); } - mb0 = rte_mbuf_from_vlib_buffer (b0); - mb1 = rte_mbuf_from_vlib_buffer (b1); - mb2 = rte_mbuf_from_vlib_buffer (b2); - mb3 = rte_mbuf_from_vlib_buffer (b3); - if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) && (or_flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)))) { - dpdk_buffer_tx_offload (xd, b0, mb0); - dpdk_buffer_tx_offload (xd, b1, mb1); - dpdk_buffer_tx_offload (xd, b2, mb2); - dpdk_buffer_tx_offload (xd, b3, mb3); + dpdk_buffer_tx_offload (xd, b[0], mb[0]); + dpdk_buffer_tx_offload (xd, b[1], mb[1]); + dpdk_buffer_tx_offload (xd, b[2], mb[2]); + dpdk_buffer_tx_offload (xd, b[3], mb[3]); + } + + if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE)) + { + if (b[0]->flags & VLIB_BUFFER_IS_TRACED) + dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]); + if (b[1]->flags & VLIB_BUFFER_IS_TRACED) + dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]); + if (b[2]->flags & VLIB_BUFFER_IS_TRACED) + dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[2]); + if (b[3]->flags & VLIB_BUFFER_IS_TRACED) + dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[3]); } - if (PREDICT_FALSE (or_flags & VLIB_BUFFER_RECYCLE)) + mb += 4; + n_left -= 4; + } +#elif (CLIB_N_PREFETCHES >= 4) + while (n_left >= 4) + { + vlib_buffer_t *b2, *b3; + u32 or_flags; + + CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, STORE); + b2 = vlib_buffer_from_rte_mbuf (mb[2]); + CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, LOAD); + b3 = vlib_buffer_from_rte_mbuf (mb[3]); + CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, LOAD); + + b[0] = vlib_buffer_from_rte_mbuf (mb[0]); + b[1] = vlib_buffer_from_rte_mbuf (mb[1]); + + or_flags = b[0]->flags | b[1]->flags; + all_or_flags |= or_flags; + + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]); + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]); + + if (or_flags & VLIB_BUFFER_NEXT_PRESENT) { - dpdk_buffer_recycle (vm, node, b0, bi0, &mb0); - dpdk_buffer_recycle (vm, node, b1, bi1, &mb1); - dpdk_buffer_recycle (vm, node, b2, bi2, &mb2); - dpdk_buffer_recycle (vm, node, b3, bi3, &mb3); - - /* dont enqueue packets if replication failed as they must - be sent back to recycle */ - if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0)) - tx_vector[i++ % nb_tx_desc] = mb0; - if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_REPL_FAIL) == 0)) - tx_vector[i++ % nb_tx_desc] = mb1; - if (PREDICT_TRUE ((b2->flags & VLIB_BUFFER_REPL_FAIL) == 0)) - tx_vector[i++ % nb_tx_desc] = mb2; - if (PREDICT_TRUE ((b3->flags & VLIB_BUFFER_REPL_FAIL) == 0)) - tx_vector[i++ % nb_tx_desc] = mb3; + dpdk_validate_rte_mbuf (vm, b[0], 1); + dpdk_validate_rte_mbuf (vm, b[1], 1); } else { - if (PREDICT_FALSE (i + 3 >= nb_tx_desc)) - { - tx_vector[i++ % nb_tx_desc] = mb0; - 
tx_vector[i++ % nb_tx_desc] = mb1; - tx_vector[i++ % nb_tx_desc] = mb2; - tx_vector[i++ % nb_tx_desc] = mb3; - i %= nb_tx_desc; - } - else - { - tx_vector[i++] = mb0; - tx_vector[i++] = mb1; - tx_vector[i++] = mb2; - tx_vector[i++] = mb3; - } + dpdk_validate_rte_mbuf (vm, b[0], 0); + dpdk_validate_rte_mbuf (vm, b[1], 0); } + if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) && + (or_flags & + (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM + | VNET_BUFFER_F_OFFLOAD_IP_CKSUM + | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)))) + { + dpdk_buffer_tx_offload (xd, b[0], mb[0]); + dpdk_buffer_tx_offload (xd, b[1], mb[1]); + } if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE)) { - if (b0->flags & VLIB_BUFFER_IS_TRACED) - dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0); - if (b1->flags & VLIB_BUFFER_IS_TRACED) - dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi1, b1); - if (b2->flags & VLIB_BUFFER_IS_TRACED) - dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi2, b2); - if (b3->flags & VLIB_BUFFER_IS_TRACED) - dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi3, b3); + if (b[0]->flags & VLIB_BUFFER_IS_TRACED) + dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]); + if (b[1]->flags & VLIB_BUFFER_IS_TRACED) + dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]); } - n_left -= 4; + mb += 2; + n_left -= 2; } +#endif + while (n_left > 0) { - u32 bi0; - struct rte_mbuf *mb0; - vlib_buffer_t *b0; - - bi0 = from[0]; - from++; + b[0] = vlib_buffer_from_rte_mbuf (mb[0]); + all_or_flags |= b[0]->flags; + VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]); - b0 = vlib_get_buffer (vm, bi0); - VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0); - - dpdk_validate_rte_mbuf (vm, b0, 1); - - mb0 = rte_mbuf_from_vlib_buffer (b0); - dpdk_buffer_tx_offload (xd, b0, mb0); - dpdk_buffer_recycle (vm, node, b0, bi0, &mb0); + dpdk_validate_rte_mbuf (vm, b[0], 1); + dpdk_buffer_tx_offload (xd, b[0], mb[0]); if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE)) - if (b0->flags & VLIB_BUFFER_IS_TRACED) - dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0); + if (b[0]->flags & VLIB_BUFFER_IS_TRACED) + dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]); - if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0)) - { - tx_vector[i % nb_tx_desc] = mb0; - i++; - } + mb++; n_left--; } - /* account for additional packets in the ring */ - ring->tx_head += n_packets; - n_on_ring = ring->tx_head - ring->tx_tail; - /* transmit as many packets as possible */ - n_packets = tx_burst_vector_internal (vm, xd, tx_vector); - - /* - * tx_pkts is the number of packets successfully transmitted - * This is the number originally on ring minus the number remaining on ring - */ - tx_pkts = n_on_ring - n_packets; + tx_pkts = n_packets = mb - ptd->mbufs; + n_left = tx_burst_vector_internal (vm, xd, ptd->mbufs, n_packets); { /* If there is no callback then drop any non-transmitted packets */ - if (PREDICT_FALSE (n_packets)) + if (PREDICT_FALSE (n_left)) { + tx_pkts -= n_left; vlib_simple_counter_main_t *cm; vnet_main_t *vnm = vnet_get_main (); cm = vec_elt_at_index (vnm->interface_main.sw_if_counters, VNET_INTERFACE_COUNTER_TX_ERROR); - vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index, - n_packets); + vlib_increment_simple_counter (cm, thread_index, xd->sw_if_index, + n_left); vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP, - n_packets); + n_left); - while (n_packets--) - rte_pktmbuf_free (tx_vector[ring->tx_tail + n_packets]); + while (n_left--) + rte_pktmbuf_free (ptd->mbufs[n_packets - n_left - 1]); } - - /* Reset head/tail to avoid 
unnecessary wrap */ - ring->tx_head = 0; - ring->tx_tail = 0; } - /* Recycle replicated buffers */ - if (PREDICT_FALSE (vec_len (dm->recycle[my_cpu]))) - { - vlib_buffer_free (vm, dm->recycle[my_cpu], - vec_len (dm->recycle[my_cpu])); - _vec_len (dm->recycle[my_cpu]) = 0; - } - - ASSERT (ring->tx_head >= ring->tx_tail); - return tx_pkts; } -#ifndef CLIB_MULTIARCH_VARIANT static void dpdk_clear_hw_interface_counters (u32 instance) { dpdk_main_t *dm = &dpdk_main; dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance); - /* - * Set the "last_cleared_stats" to the current stats, so that - * things appear to clear from a display perspective. - */ - dpdk_update_counters (xd, vlib_time_now (dm->vlib_main)); - - clib_memcpy (&xd->last_cleared_stats, &xd->stats, sizeof (xd->stats)); - clib_memcpy (xd->last_cleared_xstats, xd->xstats, - vec_len (xd->last_cleared_xstats) * - sizeof (xd->last_cleared_xstats[0])); - + rte_eth_stats_reset (xd->port_id); + rte_eth_xstats_reset (xd->port_id); } static clib_error_t * @@ -668,8 +488,6 @@ dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags) if (is_up) { - vnet_hw_interface_set_flags (vnm, xd->hw_if_index, - VNET_HW_INTERFACE_FLAG_LINK_UP); if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0) dpdk_device_start (xd); xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP; @@ -747,25 +565,25 @@ dpdk_subif_add_del_function (vnet_main_t * vnm, goto done; } - vlan_offload = rte_eth_dev_get_vlan_offload (xd->device_index); + vlan_offload = rte_eth_dev_get_vlan_offload (xd->port_id); vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; - if ((r = rte_eth_dev_set_vlan_offload (xd->device_index, vlan_offload))) + if ((r = rte_eth_dev_set_vlan_offload (xd->port_id, vlan_offload))) { xd->num_subifs = prev_subifs; err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d", - xd->device_index, r); + xd->port_id, r); goto done; } if ((r = - rte_eth_dev_vlan_filter (xd->device_index, t->sub.eth.outer_vlan_id, - is_add))) + rte_eth_dev_vlan_filter (xd->port_id, + t->sub.eth.outer_vlan_id, is_add))) { xd->num_subifs = prev_subifs; err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d", - xd->device_index, r); + xd->port_id, r); goto done; } @@ -781,38 +599,25 @@ done: /* *INDENT-OFF* */ VNET_DEVICE_CLASS (dpdk_device_class) = { .name = "dpdk", - .tx_function = dpdk_interface_tx, .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR, .tx_function_error_strings = dpdk_tx_func_error_strings, .format_device_name = format_dpdk_device_name, .format_device = format_dpdk_device, - .format_tx_trace = format_dpdk_tx_dma_trace, + .format_tx_trace = format_dpdk_tx_trace, .clear_counters = dpdk_clear_hw_interface_counters, .admin_up_down_function = dpdk_interface_admin_up_down, .subif_add_del_function = dpdk_subif_add_del_function, .rx_redirect_to_node = dpdk_set_interface_next_node, .mac_addr_change_function = dpdk_set_mac_address, + .mac_addr_add_del_function = dpdk_add_del_mac_address, + .format_flow = format_dpdk_flow, + .flow_ops_function = dpdk_flow_ops_fn, }; /* *INDENT-ON* */ -#if __x86_64__ -vlib_node_function_t __clib_weak dpdk_interface_tx_avx512; -vlib_node_function_t __clib_weak dpdk_interface_tx_avx2; -static void __clib_constructor -dpdk_interface_tx_multiarch_select (void) -{ - if (dpdk_interface_tx_avx512 && clib_cpu_supports_avx512f ()) - dpdk_device_class.tx_function = dpdk_interface_tx_avx512; - else if (dpdk_interface_tx_avx2 && clib_cpu_supports_avx2 ()) - dpdk_device_class.tx_function = dpdk_interface_tx_avx2; -} -#endif -#endif - #define 
UP_DOWN_FLAG_EVENT 1 -#ifndef CLIB_MULTIARCH_VARIANT -uword +static uword admin_up_down_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f) { @@ -856,14 +661,13 @@ admin_up_down_process (vlib_main_t * vm, } /* *INDENT-OFF* */ -VLIB_REGISTER_NODE (admin_up_down_process_node,static) = { +VLIB_REGISTER_NODE (admin_up_down_process_node) = { .function = admin_up_down_process, .type = VLIB_NODE_TYPE_PROCESS, .name = "admin-up-down-process", .process_log2_n_stack_bytes = 17, // 256KB }; /* *INDENT-ON* */ -#endif /* * fd.io coding-style-patch-verification: ON
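
Illustrative sketch, not part of the patch above: the rewritten TX path in this diff rests on two ideas. First, each vlib_buffer_t is laid out immediately after its rte_mbuf header, so a vector of buffer indices can be turned into mbuf pointers with a constant negative offset (the patch calls vlib_get_buffers_with_offset with -(i32) sizeof (struct rte_mbuf)). Second, rte_eth_tx_burst may accept fewer packets than offered, so tx_burst_vector_internal advances past what was sent and retries a bounded number of times (n_retry = 16), returning whatever is still unsent for the caller to drop and count. The standalone program below mimics both patterns with mock types; mock_mbuf_t, mock_vlib_buffer_t, mock_tx_burst and mbuf_from_buffer are hypothetical stand-ins, not DPDK or VPP APIs.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Mock stand-ins for struct rte_mbuf and vlib_buffer_t, laid out
 * back-to-back the way a single VPP buffer element places them.       */
typedef struct { uint16_t data_len; } mock_mbuf_t;
typedef struct { uint32_t flags; } mock_vlib_buffer_t;

typedef struct
{
  mock_mbuf_t mbuf;		/* mbuf header sits first ...            */
  mock_vlib_buffer_t buffer;	/* ... vlib_buffer_t follows immediately */
} buffer_element_t;

/* Pattern (1): recover the mbuf in front of a buffer by subtracting a
 * constant offset, as vlib_get_buffers_with_offset does in the patch.  */
static inline mock_mbuf_t *
mbuf_from_buffer (mock_vlib_buffer_t * b)
{
  return (mock_mbuf_t *) ((uint8_t *) b - sizeof (mock_mbuf_t));
}

/* Pretend NIC queue that accepts at most 3 packets per call.           */
static uint16_t
mock_tx_burst (mock_mbuf_t ** mb, uint16_t n)
{
  (void) mb;
  return n < 3 ? n : 3;
}

/* Pattern (2): bounded retry loop, advancing past what was sent.       */
static uint32_t
tx_burst_with_retry (mock_mbuf_t ** mb, uint32_t n_left)
{
  int n_retry = 16;
  while (n_left && n_retry--)
    {
      uint16_t n_sent = mock_tx_burst (mb, (uint16_t) n_left);
      if (n_sent == 0)
	break;			/* queue not draining; caller drops the rest */
      mb += n_sent;
      n_left -= n_sent;
    }
  return n_left;		/* 0 in the normal case */
}

int
main (void)
{
  buffer_element_t elts[8];
  mock_mbuf_t *mb[8];
  int i;

  memset (elts, 0, sizeof (elts));
  for (i = 0; i < 8; i++)
    mb[i] = mbuf_from_buffer (&elts[i].buffer);

  printf ("untransmitted: %u\n", tx_burst_with_retry (mb, 8));
  return 0;
}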