X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fdevices%2Fvirtio%2Fvhost_user_output.c;h=797c1c5ff929d355104251df1234571a3c7b15bb;hb=4208a4ce8;hp=b41583708b3e03c6c2b94c2e6a769c8421025f0b;hpb=067cd6229a47ea3ba8b59a2a04090e80afb5bd2c;p=vpp.git

diff --git a/src/vnet/devices/virtio/vhost_user_output.c b/src/vnet/devices/virtio/vhost_user_output.c
index b41583708b3..797c1c5ff92 100644
--- a/src/vnet/devices/virtio/vhost_user_output.c
+++ b/src/vnet/devices/virtio/vhost_user_output.c
@@ -17,6 +17,7 @@
  *------------------------------------------------------------------
  */
 
+#include <stddef.h>
 #include <fcntl.h>		/* for open */
 #include <sys/ioctl.h>
 #include <sys/socket.h>
@@ -39,6 +40,7 @@
 #include <vnet/devices/devices.h>
 #include <vnet/feature/feature.h>
 
+#include <vnet/devices/virtio/virtio.h>
 #include <vnet/devices/virtio/vhost_user.h>
 #include <vnet/devices/virtio/vhost_user_inline.h>
 
@@ -53,7 +55,7 @@
  */
 #define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 40)
 
-vnet_device_class_t vhost_user_device_class;
+extern vnet_device_class_t vhost_user_device_class;
 
 #define foreach_vhost_user_tx_func_error \
   _(NONE, "no error")  \
@@ -100,14 +102,17 @@ vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
 {
   // FIXME: check if the new dev instance is already used
   vhost_user_main_t *vum = &vhost_user_main;
+  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
+					      hi->dev_instance);
+
   vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
 			   hi->dev_instance, ~0);
 
   vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
     new_dev_instance;
 
-  DBG_SOCK ("renumbered vhost-user interface dev_instance %d to %d",
-	    hi->dev_instance, new_dev_instance);
+  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
+		hi->dev_instance, new_dev_instance);
 
   return 0;
 }
@@ -119,7 +124,7 @@ vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
 static_always_inline int
 vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
 {
-  return __sync_lock_test_and_set (vui->vring_locks[qid], 1);
+  return clib_atomic_test_and_set (vui->vring_locks[qid]);
 }
 
 /**
@@ -138,7 +143,7 @@ vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
 static_always_inline void
 vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
 {
-  *vui->vring_locks[qid] = 0;
+  clib_atomic_release (vui->vring_locks[qid]);
 }
 
 static_always_inline void
@@ -152,7 +157,7 @@ vhost_user_tx_trace (vhost_trace_t * t,
   vring_desc_t *hdr_desc = 0;
   u32 hint = 0;
 
-  memset (t, 0, sizeof (*t));
+  clib_memset (t, 0, sizeof (*t));
   t->device_index = vui - vum->vhost_user_interfaces;
   t->qid = qid;
 
@@ -202,8 +207,8 @@ vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
       CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
       CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);
 
-      clib_memcpy (dst0, (void *) cpy[0].src, cpy[0].len);
-      clib_memcpy (dst1, (void *) cpy[1].src, cpy[1].len);
+      clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
+      clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);
 
       vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
       vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
@@ -215,7 +220,7 @@ vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
     {
       if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
 	return 1;
-      clib_memcpy (dst0, (void *) cpy->src, cpy->len);
+      clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
       vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
       copy_len -= 1;
       cpy += 1;
@@ -223,13 +228,56 @@ vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
   return 0;
 }
 
+static_always_inline void
+vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
+			      virtio_net_hdr_t * hdr)
+{
+  /* checksum offload */
+  if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+    {
+      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      hdr->csum_start = vnet_buffer (b)->l4_hdr_offset;
+      hdr->csum_offset = offsetof (udp_header_t, checksum);
+    }
+  else if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+    {
+      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      hdr->csum_start = vnet_buffer (b)->l4_hdr_offset;
+      hdr->csum_offset = offsetof (tcp_header_t, checksum);
+    }
 
-uword
-CLIB_MULTIARCH_FN (vhost_user_tx) (vlib_main_t * vm,
-				   vlib_node_runtime_t * node,
-				   vlib_frame_t * frame)
+  /* GSO offload */
+  if (b->flags & VNET_BUFFER_F_GSO)
+    {
+      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+	{
+	  if ((b->flags & VNET_BUFFER_F_IS_IP4) &&
+	      (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO4)))
+	    {
+	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
+	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+	    }
+	  else if ((b->flags & VNET_BUFFER_F_IS_IP6) &&
+		   (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO6)))
+	    {
+	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
+	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+	    }
+	}
+      else if ((vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_UFO)) &&
+	       (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
+	{
+	  hdr->gso_size = vnet_buffer2 (b)->gso_size;
+	  hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+	}
+    }
+}
+
+VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
+						   vlib_node_runtime_t *
+						   node, vlib_frame_t * frame)
 {
-  u32 *buffers = vlib_frame_args (frame);
+  u32 *buffers = vlib_frame_vector_args (frame);
   u32 n_left = frame->n_vectors;
   vhost_user_main_t *vum = &vhost_user_main;
   vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
@@ -239,6 +287,7 @@ CLIB_MULTIARCH_FN (vhost_user_tx) (vlib_main_t * vm,
   vhost_user_vring_t *rxvq;
   u8 error;
   u32 thread_index = vm->thread_index;
+  vhost_cpu_t *cpu = &vum->cpus[thread_index];
   u32 map_hint = 0;
   u8 retry = 8;
   u16 copy_len;
@@ -250,16 +299,21 @@ CLIB_MULTIARCH_FN (vhost_user_tx) (vlib_main_t * vm,
       goto done3;
     }
 
-  if (PREDICT_FALSE (!vui->is_up))
+  if (PREDICT_FALSE (!vui->is_ready))
     {
       error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
       goto done3;
     }
 
-  qid =
-    VHOST_VRING_IDX_RX (*vec_elt_at_index
-			(vui->per_cpu_tx_qid, thread_index));
+  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
+					       thread_index));
   rxvq = &vui->vrings[qid];
+  if (PREDICT_FALSE (rxvq->avail == 0))
+    {
+      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
+      goto done3;
+    }
+
   if (PREDICT_FALSE (vui->use_tx_spinlock))
     vhost_user_vring_lock (vui, qid);
 
@@ -283,11 +337,9 @@ retry:
 
       if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 	{
-	  vum->cpus[thread_index].current_trace =
-	    vlib_add_trace (vm, node, b0,
-			    sizeof (*vum->cpus[thread_index].current_trace));
-	  vhost_user_tx_trace (vum->cpus[thread_index].current_trace,
-			       vui, qid / 2, b0, rxvq);
+	  cpu->current_trace = vlib_add_trace (vm, node, b0,
+					       sizeof (*cpu->current_trace));
+	  vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
 	}
 
       if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
@@ -327,15 +379,18 @@ retry:
 	    {
 	      // Get a header from the header array
-	      virtio_net_hdr_mrg_rxbuf_t *hdr =
-		&vum->cpus[thread_index].tx_headers[tx_headers_len];
+	      virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
 	      tx_headers_len++;
 	      hdr->hdr.flags = 0;
-	      hdr->hdr.gso_type = 0;
+	      hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
 	      hdr->num_buffers = 1;	//This is local, no need to check
 
+	      /* Guest supports csum offload? */
+	      if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM))
+		vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);
+
 	      // Prepare a copy order executed later for the header
-	      vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
+	      vhost_copy_t *cpy = &cpu->copy[copy_len];
 	      copy_len++;
 	      cpy->len = vui->virtio_net_hdr_sz;
 	      cpy->dst = buffer_map_addr;
@@ -360,7 +415,7 @@ retry:
 	  else if (vui->virtio_net_hdr_sz == 12)	//MRG is available
 	    {
 	      virtio_net_hdr_mrg_rxbuf_t *hdr =
-		&vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
+		&cpu->tx_headers[tx_headers_len - 1];
 
 	      //Move from available to used buffer
 	      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
@@ -422,7 +477,7 @@ retry:
 	    }
 
 	  {
-	    vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
+	    vhost_copy_t *cpy = &cpu->copy[copy_len];
 	    copy_len++;
 	    cpy->len = bytes_left;
 	    cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
@@ -465,21 +520,19 @@ retry:
 
       if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 	{
-	  vum->cpus[thread_index].current_trace->hdr =
-	    vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
+	  cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
 	}
 
       n_left--;	//At the end for error counting when 'goto done' is invoked
 
       /*
        * Do the copy periodically to prevent
-       * vum->cpus[thread_index].copy array overflow and corrupt memory
+       * cpu->copy array overflow and corrupt memory
        */
       if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
 	{
-	  if (PREDICT_FALSE
-	      (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
-				   copy_len, &map_hint)))
+	  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
+						 &map_hint)))
 	    {
 	      vlib_error_count (vm, node->node_index,
 				VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
@@ -496,9 +549,8 @@ retry:
 done:
   //Do the memory copies
-  if (PREDICT_FALSE
-      (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
-			   copy_len, &map_hint)))
+  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
+					 &map_hint)))
     {
       vlib_error_count (vm, node->node_index,
 			VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
@@ -548,7 +600,7 @@ done3:
 				       thread_index, vui->sw_if_index, n_left);
     }
 
-  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
   return frame->n_vectors;
 }
 
@@ -607,8 +659,8 @@ vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
     txvq->used->flags = 0;
   else
     {
-      clib_warning ("BUG: unhandled mode %d changed for if %d queue %d", mode,
-		    hw_if_index, qid);
+      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
+		  hw_if_index, qid);
       return clib_error_return (0, "unsupported");
     }
 
@@ -623,20 +675,24 @@ vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
   vhost_user_main_t *vum = &vhost_user_main;
   vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
 					      hif->dev_instance);
-  u32 hw_flags = 0;
+  u8 link_old, link_new;
+
+  link_old = vui_is_link_up (vui);
+
   vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
 
-  hw_flags = vui->admin_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0;
-  vnet_hw_interface_set_flags (vnm, vui->hw_if_index, hw_flags);
+  link_new = vui_is_link_up (vui);
+
+  if (link_old != link_new)
+    vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
+ VNET_HW_INTERFACE_FLAG_LINK_UP : 0); return /* no error */ 0; } -#ifndef CLIB_MARCH_VARIANT /* *INDENT-OFF* */ VNET_DEVICE_CLASS (vhost_user_device_class) = { .name = "vhost-user", - .tx_function = vhost_user_tx, .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR, .tx_function_error_strings = vhost_user_tx_func_error_strings, .format_device_name = format_vhost_user_interface_name, @@ -646,20 +702,6 @@ VNET_DEVICE_CLASS (vhost_user_device_class) = { .format_tx_trace = format_vhost_trace, }; -#if __x86_64__ -vlib_node_function_t __clib_weak vhost_user_tx_avx512; -vlib_node_function_t __clib_weak vhost_user_tx_avx2; -static void __clib_constructor -vhost_user_tx_multiarch_select (void) -{ - if (vhost_user_tx_avx512 && clib_cpu_supports_avx512f ()) - vhost_user_device_class.tx_function = vhost_user_tx_avx512; - else if (vhost_user_tx_avx2 && clib_cpu_supports_avx2 ()) - vhost_user_device_class.tx_function = vhost_user_tx_avx2; -} -#endif -#endif - /* *INDENT-ON* */ /*
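The heart of this change is vhost_user_handle_tx_offload(), which translates VPP's per-buffer offload flags into the virtio-net header that the guest driver consumes: csum_start/csum_offset describe a checksum the receiver still has to fill in, and gso_type/gso_size tell it how to resegment an oversized frame. The standalone C sketch below illustrates that header contract outside of VPP. The virtio_net_hdr layout and the VIRTIO_NET_HDR_* values follow the virtio specification (the same values linux/virtio_net.h defines); the tcp_hdr struct and the offset arithmetic are local stand-ins for illustration, not VPP types.

/* Minimal sketch of the virtio-net TX offload header contract.
 * Assumes virtio-spec constants; tcp_hdr below is a local stand-in. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1	/* csum_start/csum_offset valid */
#define VIRTIO_NET_HDR_GSO_NONE     0
#define VIRTIO_NET_HDR_GSO_TCPV4    1
#define VIRTIO_NET_HDR_GSO_UDP      3
#define VIRTIO_NET_HDR_GSO_TCPV6    4

struct virtio_net_hdr		/* per the virtio spec, little-endian */
{
  uint8_t flags;
  uint8_t gso_type;
  uint16_t hdr_len;
  uint16_t gso_size;
  uint16_t csum_start;
  uint16_t csum_offset;
};

/* Local stand-in for a TCP header; checksum sits at byte offset 16,
 * exactly as in the real header, which is all this example needs. */
struct tcp_hdr
{
  uint16_t src_port, dst_port;
  uint32_t seq, ack;
  uint16_t data_off_flags;
  uint16_t window;
  uint16_t checksum;
  uint16_t urgent;
};

int
main (void)
{
  struct virtio_net_hdr hdr = { 0 };
  uint16_t l4_offset = 14 + 20;	/* Ethernet + IPv4 without options */

  /* Checksum offload: point the receiver at the region to checksum
   * (everything from csum_start) and where to store the result. */
  hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
  hdr.csum_start = l4_offset;
  hdr.csum_offset = offsetof (struct tcp_hdr, checksum);	/* 16 */

  /* GSO: for an oversized TCP/IPv4 frame, also state the segment size. */
  hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
  hdr.gso_size = 1448;		/* MSS: 1500 MTU - 20 IP - 32 TCP w/ options */

  printf ("csum: start %u offset %u, gso: type %u size %u\n",
	  (unsigned) hdr.csum_start, (unsigned) hdr.csum_offset,
	  (unsigned) hdr.gso_type, (unsigned) hdr.gso_size);
  return 0;
}

This mirrors the structure of the diff's handler: the csum fields are set whenever the guest negotiated VIRTIO_NET_F_GUEST_CSUM, and the gso fields are only meaningful on buffers additionally marked for segmentation (VNET_BUFFER_F_GSO in VPP), gated by the negotiated GUEST_TSO4/TSO6/UFO feature bits.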