X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fmemif%2Fmemif.c;h=80cd9026c83f61c44bdbc68d44df01b30d3ee9ea;hb=refs%2Fchanges%2F40%2F29640%2F12;hp=05a7f83b3b112e43e8265106fb843713cf4e28cb;hpb=00b2d74d1f58b9357e8d955ad7410fb608490904;p=vpp.git

diff --git a/src/plugins/memif/memif.c b/src/plugins/memif/memif.c
index 05a7f83b3b1..80cd9026c83 100644
--- a/src/plugins/memif/memif.c
+++ b/src/plugins/memif/memif.c
@@ -36,6 +36,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -138,12 +139,6 @@ memif_disconnect (memif_if_t * mif, clib_error_t * err)
       mq = vec_elt_at_index (mif->rx_queues, i);
       if (mq->ring)
         {
-          int rv;
-          rv = vnet_hw_interface_unassign_rx_thread (vnm, mif->hw_if_index, i);
-          if (rv)
-            memif_log_warn (mif,
-                            "Unable to unassign interface %d, queue %d: rc=%d",
-                            mif->hw_if_index, i, rv);
           if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
             {
               memif_disconnect_free_zc_queue_buffer(mq, 1);
@@ -151,6 +146,8 @@ memif_disconnect (memif_if_t * mif, clib_error_t * err)
           mq->ring = 0;
         }
     }
+  vnet_hw_if_unregister_all_rx_queues (vnm, mif->hw_if_index);
+  vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);
 
   /* *INDENT-OFF* */
   vec_foreach_index (i, mif->tx_queues)
@@ -193,6 +190,17 @@ memif_disconnect (memif_if_t * mif, clib_error_t * err)
     clib_fifo_free (mif->msg_queue);
 }
 
+static clib_error_t *
+memif_int_fd_write_ready (clib_file_t * uf)
+{
+  memif_main_t *mm = &memif_main;
+  u16 qid = uf->private_data & 0xFFFF;
+  memif_if_t *mif = vec_elt_at_index (mm->interfaces, uf->private_data >> 16);
+
+  memif_log_warn (mif, "unexpected EPOLLOUT on RX for queue %u", qid);
+  return 0;
+}
+
 static clib_error_t *
 memif_int_fd_read_ready (clib_file_t * uf)
 {
@@ -207,11 +215,11 @@ memif_int_fd_read_ready (clib_file_t * uf)
   size = read (uf->file_descriptor, &b, sizeof (b));
   if (size < 0)
     {
-      memif_log_debug (mif, "Failed to read form socket");
+      memif_log_debug (mif, "Failed to read from socket");
       return 0;
     }
 
-  vnet_device_input_set_interrupt_pending (vnm, mif->hw_if_index, qid);
+  vnet_hw_if_rx_queue_set_int_pending (vnm, mq->queue_index);
   mq->int_count++;
 
   return 0;
@@ -255,6 +263,7 @@ memif_connect (memif_if_t * mif)
   /* *INDENT-ON* */
 
   template.read_function = memif_int_fd_read_ready;
+  template.write_function = memif_int_fd_write_ready;
 
   /* *INDENT-OFF* */
   vec_foreach_index (i, mif->tx_queues)
@@ -273,6 +282,7 @@ memif_connect (memif_if_t * mif)
     {
       memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, i);
       u32 ti;
+      u32 qi;
       int rv;
 
       mq->ring = mif->regions[mq->region].shm + mq->offset;
@@ -281,7 +291,9 @@ memif_connect (memif_if_t * mif)
          err = clib_error_return (0, "wrong cookie on tx ring %u", i);
          goto error;
        }
-
+      qi = vnet_hw_if_register_rx_queue (vnm, mif->hw_if_index, i,
+                                         VNET_HW_IF_RXQ_THREAD_ANY);
+      mq->queue_index = qi;
       if (mq->int_fd > -1)
        {
          template.file_descriptor = mq->int_fd;
@@ -290,26 +302,27 @@ memif_connect (memif_if_t * mif)
                                     format_memif_device_name,
                                     mif->dev_instance, i);
          memif_file_add (&mq->int_clib_file_index, &template);
+         vnet_hw_if_set_rx_queue_file_index (vnm, qi,
+                                             mq->int_clib_file_index);
        }
-      vnet_hw_interface_assign_rx_thread (vnm, mif->hw_if_index, i, ~0);
-      ti = vnet_get_device_input_thread_index (vnm, mif->hw_if_index, i);
+      ti = vnet_hw_if_get_rx_queue_thread_index (vnm, qi);
       mq->buffer_pool_index =
        vlib_buffer_pool_get_default_for_numa (vm, vlib_mains[ti]->numa_node);
-      rv = vnet_hw_interface_set_rx_mode (vnm, mif->hw_if_index, i,
-                                          VNET_HW_INTERFACE_RX_MODE_DEFAULT);
+      rv = vnet_hw_if_set_rx_queue_mode (vnm, qi, VNET_HW_IF_RX_MODE_DEFAULT);
+      vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);
+
       if (rv)
        memif_log_err
          (mif, "Warning: unable to set rx mode for interface %d queue %d: "
           "rc=%d", mif->hw_if_index, i, rv);
       else
        {
-         vnet_hw_interface_rx_mode rxmode;
-         vnet_hw_interface_get_rx_mode (vnm, mif->hw_if_index, i, &rxmode);
+         vnet_hw_if_rx_mode rxmode = vnet_hw_if_get_rx_queue_mode (vnm, qi);
 
-         if (rxmode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+         if (rxmode == VNET_HW_IF_RX_MODE_POLLING)
            mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
          else
-           vnet_device_input_set_interrupt_pending (vnm, mif->hw_if_index, i);
+           vnet_hw_if_rx_queue_set_int_pending (vnm, qi);
        }
     }
   /* *INDENT-ON* */
@@ -344,11 +357,11 @@ clib_error_t *
 memif_init_regions_and_queues (memif_if_t * mif)
 {
   vlib_main_t *vm = vlib_get_main ();
+  memif_socket_file_t *msf;
   memif_ring_t *ring = NULL;
-  int i, j;
+  int fd, i, j;
   u64 buffer_offset;
   memif_region_t *r;
-  clib_mem_vm_alloc_t alloc = { 0 };
   clib_error_t *err;
 
   ASSERT (vec_len (mif->regions) == 0);
@@ -364,16 +377,31 @@ memif_init_regions_and_queues (memif_if_t * mif)
   r->region_size += mif->run.buffer_size * (1 << mif->run.log2_ring_size) *
     (mif->run.num_s2m_rings + mif->run.num_m2s_rings);
 
-  alloc.name = "memif region";
-  alloc.size = r->region_size;
-  alloc.flags = CLIB_MEM_VM_F_SHARED;
+  if ((fd = clib_mem_vm_create_fd (CLIB_MEM_PAGE_SZ_DEFAULT, "%U region 0",
+                                   format_memif_device_name,
+                                   mif->dev_instance)) == -1)
+    {
+      err = clib_mem_get_last_error ();
+      goto error;
+    }
 
-  err = clib_mem_vm_ext_alloc (&alloc);
-  if (err)
-    goto error;
+  if ((ftruncate (fd, r->region_size)) == -1)
+    {
+      err = clib_error_return_unix (0, "ftruncate");
+      goto error;
+    }
 
-  r->fd = alloc.fd;
-  r->shm = alloc.addr;
+  msf = pool_elt_at_index (memif_main.socket_files, mif->socket_file_index);
+  r->shm = clib_mem_vm_map_shared (0, r->region_size, fd, 0, "memif%lu/%lu:0",
+                                   msf->socket_id, mif->id);
+
+  if (r->shm == CLIB_MEM_VM_MAP_FAILED)
+    {
+      err = clib_error_return_unix (0, "memif shared region map failed");
+      goto error;
+    }
+
+  r->fd = fd;
 
   if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
     {
@@ -524,14 +552,16 @@ memif_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
        case MEMIF_PROCESS_EVENT_STOP:
          enabled = 0;
          continue;
+       case MEMIF_PROCESS_EVENT_ADMIN_UP_DOWN:
+         break;
        default:
          ASSERT (0);
        }
 
       last_run_duration = start_time = vlib_time_now (vm);
       /* *INDENT-OFF* */
-      pool_foreach (mif, mm->interfaces,
-        ({
+      pool_foreach (mif, mm->interfaces)
+         {
          memif_socket_file_t * msf = vec_elt_at_index (mm->socket_files, mif->socket_file_index);
          /* Allow no more than 10us without a pause */
          now = vlib_time_now (vm);
@@ -580,7 +610,7 @@ memif_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
                  sock = clib_mem_alloc (sizeof(clib_socket_t));
                }
            }
-        }));
+         }
       /* *INDENT-ON* */
       last_run_duration = vlib_time_now (vm) - last_run_duration;
     }
@@ -1018,10 +1048,8 @@ memif_create_if (vlib_main_t * vm, memif_create_if_args_t * args)
     }
 
   hw = vnet_get_hw_interface (vnm, mif->hw_if_index);
-  hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
-  vnet_hw_interface_set_input_node (vnm, mif->hw_if_index,
-                                    memif_input_node.index);
-
+  hw->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE;
+  vnet_hw_if_set_input_node (vnm, mif->hw_if_index, memif_input_node.index);
   mhash_set (&msf->dev_instance_by_id, &mif->id, mif->dev_instance, 0);
 
   if (pool_elts (mm->interfaces) == 1)
@@ -1052,6 +1080,24 @@ done:
   return rv;
 }
 
+clib_error_t *
+memif_interface_admin_up_down (vnet_main_t *vnm, u32 hw_if_index, u32 flags)
+{
+  memif_main_t *mm = &memif_main;
+  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
+  static clib_error_t *error = 0;
+
+  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+    mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
+  else
+    mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;
+
+  vlib_process_signal_event (vnm->vlib_main, memif_process_node.index,
+                             MEMIF_PROCESS_EVENT_ADMIN_UP_DOWN, 0);
+  return error;
+}
+
 static clib_error_t *
 memif_init (vlib_main_t * vm)
 {
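
The sketch below is not part of the patch: it simply collects, in one place, the driver-side sequence that the memif_connect () and memif_disconnect () hunks above move to, i.e. the per-queue vnet_hw_if_* RX-queue API replacing the per-interface vnet_hw_interface_* RX API. Every vnet_hw_if_* call and enum value is taken from the diff itself; the function name, its parameters (hw_if_index, queue_id, int_fd, clib_file_index) and the rx_queue_funcs.h include path are assumptions made for illustration only.

/*
 * Illustrative sketch, assuming the new RX-queue API declarations live in
 * <vnet/interface/rx_queue_funcs.h>.  Mirrors the call sequence used by
 * the memif_connect () hunk; parameter names are hypothetical.
 */
#include <vnet/vnet.h>
#include <vnet/interface/rx_queue_funcs.h>

u32
example_rx_queue_setup (vnet_main_t * vnm, u32 hw_if_index, u16 queue_id,
                        int int_fd, u32 clib_file_index)
{
  /* register the queue; vnet picks the polling thread */
  u32 qi = vnet_hw_if_register_rx_queue (vnm, hw_if_index, queue_id,
                                         VNET_HW_IF_RXQ_THREAD_ANY);

  /* tie the interrupt eventfd's clib_file to the queue so interrupt
     mode can be driven from the file's read-ready callback */
  if (int_fd > -1)
    vnet_hw_if_set_rx_queue_file_index (vnm, qi, clib_file_index);

  /* pick the default mode, then rebuild the per-thread runtime data */
  vnet_hw_if_set_rx_queue_mode (vnm, qi, VNET_HW_IF_RX_MODE_DEFAULT);
  vnet_hw_if_update_runtime_data (vnm, hw_if_index);

  /* when not polling, an initial kick schedules the first dispatch */
  if (vnet_hw_if_get_rx_queue_mode (vnm, qi) != VNET_HW_IF_RX_MODE_POLLING)
    vnet_hw_if_rx_queue_set_int_pending (vnm, qi);

  return qi;
}

On teardown the inverse is simpler than before: one vnet_hw_if_unregister_all_rx_queues () followed by vnet_hw_if_update_runtime_data (), which is exactly what the memif_disconnect () hunk switches to in place of the per-queue vnet_hw_interface_unassign_rx_thread () calls.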