#include <vppinfra/linux/syscall.h>
#include <vnet/plugin/plugin.h>
#include <vnet/ethernet/ethernet.h>
+#include <vnet/interface/rx_queue_funcs.h>
#include <vpp/app/version.h>
#include <memif/memif.h>
#include <memif/private.h>
}
}
+/* Free any vlib buffers still enqueued on a zero-copy queue ring at
+ * disconnect time, then release the queue's buffer-index vector.
+ * is_rx selects the chained-buffer free variant for rx rings vs. the
+ * no-next variant for tx rings. */
+static void
+memif_disconnect_free_zc_queue_buffer (memif_queue_t * mq, u8 is_rx)
+{
+  vlib_main_t *vm = vlib_get_main ();
+  u16 ring_size, n_slots, mask, start;
+
+  ring_size = 1 << mq->log2_ring_size;
+  mask = ring_size - 1;
+  /* slots still in flight: producer head minus last consumed tail;
+   * u16 arithmetic handles ring-counter wraparound */
+  n_slots = mq->ring->head - mq->last_tail;
+  start = mq->last_tail & mask;
+  if (is_rx)
+    vlib_buffer_free_from_ring (vm, mq->buffers, start, ring_size, n_slots);
+  else
+    vlib_buffer_free_from_ring_no_next (vm, mq->buffers, start, ring_size,
+					n_slots);
+  vec_free (mq->buffers);
+}
+
void
memif_disconnect (memif_if_t * mif, clib_error_t * err)
{
mq = vec_elt_at_index (mif->rx_queues, i);
if (mq->ring)
{
- int rv;
- rv = vnet_hw_interface_unassign_rx_thread (vnm, mif->hw_if_index, i);
- if (rv)
- memif_log_warn (mif,
- "Unable to unassign interface %d, queue %d: rc=%d",
- mif->hw_if_index, i, rv);
+ if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
+ {
+ memif_disconnect_free_zc_queue_buffer(mq, 1);
+ }
mq->ring = 0;
}
}
+ vnet_hw_if_unregister_all_rx_queues (vnm, mif->hw_if_index);
+ vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);
+
+ /* *INDENT-OFF* */
+ vec_foreach_index (i, mif->tx_queues)
+ {
+ mq = vec_elt_at_index (mif->tx_queues, i);
+ if (mq->ring)
+ {
+ if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
+ {
+ memif_disconnect_free_zc_queue_buffer(mq, 0);
+ }
+ }
+ mq->ring = 0;
+ }
/* free tx and rx queues */
vec_foreach (mq, mif->rx_queues)
clib_fifo_free (mif->msg_queue);
}
+/* clib_file write-ready (EPOLLOUT) callback for a queue interrupt fd.
+ * EPOLLOUT is never expected on these fds, so just log a warning.
+ * private_data packs the interface pool index in the high 16 bits and
+ * the queue id in the low 16 bits. */
+static clib_error_t *
+memif_int_fd_write_ready (clib_file_t * uf)
+{
+  memif_main_t *mm = &memif_main;
+  u16 qid = uf->private_data & 0xFFFF;
+  memif_if_t *mif = vec_elt_at_index (mm->interfaces, uf->private_data >> 16);
+
+  memif_log_warn (mif, "unexpected EPOLLOUT on RX for queue %u", qid);
+  return 0;
+}
+
static clib_error_t *
memif_int_fd_read_ready (clib_file_t * uf)
{
size = read (uf->file_descriptor, &b, sizeof (b));
if (size < 0)
{
- memif_log_debug (mif, "Failed to read form socket");
+ memif_log_debug (mif, "Failed to read from socket");
return 0;
}
- vnet_device_input_set_interrupt_pending (vnm, mif->hw_if_index, qid);
+ vnet_hw_if_rx_queue_set_int_pending (vnm, mq->queue_index);
mq->int_count++;
return 0;
clib_error_t *
memif_connect (memif_if_t * mif)
{
+ vlib_main_t *vm = vlib_get_main ();
vnet_main_t *vnm = vnet_get_main ();
clib_file_t template = { 0 };
memif_region_t *mr;
/* *INDENT-ON* */
template.read_function = memif_int_fd_read_ready;
+ template.write_function = memif_int_fd_write_ready;
/* *INDENT-OFF* */
vec_foreach_index (i, mif->tx_queues)
vec_foreach_index (i, mif->rx_queues)
{
memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, i);
+ u32 ti;
+ u32 qi;
int rv;
mq->ring = mif->regions[mq->region].shm + mq->offset;
err = clib_error_return (0, "wrong cookie on tx ring %u", i);
goto error;
}
-
+ qi = vnet_hw_if_register_rx_queue (vnm, mif->hw_if_index, i,
+ VNET_HW_IF_RXQ_THREAD_ANY);
+ mq->queue_index = qi;
if (mq->int_fd > -1)
{
template.file_descriptor = mq->int_fd;
format_memif_device_name,
mif->dev_instance, i);
memif_file_add (&mq->int_clib_file_index, &template);
+ vnet_hw_if_set_rx_queue_file_index (vnm, qi,
+ mq->int_clib_file_index);
}
- vnet_hw_interface_assign_rx_thread (vnm, mif->hw_if_index, i, ~0);
- rv = vnet_hw_interface_set_rx_mode (vnm, mif->hw_if_index, i,
- VNET_HW_INTERFACE_RX_MODE_DEFAULT);
+ ti = vnet_hw_if_get_rx_queue_thread_index (vnm, qi);
+ mq->buffer_pool_index = vlib_buffer_pool_get_default_for_numa (
+ vm, vlib_get_main_by_index (ti)->numa_node);
+ rv = vnet_hw_if_set_rx_queue_mode (vnm, qi, VNET_HW_IF_RX_MODE_DEFAULT);
+ vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);
+
if (rv)
memif_log_err
(mif, "Warning: unable to set rx mode for interface %d queue %d: "
"rc=%d", mif->hw_if_index, i, rv);
else
{
- vnet_hw_interface_rx_mode rxmode;
- vnet_hw_interface_get_rx_mode (vnm, mif->hw_if_index, i, &rxmode);
+ vnet_hw_if_rx_mode rxmode = vnet_hw_if_get_rx_queue_mode (vnm, qi);
- if (rxmode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+ if (rxmode == VNET_HW_IF_RX_MODE_POLLING)
mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
else
- vnet_device_input_set_interrupt_pending (vnm, mif->hw_if_index, i);
+ vnet_hw_if_rx_queue_set_int_pending (vnm, qi);
}
}
/* *INDENT-ON* */
memif_init_regions_and_queues (memif_if_t * mif)
{
vlib_main_t *vm = vlib_get_main ();
+ memif_socket_file_t *msf;
memif_ring_t *ring = NULL;
- int i, j;
+ int fd, i, j;
u64 buffer_offset;
memif_region_t *r;
- clib_mem_vm_alloc_t alloc = { 0 };
clib_error_t *err;
ASSERT (vec_len (mif->regions) == 0);
r->region_size += mif->run.buffer_size * (1 << mif->run.log2_ring_size) *
(mif->run.num_s2m_rings + mif->run.num_m2s_rings);
- alloc.name = "memif region";
- alloc.size = r->region_size;
- alloc.flags = CLIB_MEM_VM_F_SHARED;
+ if ((fd = clib_mem_vm_create_fd (CLIB_MEM_PAGE_SZ_DEFAULT, "%U region 0",
+ format_memif_device_name,
+ mif->dev_instance)) == -1)
+ {
+ err = clib_mem_get_last_error ();
+ goto error;
+ }
- err = clib_mem_vm_ext_alloc (&alloc);
- if (err)
- goto error;
+ if ((ftruncate (fd, r->region_size)) == -1)
+ {
+ err = clib_error_return_unix (0, "ftruncate");
+ goto error;
+ }
+
+ msf = pool_elt_at_index (memif_main.socket_files, mif->socket_file_index);
+ r->shm = clib_mem_vm_map_shared (0, r->region_size, fd, 0, "memif%lu/%lu:0",
+ msf->socket_id, mif->id);
- r->fd = alloc.fd;
- r->shm = alloc.addr;
+ if (r->shm == CLIB_MEM_VM_MAP_FAILED)
+ {
+ err = clib_error_return_unix (0, "memif shared region map failed");
+ goto error;
+ }
+
+ r->fd = fd;
if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
{
vlib_buffer_pool_t *bp;
/* *INDENT-OFF* */
- vec_foreach (bp, buffer_main.buffer_pools)
+ vec_foreach (bp, vm->buffer_main->buffer_pools)
{
- vlib_physmem_region_t *pr;
- pr = vlib_physmem_get_region (vm, bp->physmem_region);
+ vlib_physmem_map_t *pm;
+ pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
vec_add2_aligned (mif->regions, r, 1, CLIB_CACHE_LINE_BYTES);
- r->fd = pr->fd;
- r->region_size = pr->size;
- r->shm = pr->mem;
+ r->fd = pm->fd;
+ r->region_size = pm->n_pages << pm->log2_page_size;
+ r->shm = pm->base;
r->is_external = 1;
}
/* *INDENT-ON* */
case MEMIF_PROCESS_EVENT_STOP:
enabled = 0;
continue;
+ case MEMIF_PROCESS_EVENT_ADMIN_UP_DOWN:
+ break;
default:
ASSERT (0);
}
last_run_duration = start_time = vlib_time_now (vm);
/* *INDENT-OFF* */
- pool_foreach (mif, mm->interfaces,
- ({
+ pool_foreach (mif, mm->interfaces)
+ {
memif_socket_file_t * msf = vec_elt_at_index (mm->socket_files, mif->socket_file_index);
/* Allow no more than 10us without a pause */
now = vlib_time_now (vm);
sock = clib_mem_alloc (sizeof(clib_socket_t));
}
}
- }));
+ }
/* *INDENT-ON* */
last_run_duration = vlib_time_now (vm) - last_run_duration;
}
if (mm->per_thread_data == 0)
{
int i;
- vlib_buffer_free_list_t *fl;
vec_validate_aligned (mm->per_thread_data, tm->n_vlib_mains - 1,
CLIB_CACHE_LINE_BYTES);
- fl =
- vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
for (i = 0; i < tm->n_vlib_mains; i++)
{
memif_per_thread_data_t *ptd =
vec_elt_at_index (mm->per_thread_data, i);
vlib_buffer_t *bt = &ptd->buffer_template;
- vlib_buffer_init_for_free_list (bt, fl);
+ clib_memset (bt, 0, sizeof (vlib_buffer_t));
bt->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
bt->total_length_not_including_first_buffer = 0;
vnet_buffer (bt)->sw_if_index[VLIB_TX] = (u32) ~ 0;
}
hw = vnet_get_hw_interface (vnm, mif->hw_if_index);
- hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
- vnet_hw_interface_set_input_node (vnm, mif->hw_if_index,
- memif_input_node.index);
-
+ hw->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE;
+ vnet_hw_if_set_input_node (vnm, mif->hw_if_index, memif_input_node.index);
mhash_set (&msf->dev_instance_by_id, &mif->id, mif->dev_instance, 0);
if (pool_elts (mm->interfaces) == 1)
return rv;
}
+/* Admin up/down callback for memif interfaces.
+ * Mirrors the requested admin state into the interface's flags and
+ * signals the memif process node so it can react to the change.
+ * Always succeeds (returns 0). */
+clib_error_t *
+memif_interface_admin_up_down (vnet_main_t *vnm, u32 hw_if_index, u32 flags)
+{
+  memif_main_t *mm = &memif_main;
+  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+  memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
+
+  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+    mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
+  else
+    mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;
+
+  /* wake the memif process node to process the admin state change */
+  vlib_process_signal_event (vnm->vlib_main, memif_process_node.index,
+			     MEMIF_PROCESS_EVENT_ADMIN_UP_DOWN, 0);
+  /* no error path: drop the original function-scope `static clib_error_t *`
+   * which was needless persistent state (and thread-unsafe if ever set) */
+  return 0;
+}
+
static clib_error_t *
memif_init (vlib_main_t * vm)
{
/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () = {
.version = VPP_BUILD_VER,
- .description = "Packet Memory Interface (experimental)",
+ .description = "Packet Memory Interface (memif) -- Experimental",
};
/* *INDENT-ON* */