#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
-#include <vppinfra/linux/syscall.h>
#include <vnet/plugin/plugin.h>
#include <vnet/ethernet/ethernet.h>
+#include <vnet/interface/rx_queue_funcs.h>
+#include <vnet/interface/tx_queue_funcs.h>
#include <vpp/app/version.h>
#include <memif/memif.h>
#include <memif/private.h>
}
}
+/* Free any buffers still attached to a zero-copy queue ring at disconnect
+ * time, then release the queue's buffer-index vector.
+ *
+ * @param mq    queue whose in-flight ring buffers are released
+ * @param is_rx 1 for an RX queue: buffers may be chained, so free with
+ *              next-pointer traversal (vlib_buffer_free_from_ring);
+ *              0 for a TX queue: free without following next pointers.
+ */
+static void
+memif_disconnect_free_zc_queue_buffer (memif_queue_t * mq, u8 is_rx)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u16 ring_size, n_slots, mask, start;
+
+ ring_size = 1 << mq->log2_ring_size;
+ mask = ring_size - 1;
+ /* slots between the last consumed tail and the current head still hold
+    buffers owned by this side; u16 arithmetic wraps correctly with the
+    power-of-two ring. */
+ n_slots = mq->ring->head - mq->last_tail;
+ start = mq->last_tail & mask;
+ if (is_rx)
+ vlib_buffer_free_from_ring (vm, mq->buffers, start, ring_size, n_slots);
+ else
+ vlib_buffer_free_from_ring_no_next (vm, mq->buffers, start, ring_size,
+ n_slots);
+ vec_free (mq->buffers);
+}
+
void
memif_disconnect (memif_if_t * mif, clib_error_t * err)
{
if (mif == 0)
return;
- vlib_log_debug (mm->log_class, "disconnect %u (%v)", mif->dev_instance,
- err ? err->what : 0);
+ memif_log_debug (mif, "disconnect %u (%v)", mif->dev_instance,
+ err ? err->what : 0);
if (err)
{
err = clib_socket_close (mif->sock);
if (err)
{
- vlib_log_err (mm->log_class, "%U", format_clib_error, err);
+ memif_log_err (mif, "%U", format_clib_error, err);
clib_error_free (err);
}
clib_mem_free (mif->sock);
mq = vec_elt_at_index (mif->rx_queues, i);
if (mq->ring)
{
- int rv;
- rv = vnet_hw_interface_unassign_rx_thread (vnm, mif->hw_if_index, i);
- if (rv)
- vlib_log_warn (mm->log_class,
- "Unable to unassign interface %d, queue %d: rc=%d",
- mif->hw_if_index, i, rv);
+ if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
+ {
+ memif_disconnect_free_zc_queue_buffer(mq, 1);
+ }
mq->ring = 0;
}
}
+ vnet_hw_if_unregister_all_rx_queues (vnm, mif->hw_if_index);
+
+ /* *INDENT-OFF* */
+ vec_foreach_index (i, mif->tx_queues)
+ {
+ mq = vec_elt_at_index (mif->tx_queues, i);
+ if (mq->ring)
+ {
+ if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
+ {
+ memif_disconnect_free_zc_queue_buffer(mq, 0);
+ }
+ clib_spinlock_free (&mq->lockp);
+ }
+ mq->ring = 0;
+ }
+ vnet_hw_if_unregister_all_tx_queues (vnm, mif->hw_if_index);
+ vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);
/* free tx and rx queues */
vec_foreach (mq, mif->rx_queues)
if (mr->is_external)
continue;
if ((rv = munmap (mr->shm, mr->region_size)))
- clib_warning ("munmap failed, rv = %d", rv);
+ memif_log_err (mif, "munmap failed, rv = %d", rv);
if (mr->fd > -1)
close (mr->fd);
}
clib_fifo_free (mif->msg_queue);
}
+/* clib_file write-ready (EPOLLOUT) callback for a queue interrupt fd.
+ * The interrupt eventfd is only ever read on this side, so write-readiness
+ * is unexpected; log a warning and carry on.  private_data packs the
+ * interface pool index in the upper 16 bits and the queue id in the lower
+ * 16 bits, mirroring how it is set when the file is registered. */
+static clib_error_t *
+memif_int_fd_write_ready (clib_file_t * uf)
+{
+ memif_main_t *mm = &memif_main;
+ u16 qid = uf->private_data & 0xFFFF;
+ memif_if_t *mif = vec_elt_at_index (mm->interfaces, uf->private_data >> 16);
+
+ memif_log_warn (mif, "unexpected EPOLLOUT on RX for queue %u", qid);
+ return 0;
+}
+
static clib_error_t *
memif_int_fd_read_ready (clib_file_t * uf)
{
size = read (uf->file_descriptor, &b, sizeof (b));
if (size < 0)
{
- vlib_log_debug (mm->log_class, "Failed to read form socket");
+ memif_log_debug (mif, "Failed to read from socket");
return 0;
}
- vnet_device_input_set_interrupt_pending (vnm, mif->hw_if_index, qid);
+ vnet_hw_if_rx_queue_set_int_pending (vnm, mq->queue_index);
mq->int_count++;
return 0;
memif_connect (memif_if_t * mif)
{
memif_main_t *mm = &memif_main;
+ vlib_main_t *vm = vlib_get_main ();
vnet_main_t *vnm = vnet_get_main ();
clib_file_t template = { 0 };
memif_region_t *mr;
- int i;
+ int i, j;
+ u32 n_txqs = 0, n_threads = vlib_get_n_threads ();
clib_error_t *err = NULL;
+ u8 max_log2_ring_sz = 0;
- vlib_log_debug (mm->log_class, "connect %u", mif->dev_instance);
+ memif_log_debug (mif, "connect %u", mif->dev_instance);
vec_free (mif->local_disc_string);
vec_free (mif->remote_disc_string);
/* *INDENT-ON* */
template.read_function = memif_int_fd_read_ready;
+ template.write_function = memif_int_fd_write_ready;
/* *INDENT-OFF* */
vec_foreach_index (i, mif->tx_queues)
{
memif_queue_t *mq = vec_elt_at_index (mif->tx_queues, i);
+ max_log2_ring_sz = clib_max (max_log2_ring_sz, mq->log2_ring_size);
mq->ring = mif->regions[mq->region].shm + mq->offset;
if (mq->ring->cookie != MEMIF_COOKIE)
err = clib_error_return (0, "wrong cookie on tx ring %u", i);
goto error;
}
+ mq->queue_index =
+ vnet_hw_if_register_tx_queue (vnm, mif->hw_if_index, i);
+ clib_spinlock_init (&mq->lockp);
+ }
+
+ if (vec_len (mif->tx_queues) > 0)
+ {
+ n_txqs = vec_len (mif->tx_queues);
+ for (j = 0; j < n_threads; j++)
+ {
+ u32 qi = mif->tx_queues[j % n_txqs].queue_index;
+ vnet_hw_if_tx_queue_assign_thread (vnm, qi, j);
+ }
}
vec_foreach_index (i, mif->rx_queues)
{
memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, i);
+ u32 ti;
+ u32 qi;
int rv;
+ max_log2_ring_sz = clib_max (max_log2_ring_sz, mq->log2_ring_size);
+
mq->ring = mif->regions[mq->region].shm + mq->offset;
if (mq->ring->cookie != MEMIF_COOKIE)
{
err = clib_error_return (0, "wrong cookie on tx ring %u", i);
goto error;
}
-
+ qi = vnet_hw_if_register_rx_queue (vnm, mif->hw_if_index, i,
+ VNET_HW_IF_RXQ_THREAD_ANY);
+ mq->queue_index = qi;
if (mq->int_fd > -1)
{
template.file_descriptor = mq->int_fd;
format_memif_device_name,
mif->dev_instance, i);
memif_file_add (&mq->int_clib_file_index, &template);
+ vnet_hw_if_set_rx_queue_file_index (vnm, qi,
+ mq->int_clib_file_index);
}
- vnet_hw_interface_assign_rx_thread (vnm, mif->hw_if_index, i, ~0);
- rv = vnet_hw_interface_set_rx_mode (vnm, mif->hw_if_index, i,
- VNET_HW_INTERFACE_RX_MODE_DEFAULT);
+ ti = vnet_hw_if_get_rx_queue_thread_index (vnm, qi);
+ mq->buffer_pool_index = vlib_buffer_pool_get_default_for_numa (
+ vm, vlib_get_main_by_index (ti)->numa_node);
+ rv = vnet_hw_if_set_rx_queue_mode (vnm, qi, VNET_HW_IF_RX_MODE_DEFAULT);
+ vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);
+
if (rv)
- clib_warning
- ("Warning: unable to set rx mode for interface %d queue %d: "
+ memif_log_err
+ (mif, "Warning: unable to set rx mode for interface %d queue %d: "
"rc=%d", mif->hw_if_index, i, rv);
else
{
- vnet_hw_interface_rx_mode rxmode;
- vnet_hw_interface_get_rx_mode (vnm, mif->hw_if_index, i, &rxmode);
+ vnet_hw_if_rx_mode rxmode = vnet_hw_if_get_rx_queue_mode (vnm, qi);
- if (rxmode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+ if (rxmode == VNET_HW_IF_RX_MODE_POLLING)
mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
else
- vnet_device_input_set_interrupt_pending (vnm, mif->hw_if_index, i);
+ vnet_hw_if_rx_queue_set_int_pending (vnm, qi);
}
}
/* *INDENT-ON* */
+ if (1 << max_log2_ring_sz > vec_len (mm->per_thread_data[0].desc_data))
+ {
+ memif_per_thread_data_t *ptd;
+ int with_barrier = 1;
+
+ if (vlib_worker_thread_barrier_held ())
+ with_barrier = 0;
+
+ if (with_barrier)
+ vlib_worker_thread_barrier_sync (vm);
+
+ vec_foreach (ptd, mm->per_thread_data)
+ {
+ vec_validate_aligned (ptd->desc_data, pow2_mask (max_log2_ring_sz),
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (ptd->desc_len, pow2_mask (max_log2_ring_sz),
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (ptd->desc_status, pow2_mask (max_log2_ring_sz),
+ CLIB_CACHE_LINE_BYTES);
+ }
+ if (with_barrier)
+ vlib_worker_thread_barrier_release (vm);
+ }
+
mif->flags &= ~MEMIF_IF_FLAG_CONNECTING;
mif->flags |= MEMIF_IF_FLAG_CONNECTED;
return 0;
error:
- vlib_log_err (mm->log_class, "%U", format_clib_error, err);
+ memif_log_err (mif, "%U", format_clib_error, err);
return err;
}
memif_init_regions_and_queues (memif_if_t * mif)
{
vlib_main_t *vm = vlib_get_main ();
- memif_main_t *mm = &memif_main;
+ memif_socket_file_t *msf;
memif_ring_t *ring = NULL;
- int i, j;
+ int fd, i, j;
u64 buffer_offset;
memif_region_t *r;
- clib_mem_vm_alloc_t alloc = { 0 };
clib_error_t *err;
ASSERT (vec_len (mif->regions) == 0);
r->region_size += mif->run.buffer_size * (1 << mif->run.log2_ring_size) *
(mif->run.num_s2m_rings + mif->run.num_m2s_rings);
- alloc.name = "memif region";
- alloc.size = r->region_size;
- alloc.flags = CLIB_MEM_VM_F_SHARED;
+ if ((fd = clib_mem_vm_create_fd (CLIB_MEM_PAGE_SZ_DEFAULT, "%U region 0",
+ format_memif_device_name,
+ mif->dev_instance)) == -1)
+ {
+ err = clib_mem_get_last_error ();
+ goto error;
+ }
- err = clib_mem_vm_ext_alloc (&alloc);
- if (err)
- goto error;
+ if ((ftruncate (fd, r->region_size)) == -1)
+ {
+ err = clib_error_return_unix (0, "ftruncate");
+ goto error;
+ }
- r->fd = alloc.fd;
- r->shm = alloc.addr;
+ msf = pool_elt_at_index (memif_main.socket_files, mif->socket_file_index);
+ r->shm = clib_mem_vm_map_shared (0, r->region_size, fd, 0, "memif%lu/%lu:0",
+ msf->socket_id, mif->id);
+
+ if (r->shm == CLIB_MEM_VM_MAP_FAILED)
+ {
+ err = clib_error_return_unix (0, "memif shared region map failed");
+ goto error;
+ }
+
+ r->fd = fd;
if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
{
vlib_buffer_pool_t *bp;
/* *INDENT-OFF* */
- vec_foreach (bp, buffer_main.buffer_pools)
+ vec_foreach (bp, vm->buffer_main->buffer_pools)
{
- vlib_physmem_region_t *pr;
- pr = vlib_physmem_get_region (vm, bp->physmem_region);
+ vlib_physmem_map_t *pm;
+ pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
vec_add2_aligned (mif->regions, r, 1, CLIB_CACHE_LINE_BYTES);
- r->fd = pr->fd;
- r->region_size = pr->size;
- r->shm = pr->mem;
+ r->fd = pm->fd;
+ r->region_size = pm->n_pages << pm->log2_page_size;
+ r->shm = pm->base;
r->is_external = 1;
}
/* *INDENT-ON* */
err = clib_error_return_unix (0, "eventfd[tx queue %u]", i);
goto error;
}
+
mq->int_clib_file_index = ~0;
mq->ring = memif_get_ring (mif, MEMIF_RING_S2M, i);
mq->log2_ring_size = mif->cfg.log2_ring_size;
return 0;
error:
- vlib_log_err (mm->log_class, "%U", format_clib_error, err);
+ memif_log_err (mif, "%U", format_clib_error, err);
return err;
}
clib_error_t *err;
sock = clib_mem_alloc (sizeof (clib_socket_t));
- memset (sock, 0, sizeof (clib_socket_t));
+ clib_memset (sock, 0, sizeof (clib_socket_t));
while (1)
{
case MEMIF_PROCESS_EVENT_STOP:
enabled = 0;
continue;
+ case MEMIF_PROCESS_EVENT_ADMIN_UP_DOWN:
+ break;
default:
ASSERT (0);
}
last_run_duration = start_time = vlib_time_now (vm);
/* *INDENT-OFF* */
- pool_foreach (mif, mm->interfaces,
- ({
+ pool_foreach (mif, mm->interfaces)
+ {
memif_socket_file_t * msf = vec_elt_at_index (mm->socket_files, mif->socket_file_index);
/* Allow no more than 10us without a pause */
now = vlib_time_now (vm);
if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
{
- memset (sock, 0, sizeof(clib_socket_t));
+ clib_memset (sock, 0, sizeof(clib_socket_t));
sock->config = (char *) msf->filename;
- sock->flags = CLIB_SOCKET_F_IS_CLIENT| CLIB_SOCKET_F_SEQPACKET;
+ sock->flags = CLIB_SOCKET_F_IS_CLIENT | CLIB_SOCKET_F_SEQPACKET |
+ CLIB_SOCKET_F_BLOCKING;
- if ((err = clib_socket_init (sock)))
+ if ((err = clib_socket_init (sock)))
{
clib_error_free (err);
}
sock = clib_mem_alloc (sizeof(clib_socket_t));
}
}
- }));
+ }
/* *INDENT-ON* */
last_run_duration = vlib_time_now (vm) - last_run_duration;
}
}
pool_get (mm->socket_files, msf);
- memset (msf, 0, sizeof (memif_socket_file_t));
+ clib_memset (msf, 0, sizeof (memif_socket_file_t));
msf->filename = socket_filename;
msf->socket_id = sock_id;
int
memif_socket_filename_add_del (u8 is_add, u32 sock_id, u8 * sock_filename)
{
- struct stat file_stat;
char *dir = 0, *tmp;
u32 idx = 0;
vec_add1 (dir, '\0');
}
- if (((dir == 0) || (stat (dir, &file_stat) == -1)
- || (!S_ISDIR (file_stat.st_mode))) && (idx != 0))
+ /* check dir existance and access rights for effective user/group IDs */
+ if ((dir == NULL)
+ ||
+ (faccessat ( /* ignored */ -1, dir, F_OK | R_OK | W_OK, AT_EACCESS)
+ < 0))
{
vec_free (dir);
return VNET_API_ERROR_INVALID_ARGUMENT;
}
/* free interface data structures */
- clib_spinlock_free (&mif->lockp);
mhash_unset (&msf->dev_instance_by_id, &mif->id, 0);
/* remove socket file */
err = clib_socket_close (msf->sock);
if (err)
{
- vlib_log_err (mm->log_class, "%U", format_clib_error, err);
+ memif_log_err (mif, "%U", format_clib_error, err);
clib_error_free (err);
}
clib_mem_free (msf->sock);
}
}
- memset (mif, 0, sizeof (*mif));
+ vec_free (mif->local_disc_string);
+ clib_memset (mif, 0, sizeof (*mif));
pool_put (mm->interfaces, mif);
if (pool_elts (mm->interfaces) == 0)
}
/* *INDENT-OFF* */
-VNET_HW_INTERFACE_CLASS (memif_ip_hw_if_class, static) =
-{
+VNET_HW_INTERFACE_CLASS (memif_ip_hw_if_class, static) = {
.name = "memif-ip",
.flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+ .tx_hash_fn_type = VNET_HASH_FN_TYPE_IP,
};
/* *INDENT-ON* */
memif_main_t *mm = &memif_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
vnet_main_t *vnm = vnet_get_main ();
+ vnet_eth_interface_registration_t eir = {};
memif_if_t *mif = 0;
vnet_sw_interface_t *sw;
clib_error_t *error = 0;
int ret = 0;
uword *p;
- vnet_hw_interface_t *hw;
memif_socket_file_t *msf = 0;
int rv = 0;
msf->dev_instance_by_fd = hash_create (0, sizeof (uword));
msf->is_listener = (args->is_master != 0);
- vlib_log_debug (mm->log_class, "initializing socket file %s",
- msf->filename);
+ memif_log_debug (0, "initializing socket file %s", msf->filename);
}
if (mm->per_thread_data == 0)
{
int i;
- vlib_buffer_free_list_t *fl;
vec_validate_aligned (mm->per_thread_data, tm->n_vlib_mains - 1,
CLIB_CACHE_LINE_BYTES);
- fl =
- vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
for (i = 0; i < tm->n_vlib_mains; i++)
{
memif_per_thread_data_t *ptd =
vec_elt_at_index (mm->per_thread_data, i);
vlib_buffer_t *bt = &ptd->buffer_template;
- vlib_buffer_init_for_free_list (bt, fl);
+ clib_memset (bt, 0, sizeof (vlib_buffer_t));
bt->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
bt->total_length_not_including_first_buffer = 0;
vnet_buffer (bt)->sw_if_index[VLIB_TX] = (u32) ~ 0;
}
pool_get (mm->interfaces, mif);
- memset (mif, 0, sizeof (*mif));
+ clib_memset (mif, 0, sizeof (*mif));
mif->dev_instance = mif - mm->interfaces;
mif->socket_file_index = msf - mm->socket_files;
mif->id = args->id;
if (args->secret)
mif->secret = vec_dup (args->secret);
- if (tm->n_vlib_mains > 1)
- clib_spinlock_init (&mif->lockp);
-
if (mif->mode == MEMIF_INTERFACE_MODE_ETHERNET)
{
args->hw_addr[0] = 2;
args->hw_addr[1] = 0xfe;
}
- error = ethernet_register_interface (vnm, memif_device_class.index,
- mif->dev_instance, args->hw_addr,
- &mif->hw_if_index,
- memif_eth_flag_change);
+
+ eir.dev_class_index = memif_device_class.index;
+ eir.dev_instance = mif->dev_instance;
+ eir.address = args->hw_addr;
+ eir.cb.flag_change = memif_eth_flag_change;
+ mif->hw_if_index = vnet_eth_register_interface (vnm, &eir);
}
else if (mif->mode == MEMIF_INTERFACE_MODE_IP)
{
ASSERT (msf->sock == 0);
msf->sock = s;
- memset (s, 0, sizeof (clib_socket_t));
+ clib_memset (s, 0, sizeof (clib_socket_t));
s->config = (char *) msf->filename;
s->flags = CLIB_SOCKET_F_IS_SERVER |
CLIB_SOCKET_F_ALLOW_GROUP_WRITE |
mif->flags |= MEMIF_IF_FLAG_ZERO_COPY;
}
- hw = vnet_get_hw_interface (vnm, mif->hw_if_index);
- hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
- vnet_hw_interface_set_input_node (vnm, mif->hw_if_index,
- memif_input_node.index);
-
+ vnet_hw_if_set_caps (vnm, mif->hw_if_index, VNET_HW_IF_CAP_INT_MODE);
+ vnet_hw_if_set_input_node (vnm, mif->hw_if_index, memif_input_node.index);
mhash_set (&msf->dev_instance_by_id, &mif->id, mif->dev_instance, 0);
if (pool_elts (mm->interfaces) == 1)
goto done;
error:
- if (mif->hw_if_index != ~0)
- {
- if (mif->mode == MEMIF_INTERFACE_MODE_IP)
- vnet_delete_hw_interface (vnm, mif->hw_if_index);
- else
- ethernet_delete_interface (vnm, mif->hw_if_index);
- mif->hw_if_index = ~0;
- }
memif_delete_if (vm, mif);
if (error)
{
- vlib_log_err (mm->log_class, "%U", format_clib_error, error);
+ memif_log_err (mif, "%U", format_clib_error, error);
clib_error_free (error);
}
return ret;
return rv;
}
+/* Admin up/down handler for a memif hardware interface.
+ *
+ * On admin-up: set hardware link-up only if the memif session is already
+ * CONNECTED (link state otherwise follows the connect path), and record
+ * ADMIN_UP in mif->flags.  On admin-down: clear ADMIN_UP.  In both cases
+ * signal the memif process node so it can react to the state change.
+ *
+ * @param vnm         vnet main
+ * @param hw_if_index hardware interface index
+ * @param flags       VNET_SW_INTERFACE_FLAG_* admin flags
+ * @return always 0 (error is never assigned in this function)
+ */
+clib_error_t *
+memif_interface_admin_up_down (vnet_main_t *vnm, u32 hw_if_index, u32 flags)
+{
+ memif_main_t *mm = &memif_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
+ /* NOTE(review): `error` is never set to non-zero here, so the `static`
+    storage class has no effect — confirm whether it is intentional
+    (it mirrors other VPP device drivers) or leftover boilerplate. */
+ static clib_error_t *error = 0;
+
+ if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
+ {
+ if (mif->flags & MEMIF_IF_FLAG_CONNECTED)
+ {
+ vnet_hw_interface_set_flags (vnm, mif->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ }
+ mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
+ }
+ else
+ mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;
+
+ /* wake the memif process node so it can act on the new admin state */
+ vlib_process_signal_event (vnm->vlib_main, memif_process_node.index,
+ MEMIF_PROCESS_EVENT_ADMIN_UP_DOWN, 0);
+ return error;
+}
+
static clib_error_t *
memif_init (vlib_main_t * vm)
{
memif_main_t *mm = &memif_main;
- memset (mm, 0, sizeof (memif_main_t));
+ clib_memset (mm, 0, sizeof (memif_main_t));
mm->log_class = vlib_log_register_class ("memif_plugin", 0);
- vlib_log_debug (mm->log_class, "initialized");
+ memif_log_debug (0, "initialized");
/* initialize binary API */
memif_plugin_api_hookup (vm);
/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () = {
.version = VPP_BUILD_VER,
- .description = "Packet Memory Interface (experimental)",
+ .description = "Packet Memory Interface (memif) -- Experimental",
};
/* *INDENT-ON* */