2 *------------------------------------------------------------------
3 * Copyright (c) 2017 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
22 #include <sys/types.h>
24 #include <sys/ioctl.h>
25 #include <sys/socket.h>
29 #include <sys/prctl.h>
30 #include <sys/eventfd.h>
34 #include <vlib/vlib.h>
35 #include <vlib/unix/unix.h>
36 #include <vnet/plugin/plugin.h>
37 #include <vnet/ethernet/ethernet.h>
38 #include <vnet/interface/rx_queue_funcs.h>
39 #include <vnet/interface/tx_queue_funcs.h>
40 #include <vpp/app/version.h>
41 #include <memif/memif.h>
42 #include <memif/private.h>
/* Global memif plugin state: interface pool, socket files, per-thread data. */
44 memif_main_t memif_main;
/* Ethernet flag-change callback registered for memif hw interfaces.
   NOTE(review): body not visible in this view — behavior not verified here. */
47 memif_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags)
/* Max-frame-size callback registered for memif hw interfaces.
   NOTE(review): body not visible in this view — behavior not verified here. */
54 memif_eth_set_max_frame_size (vnet_main_t *vnm, vnet_hw_interface_t *hi,
/* Close a queue's interrupt eventfd.  If the fd is registered with the clib
   file poller, removing the clib file is the close path (the poller owns the
   fd); otherwise the raw fd is closed directly (elided branch below). */
62 memif_queue_intfd_close (memif_queue_t * mq)
64 if (mq->int_clib_file_index != ~0)
66 memif_file_del_by_index (mq->int_clib_file_index);
/* mark the clib file slot as unused so we never double-delete it */
67 mq->int_clib_file_index = ~0;
70 else if (mq->int_fd > -1)
/* Free the vlib buffers still parked on a zero-copy queue's ring at
   disconnect time.  is_rx selects the rx flavor (buffers may be chained,
   so the plain free-from-ring is used) vs the tx flavor (no-next variant).
   NOTE(review): the assignment of `mask` is elided in this view — presumably
   mask = ring_size - 1; confirm against the full source. */
78 memif_disconnect_free_zc_queue_buffer (memif_queue_t * mq, u8 is_rx)
80 vlib_main_t *vm = vlib_get_main ();
81 u16 ring_size, n_slots, mask, start;
83 ring_size = 1 << mq->log2_ring_size;
/* number of descriptors the peer still holds between our last tail and head */
85 n_slots = mq->ring->head - mq->last_tail;
86 start = mq->last_tail & mask;
88 vlib_buffer_free_from_ring (vm, mq->buffers, start, ring_size, n_slots);
90 vlib_buffer_free_from_ring_no_next (vm, mq->buffers, start, ring_size,
92 vec_free (mq->buffers);
/* Tear down a memif connection: notify the peer (when the control socket is
   still up), bring the interface down, close the control socket, unregister
   all rx/tx queues, close per-queue interrupt fds, unmap shared-memory
   regions and free per-interface vectors.  Takes the worker barrier if the
   caller does not already hold it.
   NOTE(review): this view of the file is elided (line gaps); comments cover
   only the visible statements. */
96 memif_disconnect (memif_if_t * mif, clib_error_t * err)
98 memif_main_t *mm = &memif_main;
99 vnet_main_t *vnm = vnet_get_main ();
103 vlib_main_t *vm = vlib_get_main ();
104 int with_barrier = 0;
109 memif_log_debug (mif, "disconnect %u (%v)", mif->dev_instance,
110 err ? err->what : 0);
/* remember the local reason for the disconnect (shown in CLI/debug) */
115 mif->local_disc_string = vec_dup (err->what);
/* best-effort: tell the peer why we are disconnecting */
116 if (mif->sock && clib_socket_is_connected (mif->sock))
117 e = memif_msg_send_disconnect (mif, err);
121 /* set interface down */
122 mif->flags &= ~(MEMIF_IF_FLAG_CONNECTED | MEMIF_IF_FLAG_CONNECTING);
123 if (mif->hw_if_index != ~0)
124 vnet_hw_interface_set_flags (vnm, mif->hw_if_index, 0);
126 /* close connection socket */
127 if (mif->sock && mif->sock->fd)
129 memif_socket_file_t *msf = vec_elt_at_index (mm->socket_files,
130 mif->socket_file_index);
/* drop the fd -> dev_instance mapping before closing */
131 hash_unset (msf->dev_instance_by_fd, mif->sock->fd);
132 memif_socket_close (&mif->sock);
137 err = clib_socket_close (mif->sock);
140 memif_log_err (mif, "%U", format_clib_error, err);
141 clib_error_free (err);
143 clib_mem_free (mif->sock);
/* queue/runtime changes below require the worker barrier */
146 if (vlib_worker_thread_barrier_held () == 0)
149 vlib_worker_thread_barrier_sync (vm);
153 vec_foreach_index (i, mif->rx_queues)
155 mq = vec_elt_at_index (mif->rx_queues, i);
/* zero-copy rings still reference vlib buffers; release them */
158 if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
160 memif_disconnect_free_zc_queue_buffer(mq, 1);
165 vnet_hw_if_unregister_all_rx_queues (vnm, mif->hw_if_index);
168 vec_foreach_index (i, mif->tx_queues)
170 mq = vec_elt_at_index (mif->tx_queues, i);
173 if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
175 memif_disconnect_free_zc_queue_buffer(mq, 0);
177 clib_spinlock_free (&mq->lockp);
181 vnet_hw_if_unregister_all_tx_queues (vnm, mif->hw_if_index);
182 vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);
184 /* free tx and rx queues */
185 vec_foreach (mq, mif->rx_queues)
186 memif_queue_intfd_close (mq);
187 vec_free (mif->rx_queues);
189 vec_foreach (mq, mif->tx_queues)
190 memif_queue_intfd_close (mq);
191 vec_free (mif->tx_queues);
193 /* free memory regions */
194 vec_foreach (mr, mif->regions)
199 if ((rv = munmap (mr->shm, mr->region_size)))
200 memif_log_err (mif, "munmap failed, rv = %d", rv);
205 vec_free (mif->regions);
206 vec_free (mif->remote_name);
207 vec_free (mif->remote_if_name);
208 clib_fifo_free (mif->msg_queue);
211 vlib_worker_thread_barrier_release (vm);
/* clib file write-ready callback for a queue interrupt fd.  An EPOLLOUT on
   an rx interrupt line is unexpected, so it is only logged.  private_data
   packs (dev_instance << 16) | queue id. */
214 static clib_error_t *
215 memif_int_fd_write_ready (clib_file_t * uf)
217 memif_main_t *mm = &memif_main;
218 u16 qid = uf->private_data & 0xFFFF;
219 memif_if_t *mif = vec_elt_at_index (mm->interfaces, uf->private_data >> 16);
221 memif_log_warn (mif, "unexpected EPOLLOUT on RX for queue %u", qid);
/* clib file read-ready callback for a queue interrupt fd: drain the eventfd
   counter and mark the rx queue's interrupt as pending so the input node
   polls it.  private_data packs (dev_instance << 16) | queue id. */
225 static clib_error_t *
226 memif_int_fd_read_ready (clib_file_t * uf)
228 memif_main_t *mm = &memif_main;
229 vnet_main_t *vnm = vnet_get_main ();
230 u16 qid = uf->private_data & 0xFFFF;
231 memif_if_t *mif = vec_elt_at_index (mm->interfaces, uf->private_data >> 16);
232 memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);
/* drain the eventfd; a failed read is only logged, not fatal */
236 size = read (uf->file_descriptor, &b, sizeof (b));
239 memif_log_debug (mif, "Failed to read from socket");
243 vnet_hw_if_rx_queue_set_int_pending (vnm, mq->queue_index);
/* Complete a memif connection after the control-channel handshake: mmap the
   peer's shared-memory regions, validate ring cookies, register tx and rx
   queues with vnet, set up per-queue interrupt files, optionally allocate
   DMA scratch vectors, grow per-thread descriptor scratch space, and mark
   the interface connected / link-up.  Takes the worker barrier if not
   already held.
   NOTE(review): this view of the file is elided (line gaps); comments cover
   only the visible statements. */
251 memif_connect (memif_if_t * mif)
253 memif_main_t *mm = &memif_main;
254 vlib_main_t *vm = vlib_get_main ();
255 vnet_main_t *vnm = vnet_get_main ();
256 clib_file_t template = { 0 };
259 u32 n_txqs = 0, n_threads = vlib_get_n_threads ();
260 clib_error_t *err = NULL;
261 u8 max_log2_ring_sz = 0;
262 int with_barrier = 0;
264 memif_log_debug (mif, "connect %u", mif->dev_instance);
/* a successful connect clears any previous disconnect reasons */
266 vec_free (mif->local_disc_string);
267 vec_free (mif->remote_disc_string);
/* map every shared-memory region received from the peer */
270 vec_foreach (mr, mif->regions)
277 err = clib_error_return (0, "no memory region fd");
281 if ((mr->shm = mmap (NULL, mr->region_size, PROT_READ | PROT_WRITE,
282 MAP_SHARED, mr->fd, 0)) == MAP_FAILED)
284 err = clib_error_return_unix (0, "mmap");
290 template.read_function = memif_int_fd_read_ready;
291 template.write_function = memif_int_fd_write_ready;
294 if (vlib_worker_thread_barrier_held ())
298 vlib_worker_thread_barrier_sync (vm);
301 vec_foreach_index (i, mif->tx_queues)
303 memif_queue_t *mq = vec_elt_at_index (mif->tx_queues, i);
/* track the largest ring so scratch vectors are sized once, below */
304 max_log2_ring_sz = clib_max (max_log2_ring_sz, mq->log2_ring_size);
306 mq->ring = mif->regions[mq->region].shm + mq->offset;
/* cookie guards against a peer handing us a bogus/stale region */
307 if (mq->ring->cookie != MEMIF_COOKIE)
309 err = clib_error_return (0, "wrong cookie on tx ring %u", i);
313 vnet_hw_if_register_tx_queue (vnm, mif->hw_if_index, i);
314 clib_spinlock_init (&mq->lockp);
316 if (mif->flags & MEMIF_IF_FLAG_USE_DMA)
318 memif_dma_info_t *dma_info;
/* per-queue ring of in-flight DMA batches, sized to the largest ring */
321 mq->dma_info_head = 0;
322 mq->dma_info_tail = 0;
323 mq->dma_info_size = MEMIF_DMA_INFO_SIZE;
324 vec_validate_aligned (mq->dma_info, MEMIF_DMA_INFO_SIZE,
325 CLIB_CACHE_LINE_BYTES);
327 vec_foreach (dma_info, mq->dma_info)
329 vec_validate_aligned (dma_info->data.desc_data,
330 pow2_mask (max_log2_ring_sz),
331 CLIB_CACHE_LINE_BYTES);
332 vec_validate_aligned (dma_info->data.desc_len,
333 pow2_mask (max_log2_ring_sz),
334 CLIB_CACHE_LINE_BYTES);
335 vec_validate_aligned (dma_info->data.desc_status,
336 pow2_mask (max_log2_ring_sz),
337 CLIB_CACHE_LINE_BYTES);
338 vec_validate_aligned (dma_info->data.copy_ops, 0,
339 CLIB_CACHE_LINE_BYTES);
340 vec_reset_length (dma_info->data.copy_ops);
341 vec_validate_aligned (dma_info->data.buffers, 0,
342 CLIB_CACHE_LINE_BYTES);
343 vec_reset_length (dma_info->data.buffers);
/* spread tx queues round-robin across all vlib threads */
348 if (vec_len (mif->tx_queues) > 0)
350 n_txqs = vec_len (mif->tx_queues);
351 for (j = 0; j < n_threads; j++)
353 u32 qi = mif->tx_queues[j % n_txqs].queue_index;
354 vnet_hw_if_tx_queue_assign_thread (vnm, qi, j);
358 vec_foreach_index (i, mif->rx_queues)
360 memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, i);
365 max_log2_ring_sz = clib_max (max_log2_ring_sz, mq->log2_ring_size);
367 mq->ring = mif->regions[mq->region].shm + mq->offset;
368 if (mq->ring->cookie != MEMIF_COOKIE)
/* NOTE(review): message says "tx ring" but this is the rx-queue loop —
   looks like a copy-paste of the tx-side error text; confirm and fix
   in the full source. */
370 err = clib_error_return (0, "wrong cookie on tx ring %u", i);
373 qi = vnet_hw_if_register_rx_queue (vnm, mif->hw_if_index, i,
374 VNET_HW_IF_RXQ_THREAD_ANY);
375 mq->queue_index = qi;
377 if (mif->flags & MEMIF_IF_FLAG_USE_DMA)
379 memif_dma_info_t *dma_info;
382 mq->dma_info_head = 0;
383 mq->dma_info_tail = 0;
384 mq->dma_info_size = MEMIF_DMA_INFO_SIZE;
385 vec_validate_aligned (mq->dma_info, MEMIF_DMA_INFO_SIZE,
386 CLIB_CACHE_LINE_BYTES);
387 vec_foreach (dma_info, mq->dma_info)
389 vec_validate_aligned (dma_info->data.desc_data,
390 pow2_mask (max_log2_ring_sz),
391 CLIB_CACHE_LINE_BYTES);
392 vec_validate_aligned (dma_info->data.desc_len,
393 pow2_mask (max_log2_ring_sz),
394 CLIB_CACHE_LINE_BYTES);
395 vec_validate_aligned (dma_info->data.desc_status,
396 pow2_mask (max_log2_ring_sz),
397 CLIB_CACHE_LINE_BYTES);
398 vec_validate_aligned (dma_info->data.copy_ops, 0,
399 CLIB_CACHE_LINE_BYTES);
400 vec_reset_length (dma_info->data.copy_ops);
401 vec_validate_aligned (dma_info->data.buffers, 0,
402 CLIB_CACHE_LINE_BYTES);
403 vec_reset_length (dma_info->data.buffers);
/* register the queue's interrupt eventfd with the clib file poller */
409 template.file_descriptor = mq->int_fd;
410 template.private_data = (mif->dev_instance << 16) | (i & 0xFFFF);
411 template.description = format (0, "%U rx %u int",
412 format_memif_device_name,
413 mif->dev_instance, i);
414 memif_file_add (&mq->int_clib_file_index, &template);
415 vnet_hw_if_set_rx_queue_file_index (vnm, qi,
416 mq->int_clib_file_index);
/* pick the buffer pool that is local to the queue's assigned thread */
418 ti = vnet_hw_if_get_rx_queue_thread_index (vnm, qi);
419 mq->buffer_pool_index = vlib_buffer_pool_get_default_for_numa (
420 vm, vlib_get_main_by_index (ti)->numa_node);
421 rv = vnet_hw_if_set_rx_queue_mode (vnm, qi, VNET_HW_IF_RX_MODE_DEFAULT);
422 vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);
426 (mif, "Warning: unable to set rx mode for interface %d queue %d: "
427 "rc=%d", mif->hw_if_index, i, rv);
430 vnet_hw_if_rx_mode rxmode = vnet_hw_if_get_rx_queue_mode (vnm, qi);
/* polling mode: mask peer interrupts; otherwise kick once so nothing
   already queued is missed */
432 if (rxmode == VNET_HW_IF_RX_MODE_POLLING)
433 mq->ring->flags |= MEMIF_RING_FLAG_MASK_INT;
435 vnet_hw_if_rx_queue_set_int_pending (vnm, qi);
/* grow per-thread descriptor scratch space if this ring is the largest
   seen so far (checked against thread 0 as representative) */
440 if (1 << max_log2_ring_sz > vec_len (mm->per_thread_data[0].desc_data))
442 memif_per_thread_data_t *ptd;
444 vec_foreach (ptd, mm->per_thread_data)
446 vec_validate_aligned (ptd->desc_data, pow2_mask (max_log2_ring_sz),
447 CLIB_CACHE_LINE_BYTES);
448 vec_validate_aligned (ptd->desc_len, pow2_mask (max_log2_ring_sz),
449 CLIB_CACHE_LINE_BYTES);
450 vec_validate_aligned (ptd->desc_status, pow2_mask (max_log2_ring_sz),
451 CLIB_CACHE_LINE_BYTES);
455 vlib_worker_thread_barrier_release (vm);
457 mif->flags &= ~MEMIF_IF_FLAG_CONNECTING;
458 mif->flags |= MEMIF_IF_FLAG_CONNECTED;
460 vnet_hw_interface_set_flags (vnm, mif->hw_if_index,
461 VNET_HW_INTERFACE_FLAG_LINK_UP);
/* error path: release the barrier (if taken) and report */
466 vlib_worker_thread_barrier_release (vm);
467 memif_log_err (mif, "%U", format_clib_error, err);
/* Return a pointer to ring `ring_num` of the given type inside region 0.
   Rings are laid out back-to-back: all S2M rings first, then M2S rings,
   each ring being the header plus 2^log2_ring_size descriptors. */
471 static_always_inline memif_ring_t *
472 memif_get_ring (memif_if_t * mif, memif_ring_type_t type, u16 ring_num)
474 if (vec_len (mif->regions) == 0)
476 void *p = mif->regions[0].shm;
478 sizeof (memif_ring_t) +
479 sizeof (memif_desc_t) * (1 << mif->run.log2_ring_size);
480 p += (ring_num + type * mif->run.num_s2m_rings) * ring_size;
482 return (memif_ring_t *) p;
/* Master-side setup: create region 0 (rings + optional packet buffers) as a
   shared memfd, expose vlib physmem pages as extra regions in zero-copy
   mode, initialize every ring's head/tail/cookie and (non-zero-copy)
   descriptor offsets, then allocate the tx/rx queue vectors with one
   interrupt eventfd each.
   NOTE(review): this view of the file is elided (line gaps); comments cover
   only the visible statements. */
486 memif_init_regions_and_queues (memif_if_t * mif)
488 vlib_main_t *vm = vlib_get_main ();
489 memif_socket_file_t *msf;
490 memif_ring_t *ring = NULL;
496 ASSERT (vec_len (mif->regions) == 0);
497 vec_add2_aligned (mif->regions, r, 1, CLIB_CACHE_LINE_BYTES);
/* rings occupy the start of region 0; buffers (if any) follow at this offset */
499 buffer_offset = (mif->run.num_s2m_rings + mif->run.num_m2s_rings) *
500 (sizeof (memif_ring_t) +
501 sizeof (memif_desc_t) * (1 << mif->run.log2_ring_size));
503 r->region_size = buffer_offset;
/* non-zero-copy: region 0 also carries the actual packet buffers */
505 if ((mif->flags & MEMIF_IF_FLAG_ZERO_COPY) == 0)
506 r->region_size += mif->run.buffer_size * (1 << mif->run.log2_ring_size) *
507 (mif->run.num_s2m_rings + mif->run.num_m2s_rings);
509 if ((fd = clib_mem_vm_create_fd (CLIB_MEM_PAGE_SZ_DEFAULT, "%U region 0",
510 format_memif_device_name,
511 mif->dev_instance)) == -1)
513 err = clib_mem_get_last_error ();
517 if ((ftruncate (fd, r->region_size)) == -1)
519 err = clib_error_return_unix (0, "ftruncate");
523 msf = pool_elt_at_index (memif_main.socket_files, mif->socket_file_index);
524 r->shm = clib_mem_vm_map_shared (0, r->region_size, fd, 0, "memif%lu/%lu:0",
525 msf->socket_id, mif->id);
527 if (r->shm == CLIB_MEM_VM_MAP_FAILED)
529 err = clib_error_return_unix (0, "memif shared region map failed");
/* zero-copy: export each vlib buffer pool's physmem map as its own region
   so the peer can reference our buffers directly */
535 if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
537 vlib_buffer_pool_t *bp;
539 vec_foreach (bp, vm->buffer_main->buffer_pools)
541 vlib_physmem_map_t *pm;
542 pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
543 vec_add2_aligned (mif->regions, r, 1, CLIB_CACHE_LINE_BYTES);
545 r->region_size = pm->n_pages << pm->log2_page_size;
/* initialize slave-to-master rings */
552 for (i = 0; i < mif->run.num_s2m_rings; i++)
554 ring = memif_get_ring (mif, MEMIF_RING_S2M, i);
555 ring->head = ring->tail = 0;
556 ring->cookie = MEMIF_COOKIE;
558 if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
/* non-zero-copy: point descriptors at this ring's slice of the
   buffer area in region 0 */
561 for (j = 0; j < (1 << mif->run.log2_ring_size); j++)
563 u16 slot = i * (1 << mif->run.log2_ring_size) + j;
564 ring->desc[j].region = 0;
565 ring->desc[j].offset =
566 buffer_offset + (u32) (slot * mif->run.buffer_size);
567 ring->desc[j].length = mif->run.buffer_size;
/* initialize master-to-slave rings */
570 for (i = 0; i < mif->run.num_m2s_rings; i++)
572 ring = memif_get_ring (mif, MEMIF_RING_M2S, i);
573 ring->head = ring->tail = 0;
574 ring->cookie = MEMIF_COOKIE;
576 if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
579 for (j = 0; j < (1 << mif->run.log2_ring_size); j++)
/* m2s buffer slots come after all s2m slots */
582 (i + mif->run.num_s2m_rings) * (1 << mif->run.log2_ring_size) + j;
583 ring->desc[j].region = 0;
584 ring->desc[j].offset =
585 buffer_offset + (u32) (slot * mif->run.buffer_size);
586 ring->desc[j].length = mif->run.buffer_size;
590 ASSERT (mif->tx_queues == 0);
591 vec_validate_aligned (mif->tx_queues, mif->run.num_s2m_rings - 1,
592 CLIB_CACHE_LINE_BYTES);
/* one tx queue per s2m ring, each with a non-blocking interrupt eventfd */
595 vec_foreach_index (i, mif->tx_queues)
597 memif_queue_t *mq = vec_elt_at_index (mif->tx_queues, i);
598 if ((mq->int_fd = eventfd (0, EFD_NONBLOCK)) < 0)
600 err = clib_error_return_unix (0, "eventfd[tx queue %u]", i);
604 mq->int_clib_file_index = ~0;
605 mq->ring = memif_get_ring (mif, MEMIF_RING_S2M, i);
606 mq->log2_ring_size = mif->cfg.log2_ring_size;
/* offset of the ring within its region, sent to the peer */
608 mq->offset = (void *) mq->ring - (void *) mif->regions[mq->region].shm;
610 mq->type = MEMIF_RING_S2M;
611 if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
612 vec_validate_aligned (mq->buffers, 1 << mq->log2_ring_size,
613 CLIB_CACHE_LINE_BYTES);
617 ASSERT (mif->rx_queues == 0);
618 vec_validate_aligned (mif->rx_queues, mif->run.num_m2s_rings - 1,
619 CLIB_CACHE_LINE_BYTES);
/* one rx queue per m2s ring, same setup as the tx side */
622 vec_foreach_index (i, mif->rx_queues)
624 memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, i);
625 if ((mq->int_fd = eventfd (0, EFD_NONBLOCK)) < 0)
627 err = clib_error_return_unix (0, "eventfd[rx queue %u]", i);
630 mq->int_clib_file_index = ~0;
631 mq->ring = memif_get_ring (mif, MEMIF_RING_M2S, i);
632 mq->log2_ring_size = mif->cfg.log2_ring_size;
634 mq->offset = (void *) mq->ring - (void *) mif->regions[mq->region].shm;
636 mq->type = MEMIF_RING_M2S;
637 if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
638 vec_validate_aligned (mq->buffers, 1 << mq->log2_ring_size,
639 CLIB_CACHE_LINE_BYTES);
646 memif_log_err (mif, "%U", format_clib_error, err);
/* Background process node: periodically (and on start/stop/admin events)
   walks all memif interfaces and, for admin-up slave interfaces that are
   neither connected nor connecting, attempts to open the control socket and
   start the handshake.  Yields every ~10us of work to avoid starving the
   main loop.
   NOTE(review): this view of the file is elided (line gaps); comments cover
   only the visible statements. */
651 memif_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
653 memif_main_t *mm = &memif_main;
656 uword *event_data = 0, event_type;
658 f64 start_time, last_run_duration = 0, now;
/* scratch socket, reused across connect attempts until one succeeds */
661 sock = clib_mem_alloc (sizeof (clib_socket_t));
662 clib_memset (sock, 0, sizeof (clib_socket_t));
667 vlib_process_wait_for_event_or_clock (vm, (f64) 3 -
670 vlib_process_wait_for_event (vm);
672 event_type = vlib_process_get_events (vm, &event_data);
673 vec_reset_length (event_data);
679 case MEMIF_PROCESS_EVENT_START:
682 case MEMIF_PROCESS_EVENT_STOP:
685 case MEMIF_PROCESS_EVENT_ADMIN_UP_DOWN:
691 last_run_duration = start_time = vlib_time_now (vm);
693 pool_foreach (mif, mm->interfaces)
695 memif_socket_file_t * msf = vec_elt_at_index (mm->socket_files, mif->socket_file_index);
696 /* Allow no more than 10us without a pause */
697 now = vlib_time_now (vm);
698 if (now > start_time + 10e-6)
700 vlib_process_suspend (vm, 100e-6); /* suspend for 100 us */
701 start_time = vlib_time_now (vm);
/* skip interfaces that don't need a connect attempt */
704 if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) == 0)
707 if (mif->flags & MEMIF_IF_FLAG_CONNECTING)
710 if (mif->flags & MEMIF_IF_FLAG_CONNECTED)
713 if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
715 clib_memset (sock, 0, sizeof(clib_socket_t));
716 sock->config = (char *) msf->filename;
717 sock->is_seqpacket = 1;
718 sock->is_blocking = 1;
720 if ((err = clib_socket_init (sock)))
/* master may not be listening yet; retry on next pass */
722 clib_error_free (err);
726 clib_file_t t = { 0 };
/* register the control-channel fd with the poller */
728 t.read_function = memif_slave_conn_fd_read_ready;
729 t.write_function = memif_slave_conn_fd_write_ready;
730 t.error_function = memif_slave_conn_fd_error;
731 t.file_descriptor = sock->fd;
732 t.private_data = mif->dev_instance;
733 memif_file_add (&sock->private_data, &t);
734 t.description = format (0, "%U ctl",
735 format_memif_device_name,
737 hash_set (msf->dev_instance_by_fd, sock->fd, mif->dev_instance);
739 mif->flags |= MEMIF_IF_FLAG_CONNECTING;
/* socket is now owned by the connection; allocate a fresh scratch one */
741 sock = clib_mem_alloc (sizeof(clib_socket_t));
746 last_run_duration = vlib_time_now (vm) - last_run_duration;
/* Register the reconnect/housekeeping process node. */
751 VLIB_REGISTER_NODE (memif_process_node,static) = {
752 .function = memif_process,
753 .type = VLIB_NODE_TYPE_PROCESS,
754 .name = "memif-process",
758 * Returns an unused socket id, and ~0 if it can't find one.
761 memif_get_unused_socket_id ()
763 memif_main_t *mm = &memif_main;
/* probe randomly-seeded ids; 2^10 * 2^10 ≈ 1M candidates total */
768 /* limit to 1M tries */
769 for (j = 0; j < 1 << 10; j++)
771 seed = random_u32 (&seed);
772 for (i = 0; i < 1 << 10; i++)
774 /* look around randomly generated id */
775 seed += (2 * (i % 2) - 1) * i;
/* ~0 is the "not found" sentinel, so never hand it out as an id */
776 if (seed == (u32) ~0)
778 p = hash_get (mm->socket_file_index_by_sock_id, seed);
/* Add or delete a socket-id -> filename mapping.  On delete: refuses id 0
   and in-use entries.  On add: resolves relative filenames under the VPP
   runtime directory (creating intermediate dirs), silently accepts an
   identical re-add, and rejects a conflicting one.
   NOTE(review): this view of the file is elided (line gaps); comments cover
   only the visible statements. */
788 memif_socket_filename_add_del (u8 is_add, u32 sock_id, char *sock_filename)
790 memif_main_t *mm = &memif_main;
792 memif_socket_file_t *msf;
793 clib_error_t *err = 0;
798 /* allow adding socket id 0 */
799 if (sock_id == 0 && is_add == 0)
800 return vnet_error (VNET_ERR_INVALID_ARGUMENT, "cannot delete socket id 0");
803 return vnet_error (VNET_ERR_INVALID_ARGUMENT,
804 "socked id is not specified");
/* --- delete path --- */
808 p = hash_get (mm->socket_file_index_by_sock_id, sock_id);
810 /* Don't delete non-existent entries. */
811 return vnet_error (VNET_ERR_INVALID_ARGUMENT,
812 "socket file with id %u does not exist", sock_id);
814 msf = pool_elt_at_index (mm->socket_files, *p);
815 if (msf->ref_cnt > 0)
816 return vnet_error (VNET_ERR_UNEXPECTED_INTF_STATE,
817 "socket file '%s' is in use", msf->filename);
819 vec_free (msf->filename);
820 pool_put (mm->socket_files, msf);
822 hash_unset (mm->socket_file_index_by_sock_id, sock_id);
/* --- add path: build the absolute filename --- */
827 if (sock_filename == 0 || sock_filename[0] == 0)
828 return vnet_error (VNET_ERR_INVALID_ARGUMENT,
829 "socket filename not specified");
/* abstract/prefixed socket names and absolute paths are used as-is */
831 if (clib_socket_prefix_is_valid (sock_filename))
833 name = format (0, "%s%c", sock_filename, 0);
835 else if (sock_filename[0] == '/')
837 name = format (0, "%s%c", sock_filename, 0);
841 /* copy runtime dir path */
842 vec_add (dir, vlib_unix_get_runtime_dir (),
843 strlen (vlib_unix_get_runtime_dir ()));
846 /* if sock_filename contains dirs, add them to path */
847 tmp = strrchr (sock_filename, '/');
850 idx = tmp - sock_filename;
851 vec_add (dir, sock_filename, idx);
854 vec_add1 (dir, '\0');
855 /* create socket dir */
856 if ((err = vlib_unix_recursive_mkdir (dir)))
858 clib_error_free (err);
859 err = vnet_error (VNET_ERR_SYSCALL_ERROR_1,
860 "unable to create socket dir");
865 format (0, "%s/%s%c", vlib_unix_get_runtime_dir (), sock_filename, 0);
868 p = hash_get (mm->socket_file_index_by_sock_id, sock_id);
871 msf = pool_elt_at_index (mm->socket_files, *p);
872 if (strcmp ((char *) msf->filename, (char *) name) == 0)
874 /* Silently accept identical "add". */
878 /* But don't allow a direct add of a different filename. */
879 err = vnet_error (VNET_ERR_ENTRY_ALREADY_EXISTS, "entry already exists");
/* create the new entry; `name` ownership moves into the pool element */
883 pool_get (mm->socket_files, msf);
884 clib_memset (msf, 0, sizeof (memif_socket_file_t));
886 msf->filename = name;
887 msf->socket_id = sock_id;
890 hash_set (mm->socket_file_index_by_sock_id, sock_id, msf - mm->socket_files);
/* Delete a memif interface: disconnect it, remove the vnet hw interface,
   drop it from its socket file's maps, tear down the socket file when this
   was its last user, and return the pool element.  Signals the process node
   to stop when no interfaces remain.
   NOTE(review): this view of the file is elided (line gaps); comments cover
   only the visible statements. */
899 memif_delete_if (vlib_main_t *vm, memif_if_t *mif)
901 vnet_main_t *vnm = vnet_get_main ();
902 memif_main_t *mm = &memif_main;
903 memif_socket_file_t *msf =
904 vec_elt_at_index (mm->socket_files, mif->socket_file_index);
907 mif->flags |= MEMIF_IF_FLAG_DELETING;
908 vec_free (mif->local_disc_string);
909 vec_free (mif->remote_disc_string);
911 /* bring down the interface */
912 vnet_hw_interface_set_flags (vnm, mif->hw_if_index, 0);
913 vnet_sw_interface_set_flags (vnm, mif->sw_if_index, 0);
915 err = clib_error_return (0, "interface deleted");
916 memif_disconnect (mif, err);
917 clib_error_free (err);
919 if (mif->hw_if_index != ~0)
921 /* remove the interface */
922 if (mif->mode == MEMIF_INTERFACE_MODE_IP)
923 vnet_delete_hw_interface (vnm, mif->hw_if_index);
925 ethernet_delete_interface (vnm, mif->hw_if_index);
926 mif->hw_if_index = ~0;
929 /* free interface data structures */
930 mhash_unset (&msf->dev_instance_by_id, &mif->id, 0);
932 /* remove socket file */
933 if (--(msf->ref_cnt) == 0)
935 if (msf->is_listener)
/* close the listener plus any clients still mid-handshake */
939 vec_foreach_index (i, msf->pending_clients)
940 memif_socket_close (msf->pending_clients + i);
942 memif_socket_close (&msf->sock);
943 vec_free (msf->pending_clients);
945 mhash_free (&msf->dev_instance_by_id);
946 hash_free (msf->dev_instance_by_fd);
949 err = clib_socket_close (msf->sock);
952 memif_log_err (mif, "%U", format_clib_error, err);
953 clib_error_free (err);
955 clib_mem_free (msf->sock);
/* NOTE(review): redundant — already freed above (vec_free NULLs the
   pointer, so this second call is a harmless no-op) */
959 vec_free (mif->local_disc_string);
960 clib_memset (mif, 0, sizeof (*mif));
961 pool_put (mm->interfaces, mif);
963 if (pool_elts (mm->interfaces) == 0)
964 vlib_process_signal_event (vm, memif_process_node.index,
965 MEMIF_PROCESS_EVENT_STOP, 0);
/* HW interface class used when a memif is created in IP (point-to-point) mode. */
971 VNET_HW_INTERFACE_CLASS (memif_ip_hw_if_class, static) = {
973 .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
974 .tx_hash_fn_type = VNET_HASH_FN_TYPE_IP,
/* Fill in the common DMA config defaults; the caller sets max_transfers and
   the completion callback afterwards (callback_fn is deliberately left NULL
   here). */
979 memif_prepare_dma_args (vlib_dma_config_t *args)
981 args->max_batches = 256;
982 args->max_transfer_size = VLIB_BUFFER_DEFAULT_DATA_SIZE;
/* sw_fallback: fall back to CPU copies when no DMA engine is available */
983 args->barrier_before_last = 1;
984 args->sw_fallback = 1;
985 args->callback_fn = NULL;
/* Create a memif interface on an existing socket id: validates master/slave
   compatibility with the socket file and id uniqueness, lazily initializes
   the socket file and per-thread data, allocates the interface, optionally
   registers DMA configs, creates the vnet hw interface (ethernet or IP
   mode), starts the listener for a first master, and signals the process
   node when this is the first interface.  On failure the partially-created
   interface is deleted.
   NOTE(review): this view of the file is elided (line gaps); comments cover
   only the visible statements. */
989 memif_create_if (vlib_main_t *vm, memif_create_if_args_t *args)
991 memif_main_t *mm = &memif_main;
992 vlib_thread_main_t *tm = vlib_get_thread_main ();
993 vnet_main_t *vnm = vnet_get_main ();
994 vnet_eth_interface_registration_t eir = {};
996 vnet_sw_interface_t *sw;
998 memif_socket_file_t *msf = 0;
999 clib_error_t *err = 0;
1001 p = hash_get (mm->socket_file_index_by_sock_id, args->socket_id);
1004 err = vnet_error (VNET_ERR_INVALID_ARGUMENT, "unknown socket id");
1008 msf = vec_elt_at_index (mm->socket_files, p[0]);
1010 /* existing socket file can be either master or slave but cannot be both */
1011 if (msf->ref_cnt > 0)
1013 if ((!msf->is_listener != !args->is_master))
1016 vnet_error (VNET_ERR_SUBIF_ALREADY_EXISTS,
1017 "socket file cannot be used by both master and slave");
/* interface id must be unique per socket file */
1021 p = mhash_get (&msf->dev_instance_by_id, &args->id);
1024 err = vnet_error (VNET_ERR_SUBIF_ALREADY_EXISTS,
1025 "interface already exists");
1030 /* Create new socket file */
1031 if (msf->ref_cnt == 0)
1033 mhash_init (&msf->dev_instance_by_id, sizeof (uword),
1034 sizeof (memif_interface_id_t));
1035 msf->dev_instance_by_fd = hash_create (0, sizeof (uword));
1036 msf->is_listener = (args->is_master != 0);
1038 memif_log_debug (0, "initializing socket file %s", msf->filename);
/* one-time per-thread scratch setup (buffer template + copy/buffer vecs) */
1041 if (mm->per_thread_data == 0)
1045 vec_validate_aligned (mm->per_thread_data, tm->n_vlib_mains - 1,
1046 CLIB_CACHE_LINE_BYTES);
1048 for (i = 0; i < tm->n_vlib_mains; i++)
1050 memif_per_thread_data_t *ptd =
1051 vec_elt_at_index (mm->per_thread_data, i);
1052 vlib_buffer_t *bt = &ptd->buffer_template;
1053 clib_memset (bt, 0, sizeof (vlib_buffer_t));
1054 bt->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
1055 bt->total_length_not_including_first_buffer = 0;
1056 vnet_buffer (bt)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1058 vec_validate_aligned (ptd->copy_ops, 0, CLIB_CACHE_LINE_BYTES);
1059 vec_reset_length (ptd->copy_ops);
1060 vec_validate_aligned (ptd->buffers, 0, CLIB_CACHE_LINE_BYTES);
1061 vec_reset_length (ptd->buffers);
/* allocate and initialize the interface pool element */
1065 pool_get (mm->interfaces, mif);
1066 clib_memset (mif, 0, sizeof (*mif));
1067 mif->dev_instance = mif - mm->interfaces;
1068 mif->socket_file_index = msf - mm->socket_files;
1070 mif->sw_if_index = mif->hw_if_index = mif->per_interface_next_index = ~0;
1071 mif->mode = args->mode;
1073 mif->secret = vec_dup (args->secret);
1075 /* register dma config if enabled */
1078 vlib_dma_config_t dma_args;
1079 bzero (&dma_args, sizeof (dma_args));
1080 memif_prepare_dma_args (&dma_args);
/* separate configs for rx (input) and tx completion callbacks */
1082 dma_args.max_transfers = 1 << args->log2_ring_size;
1083 dma_args.callback_fn = memif_dma_completion_cb;
1084 mif->dma_input_config = vlib_dma_config_add (vm, &dma_args);
1085 dma_args.callback_fn = memif_tx_dma_completion_cb;
1086 mif->dma_tx_config = vlib_dma_config_add (vm, &dma_args);
1089 if (mif->mode == MEMIF_INTERFACE_MODE_ETHERNET)
/* no MAC supplied: generate a locally-administered (02:fe:..) random one */
1092 if (!args->hw_addr_set)
1094 f64 now = vlib_time_now (vm);
1096 rnd = (u32) (now * 1e6);
1097 rnd = random_u32 (&rnd);
1099 memcpy (args->hw_addr + 2, &rnd, sizeof (rnd));
1100 args->hw_addr[0] = 2;
1101 args->hw_addr[1] = 0xfe;
1104 eir.dev_class_index = memif_device_class.index;
1105 eir.dev_instance = mif->dev_instance;
1106 eir.address = args->hw_addr;
1107 eir.cb.flag_change = memif_eth_flag_change;
1108 eir.cb.set_max_frame_size = memif_eth_set_max_frame_size;
1109 mif->hw_if_index = vnet_eth_register_interface (vnm, &eir);
1111 else if (mif->mode == MEMIF_INTERFACE_MODE_IP)
1114 vnet_register_interface (vnm, memif_device_class.index,
1116 memif_ip_hw_if_class.index,
1122 vnet_error (VNET_ERR_SYSCALL_ERROR_2, "unsupported interface mode");
1126 sw = vnet_get_hw_sw_interface (vnm, mif->hw_if_index);
1127 mif->sw_if_index = sw->sw_if_index;
/* ring-count config is role-relative: master's rx rings are s2m rings */
1129 mif->cfg.log2_ring_size = args->log2_ring_size;
1130 mif->cfg.buffer_size = args->buffer_size;
1131 mif->cfg.num_s2m_rings =
1132 args->is_master ? args->rx_queues : args->tx_queues;
1133 mif->cfg.num_m2s_rings =
1134 args->is_master ? args->tx_queues : args->rx_queues;
1136 args->sw_if_index = mif->sw_if_index;
1138 /* If this is new one, start listening */
1139 if (msf->is_listener && msf->ref_cnt == 0)
1141 clib_socket_t *s = clib_mem_alloc (sizeof (clib_socket_t));
1143 ASSERT (msf->sock == 0);
1146 clib_memset (s, 0, sizeof (clib_socket_t));
1147 s->config = (char *) msf->filename;
1150 s->allow_group_write = 1;
1151 s->is_seqpacket = 1;
1154 if ((err = clib_socket_init (s)))
1156 err->code = VNET_ERR_SYSCALL_ERROR_4;
/* poll the listener for incoming slave connections */
1160 clib_file_t template = { 0 };
1161 template.read_function = memif_conn_fd_accept_ready;
1162 template.file_descriptor = msf->sock->fd;
1163 template.private_data = mif->socket_file_index;
1164 template.description = format (0, "memif listener %s", msf->filename);
1165 memif_file_add (&msf->sock->private_data, &template);
1170 if (args->is_master == 0)
1172 mif->flags |= MEMIF_IF_FLAG_IS_SLAVE;
1173 if (args->is_zero_copy)
1174 mif->flags |= MEMIF_IF_FLAG_ZERO_COPY;
1178 mif->flags |= MEMIF_IF_FLAG_USE_DMA;
1180 vnet_hw_if_set_caps (vnm, mif->hw_if_index, VNET_HW_IF_CAP_INT_MODE);
1181 vnet_hw_if_set_input_node (vnm, mif->hw_if_index, memif_input_node.index);
1182 mhash_set (&msf->dev_instance_by_id, &mif->id, mif->dev_instance, 0);
/* first interface: wake the reconnect process */
1184 if (pool_elts (mm->interfaces) == 1)
1186 vlib_process_signal_event (vm, memif_process_node.index,
1187 MEMIF_PROCESS_EVENT_START, 0);
/* error path: undo the partial creation */
1192 memif_delete_if (vm, mif);
1194 memif_log_err (mif, "%U", format_clib_error, err);
/* Admin up/down handler: track the admin flag on the memif, raise link-up
   immediately if already connected, and poke the process node so it can
   (dis)connect as needed.
   NOTE(review): `error` is declared `static` — a function-local static
   error pointer is unusual; likely intended as a plain local. Confirm
   against the full source. */
1202 memif_interface_admin_up_down (vnet_main_t *vnm, u32 hw_if_index, u32 flags)
1204 memif_main_t *mm = &memif_main;
1205 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1206 memif_if_t *mif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
1207 static clib_error_t *error = 0;
1209 if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
1211 if (mif->flags & MEMIF_IF_FLAG_CONNECTED)
1213 vnet_hw_interface_set_flags (vnm, mif->hw_if_index,
1214 VNET_HW_INTERFACE_FLAG_LINK_UP);
1216 mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
1219 mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;
1221 vlib_process_signal_event (vnm->vlib_main, memif_process_node.index,
1222 MEMIF_PROCESS_EVENT_ADMIN_UP_DOWN, 0);
/* Plugin init: zero the main struct, register the log class, hook up the
   binary API, and pre-create the default socket-id-0 filename mapping. */
1226 static clib_error_t *
1227 memif_init (vlib_main_t * vm)
1229 memif_main_t *mm = &memif_main;
1231 clib_memset (mm, 0, sizeof (memif_main_t));
1233 mm->log_class = vlib_log_register_class ("memif_plugin", 0);
1234 memif_log_debug (0, "initialized");
1236 /* initialize binary API */
1237 memif_plugin_api_hookup (vm);
1240 * Pre-stuff socket filename pool with a non-modifieable mapping
1241 * for socket-id 0 to MEMIF_DEFAULT_SOCKET_FILENAME in the
1242 * default run-time directory.
1244 return memif_socket_filename_add_del (1, 0, MEMIF_DEFAULT_SOCKET_FILENAME);
/* Register init function and the plugin itself with VPP. */
1247 VLIB_INIT_FUNCTION (memif_init);
1250 VLIB_PLUGIN_REGISTER () = {
1251 .version = VPP_BUILD_VER,
1252 .description = "Packet Memory Interface (memif) -- Experimental",
1257 * fd.io coding-style-patch-verification: ON
1260 * eval: (c-set-style "gnu")