/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __VIRTIO_VHOST_USER_INLINE_H__
#define __VIRTIO_VHOST_USER_INLINE_H__

/* vhost-user inline functions */
#include <vppinfra/elog.h>
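
/*
 * Translate a guest physical address into a pointer into the mmap'ed
 * guest memory regions. *hint caches the index of the last region that
 * matched, so back-to-back lookups into the same region skip the
 * search. Returns NULL (and logs an elog event) if no region covers
 * the address.
 */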
static_always_inline void *
map_guest_mem (vhost_user_intf_t * vui, uword addr, u32 * hint)
{
  int i = *hint;

  /* Fast path: the hinted region from the previous lookup still covers
   * this address. */
  if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
		    ((vui->regions[i].guest_phys_addr +
		      vui->regions[i].memory_size) > addr)))
    {
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
#if __SSE4_2__
  __m128i rl, rh, al, ah, r;

  /* Broadcast the address; the +1 turns the signed greater-than
   * compare into an "addr >= lo" check. */
  al = _mm_set1_epi64x (addr + 1);
  ah = _mm_set1_epi64x (addr);

  /* regions 0-1 */
  rl = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_lo[0]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_hi[0]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_and_si128 (rl, rh);

  /* regions 2-3 */
  rl = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_lo[2]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_hi[2]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);

  /* regions 4-5 */
  rl = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_lo[4]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_hi[4]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);

  /* regions 6-7 */
  rl = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_lo[6]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_hi[6]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);

  /* Pack one result byte per region, then find the first match. */
  r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
  i = count_trailing_zeros (_mm_movemask_epi8 (r) |
			    (1 << VHOST_MEMORY_MAX_NREGIONS));

  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
#elif __aarch64__ && __ARM_NEON
  uint64x2_t al, ah, rl, rh, r;
  uint32_t u32 = 0;		/* per-region match bitmask */

  al = vdupq_n_u64 (addr + 1);
  ah = vdupq_n_u64 (addr);

  /* regions 0-1 */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[0]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[0]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= (vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 1);
  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* regions 2-3 */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[2]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[2]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 2);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 3);
  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* regions 4-5: the bit positions must match the region indices so
   * that count_trailing_zeros returns a valid region number */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[4]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[4]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 4);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 5);
  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* regions 6-7, mirroring the SSE4.2 path above */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[6]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[6]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 6);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 7);

  i = count_trailing_zeros (u32 | (1 << VHOST_MEMORY_MAX_NREGIONS));

vhost_map_guest_mem_done:
  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
		       vui->regions[i].guest_phys_addr);
    }
#else
  /* Scalar fallback: linear search over the configured regions. */
  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].guest_phys_addr <= addr) &&
	  ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
	   addr))
	{
	  *hint = i;
	  return (void *) (vui->region_mmap_addr[i] + addr -
			   vui->regions[i].guest_phys_addr);
	}
    }
#endif

  /* No region covers this address: log the failure and return NULL. */
  ELOG_TYPE_DECLARE (el) =
  {
    .format = "failed to map guest mem addr %lx",
    .format_args = "i8",
  };
  struct
  {
    uword addr;
  } *ed;
  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
  ed->addr = addr;
  *hint = 0;
  return 0;
}
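
/*
 * Usage sketch (illustrative only -- `desc_gpa` is a hypothetical
 * guest physical address, not a field defined in this file):
 *
 *   u32 map_hint = 0;
 *   void *p = map_guest_mem (vui, desc_gpa, &map_hint);
 *   if (PREDICT_FALSE (p == 0))
 *     return;  // address not backed by any region; fail the operation
 */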
static_always_inline void *
map_user_mem (vhost_user_intf_t * vui, uword addr)
{
  int i;

  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].userspace_addr <= addr) &&
	  ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
	   addr))
	{
	  return (void *) (vui->region_mmap_addr[i] + addr -
			   vui->regions[i].userspace_addr);
	}
    }
  return 0;
}
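
/*
 * Live-migration dirty logging: the log is a bitmap shared with the
 * front end, one bit per VHOST_LOG_PAGE-sized page of guest memory.
 */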

#define VHOST_LOG_PAGE 0x1000

static_always_inline void
vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui,
			      u64 addr, u64 len, u8 is_host_address)
{
  if (PREDICT_TRUE (vui->log_base_addr == 0
		    || !(vui->features & VIRTIO_FEATURE (VHOST_F_LOG_ALL))))
    {
      return;
    }
  if (is_host_address)
    {
      addr = pointer_to_uword (map_user_mem (vui, (uword) addr));
    }
  if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
    {
      vu_log_debug (vui, "vhost_user_log_dirty_pages(): out of range\n");
      return;
    }

  CLIB_MEMORY_BARRIER ();
  u64 page = addr / VHOST_LOG_PAGE;
  while (page * VHOST_LOG_PAGE < addr + len)
    {
      ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
      page++;
    }
}
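
/*
 * Worked example (assumed values): with VHOST_LOG_PAGE = 0x1000,
 * addr = 0x5000 and len = 0x2001 span pages 5..7, so the loop sets
 * bits 5, 6 and 7 of log byte 0 -- one bit per dirtied 4 KiB page.
 */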

#define vhost_user_log_dirty_ring(vui, vq, member)                      \
  if (PREDICT_FALSE (vq->log_used))                                     \
    {                                                                   \
      vhost_user_log_dirty_pages_2 (                                    \
	vui,                                                            \
	vq->log_guest_addr +                                            \
	  STRUCT_OFFSET_OF (vnet_virtio_vring_used_t, member),          \
	sizeof (vq->used->member), 0);                                  \
    }
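
/*
 * Example use (sketch): after updating used->idx on a split ring,
 *   vhost_user_log_dirty_ring (vui, txvq, idx);
 * marks the guest page holding the used ring's idx field dirty so the
 * update survives live migration.
 */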

static_always_inline u8 *
format_vhost_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
  vhost_user_intf_t *vui = vum->vhost_user_interfaces + t->device_index;
  vnet_sw_interface_t *sw;
  u32 indent;

  if (pool_is_free (vum->vhost_user_interfaces, vui))
    {
      s = format (s, "vhost-user interface is deleted");
      return s;
    }
  sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
  indent = format_get_indent (s);
  s = format (s, "%U %U queue %d\n", format_white_space, indent,
	      format_vnet_sw_interface_name, vnm, sw, t->qid);

  s = format (s, "%U virtio flags:\n", format_white_space, indent);
#define _(n,i,st) \
  if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
    s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);
  foreach_virtio_trace_flags
#undef _
  s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
	      format_white_space, indent, t->first_desc_len);

  s = format (s, "%U flags 0x%02x gso_type %u\n",
	      format_white_space, indent,
	      t->hdr.hdr.flags, t->hdr.hdr.gso_type);

  if (vui->virtio_net_hdr_sz == 12)
    s = format (s, "%U num_buff %u",
		format_white_space, indent, t->hdr.num_buffers);

  return s;
}

static_always_inline u64
vhost_user_is_packed_ring_supported (vhost_user_intf_t * vui)
{
  return (vui->features & VIRTIO_FEATURE (VIRTIO_F_RING_PACKED));
}

static_always_inline u64
vhost_user_is_event_idx_supported (vhost_user_intf_t * vui)
{
  return (vui->features & VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX));
}
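
/*
 * Signal the guest through the vring's call file descriptor (typically
 * an eventfd): writing a u64 increments the eventfd counter and raises
 * the interrupt on the front-end side.
 */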
static_always_inline void
vhost_user_kick (vlib_main_t * vm, vhost_user_vring_t * vq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u64 x = 1;
  int fd = UNIX_GET_FD (vq->callfd_idx);
  int rv;

  rv = write (fd, &x, sizeof (x));
  if (PREDICT_FALSE (rv <= 0))
    {
      clib_unix_warning
	("Error: Could not write to unix socket for callfd %d", fd);
      return;
    }

  vq->n_since_last_int = 0;
  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
}

static_always_inline u16
vhost_user_avail_event_idx (vhost_user_vring_t * vq)
{
  /* avail_event lives in the last entry of the used ring */
  volatile u16 *event_idx = (u16 *) &(vq->used->ring[vq->qsz_mask + 1]);

  return *event_idx;
}

static_always_inline u16
vhost_user_used_event_idx (vhost_user_vring_t * vq)
{
  /* used_event lives in the last entry of the avail ring */
  volatile u16 *event_idx = (u16 *) &(vq->avail->ring[vq->qsz_mask + 1]);

  return *event_idx;
}

static_always_inline u16
vhost_user_need_event (u16 event_idx, u16 new_idx, u16 old_idx)
{
  /* True when event_idx falls in the window (old_idx, new_idx],
   * computed in modulo-2^16 arithmetic (the virtio spec's
   * vring_need_event() check). */
  return ((u16) (new_idx - event_idx - 1) < (u16) (new_idx - old_idx));
}
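
/*
 * Worked example: with event_idx = 5, new_idx = 7, old_idx = 3 the test
 * is (u16) (7 - 5 - 1) = 1 < (u16) (7 - 3) = 4, i.e. the index the
 * driver asked to be notified at was just crossed, so a kick is due.
 * The u16 casts keep the comparison correct across 16-bit wrap-around.
 */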

static_always_inline void
vhost_user_send_call_event_idx (vlib_main_t * vm, vhost_user_vring_t * vq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u8 first_kick = vq->first_kick;
  u16 event_idx = vhost_user_used_event_idx (vq);

  vq->first_kick = 1;
  if (vhost_user_need_event (event_idx, vq->last_used_idx, vq->last_kick) ||
      PREDICT_FALSE (!first_kick))
    {
      vhost_user_kick (vm, vq);
      vq->last_kick = event_idx;
    }
  else
    {
      vq->n_since_last_int = 0;
      vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
    }
}

static_always_inline void
vhost_user_send_call_event_idx_packed (vlib_main_t * vm,
				       vhost_user_vring_t * vq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u8 first_kick = vq->first_kick;
  u16 off_wrap;
  u16 event_idx;
  u16 new_idx = vq->last_used_idx;
  u16 old_idx = vq->last_kick;

  if (PREDICT_TRUE (vq->avail_event->flags == VRING_EVENT_F_DESC))
    {
      CLIB_COMPILER_BARRIER ();
      /* Bits 0-14 of off_wrap hold the event offset, bit 15 the wrap
       * counter the driver expects. */
      off_wrap = vq->avail_event->off_wrap;
      event_idx = off_wrap & 0x7fff;
      if (vq->used_wrap_counter != (off_wrap >> 15))
	event_idx -= (vq->qsz_mask + 1);

      if (new_idx <= old_idx)
	old_idx -= (vq->qsz_mask + 1);

      vq->first_kick = 1;
      vq->last_kick = event_idx;
      if (vhost_user_need_event (event_idx, new_idx, old_idx) ||
	  PREDICT_FALSE (!first_kick))
	vhost_user_kick (vm, vq);
      else
	{
	  vq->n_since_last_int = 0;
	  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
	}
    }
  else
    vhost_user_kick (vm, vq);
}

static_always_inline void
vhost_user_send_call (vlib_main_t * vm, vhost_user_intf_t * vui,
		      vhost_user_vring_t * vq)
{
  if (vhost_user_is_event_idx_supported (vui))
    {
      if (vhost_user_is_packed_ring_supported (vui))
	vhost_user_send_call_event_idx_packed (vm, vq);
      else
	vhost_user_send_call_event_idx (vm, vq);
    }
  else
    vhost_user_kick (vm, vq);
}

static_always_inline u8
vui_is_link_up (vhost_user_intf_t * vui)
{
  return vui->admin_up && vui->is_ready;
}

static_always_inline void
vhost_user_update_gso_interface_count (vhost_user_intf_t * vui, u8 add)
{
  vhost_user_main_t *vum = &vhost_user_main;

  if (vui->enable_gso)
    {
      if (add)
	{
	  vum->gso_count++;
	}
      else
	{
	  ASSERT (vum->gso_count > 0);
	  vum->gso_count--;
	}
    }
}

static_always_inline u8
vhost_user_packed_desc_available (vhost_user_vring_t * vring, u16 idx)
{
  return (((vring->packed_desc[idx].flags & VRING_DESC_F_AVAIL) ==
	   vring->avail_wrap_counter));
}
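
/*
 * Packed-ring availability: a descriptor is available when its AVAIL
 * flag bit equals the ring's current wrap counter; avail_wrap_counter
 * is therefore kept as 0 or VRING_DESC_F_AVAIL and toggled on each
 * ring wrap (see vhost_user_advance_last_avail_idx below).
 */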

static_always_inline void
vhost_user_advance_last_avail_idx (vhost_user_vring_t * vring)
{
  vring->last_avail_idx++;
  if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
    {
      vring->avail_wrap_counter ^= VRING_DESC_F_AVAIL;
      vring->last_avail_idx = 0;
    }
}
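
/*
 * Example: with a 256-entry ring (qsz_mask = 255), advancing from
 * last_avail_idx = 255 yields 256, (256 & 255) == 0, so the index
 * resets to 0 and the wrap counter toggles.
 */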

static_always_inline void
vhost_user_advance_last_avail_table_idx (vhost_user_intf_t * vui,
					 vhost_user_vring_t * vring,
					 u8 chained)
{
  if (chained)
    {
      vnet_virtio_vring_packed_desc_t *desc_table = vring->packed_desc;

      /* pick up the slot of the next avail idx */
      while (desc_table[vring->last_avail_idx & vring->qsz_mask].flags &
	     VRING_DESC_F_NEXT)
	vhost_user_advance_last_avail_idx (vring);
    }

  vhost_user_advance_last_avail_idx (vring);
}

static_always_inline void
vhost_user_undo_advanced_last_avail_idx (vhost_user_vring_t * vring)
{
  if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
    vring->avail_wrap_counter ^= VRING_DESC_F_AVAIL;

  if (PREDICT_FALSE (vring->last_avail_idx == 0))
    vring->last_avail_idx = vring->qsz_mask;
  else
    vring->last_avail_idx--;
}
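
/*
 * Rollback helper: when a packet that was supposed to span
 * hdr->num_buffers mergeable-rx buffers cannot be completed, the extra
 * buffers already claimed are returned to the available ring and the
 * processed-descriptor count is adjusted to match.
 */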
static_always_inline void
vhost_user_dequeue_descs (vhost_user_vring_t * rxvq,
			  vnet_virtio_net_hdr_mrg_rxbuf_t * hdr,
			  u16 * n_descs_processed)
{
  u16 i;

  *n_descs_processed -= (hdr->num_buffers - 1);
  for (i = 0; i < hdr->num_buffers - 1; i++)
    vhost_user_undo_advanced_last_avail_idx (rxvq);
}

static_always_inline void
vhost_user_dequeue_chained_descs (vhost_user_vring_t * rxvq,
				  u16 * n_descs_processed)
{
  while (*n_descs_processed)
    {
      vhost_user_undo_advanced_last_avail_idx (rxvq);
      (*n_descs_processed)--;
    }
}

static_always_inline void
vhost_user_advance_last_used_idx (vhost_user_vring_t * vring)
{
  vring->last_used_idx++;
  if (PREDICT_FALSE ((vring->last_used_idx & vring->qsz_mask) == 0))
    {
      vring->used_wrap_counter ^= 1;
      vring->last_used_idx = 0;
    }
}

#endif /* __VIRTIO_VHOST_USER_INLINE_H__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */