/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #ifndef __VIRTIO_VHOST_USER_INLINE_H__
16 #define __VIRTIO_VHOST_USER_INLINE_H__
17 /* vhost-user inline functions */
18 #include <vppinfra/elog.h>
20 static_always_inline void *
21 map_guest_mem (vhost_user_intf_t * vui, uword addr, u32 * hint)
24 if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
25 ((vui->regions[i].guest_phys_addr +
26 vui->regions[i].memory_size) > addr)))
28 return (void *) (vui->region_mmap_addr[i] + addr -
29 vui->regions[i].guest_phys_addr);
32 __m128i rl, rh, al, ah, r;
33 al = _mm_set1_epi64x (addr + 1);
34 ah = _mm_set1_epi64x (addr);
36 rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[0]);
37 rl = _mm_cmpgt_epi64 (al, rl);
38 rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[0]);
39 rh = _mm_cmpgt_epi64 (rh, ah);
40 r = _mm_and_si128 (rl, rh);
42 rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[2]);
43 rl = _mm_cmpgt_epi64 (al, rl);
44 rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[2]);
45 rh = _mm_cmpgt_epi64 (rh, ah);
46 r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);
48 rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[4]);
49 rl = _mm_cmpgt_epi64 (al, rl);
50 rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[4]);
51 rh = _mm_cmpgt_epi64 (rh, ah);
52 r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);
54 rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[6]);
55 rl = _mm_cmpgt_epi64 (al, rl);
56 rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[6]);
57 rh = _mm_cmpgt_epi64 (rh, ah);
58 r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);
60 r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
61 i = count_trailing_zeros (_mm_movemask_epi8 (r) |
62 (1 << VHOST_MEMORY_MAX_NREGIONS));
64 if (i < vui->nregions)
67 return (void *) (vui->region_mmap_addr[i] + addr -
68 vui->regions[i].guest_phys_addr);
70 #elif __aarch64__ && __ARM_NEON
71 uint64x2_t al, ah, rl, rh, r;
74 al = vdupq_n_u64 (addr + 1);
75 ah = vdupq_n_u64 (addr);
78 rl = vld1q_u64 (&vui->region_guest_addr_lo[0]);
79 rl = vcgtq_u64 (al, rl);
80 rh = vld1q_u64 (&vui->region_guest_addr_hi[0]);
81 rh = vcgtq_u64 (rh, ah);
82 r = vandq_u64 (rl, rh);
83 u32 |= (vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1);
84 u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 1);
88 i = count_trailing_zeros (u32);
89 goto vhost_map_guest_mem_done;
93 rl = vld1q_u64 (&vui->region_guest_addr_lo[2]);
94 rl = vcgtq_u64 (al, rl);
95 rh = vld1q_u64 (&vui->region_guest_addr_hi[2]);
96 rh = vcgtq_u64 (rh, ah);
97 r = vandq_u64 (rl, rh);
98 u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 2);
99 u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 3);
103 i = count_trailing_zeros (u32);
104 goto vhost_map_guest_mem_done;
108 rl = vld1q_u64 (&vui->region_guest_addr_lo[4]);
109 rl = vcgtq_u64 (al, rl);
110 rh = vld1q_u64 (&vui->region_guest_addr_hi[4]);
111 rh = vcgtq_u64 (rh, ah);
112 r = vandq_u64 (rl, rh);
113 u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 6);
114 u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 7);
116 i = count_trailing_zeros (u32 | (1 << VHOST_MEMORY_MAX_NREGIONS));
118 vhost_map_guest_mem_done:
119 if (i < vui->nregions)
122 return (void *) (vui->region_mmap_addr[i] + addr -
123 vui->regions[i].guest_phys_addr);
126 for (i = 0; i < vui->nregions; i++)
128 if ((vui->regions[i].guest_phys_addr <= addr) &&
129 ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
133 return (void *) (vui->region_mmap_addr[i] + addr -
134 vui->regions[i].guest_phys_addr);
139 ELOG_TYPE_DECLARE (el) =
141 .format = "failed to map guest mem addr %lx",
149 ed = ELOG_DATA (&vlib_global_main.elog_main, el);
155 static_always_inline void *
156 map_user_mem (vhost_user_intf_t * vui, uword addr)
159 for (i = 0; i < vui->nregions; i++)
161 if ((vui->regions[i].userspace_addr <= addr) &&
162 ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
165 return (void *) (vui->region_mmap_addr[i] + addr -
166 vui->regions[i].userspace_addr);
172 #define VHOST_LOG_PAGE 0x1000
174 static_always_inline void
175 vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui,
176 u64 addr, u64 len, u8 is_host_address)
178 if (PREDICT_TRUE (vui->log_base_addr == 0
179 || !(vui->features & (1 << FEAT_VHOST_F_LOG_ALL))))
185 addr = pointer_to_uword (map_user_mem (vui, (uword) addr));
187 if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
189 vu_log_debug (vui, "vhost_user_log_dirty_pages(): out of range\n");
193 CLIB_MEMORY_BARRIER ();
194 u64 page = addr / VHOST_LOG_PAGE;
195 while (page * VHOST_LOG_PAGE < addr + len)
197 ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
/*
 * Log the bytes of the used ring covered by 'member' as dirty.
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe inside an unbraced if/else.
 */
#define vhost_user_log_dirty_ring(vui, vq, member)                           \
do {                                                                         \
  if (PREDICT_FALSE(vq->log_used)) {                                         \
    vhost_user_log_dirty_pages_2(vui, vq->log_guest_addr +                   \
				 STRUCT_OFFSET_OF(vring_used_t, member),     \
				 sizeof(vq->used->member), 0);               \
  }                                                                          \
} while (0)
209 static_always_inline u8 *
210 format_vhost_trace (u8 * s, va_list * va)
212 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
213 CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
214 CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
215 vhost_user_main_t *vum = &vhost_user_main;
216 vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
217 vhost_user_intf_t *vui = vum->vhost_user_interfaces + t->device_index;
218 vnet_sw_interface_t *sw;
221 if (pool_is_free (vum->vhost_user_interfaces, vui))
223 s = format (s, "vhost-user interface is deleted");
226 sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
227 indent = format_get_indent (s);
228 s = format (s, "%U %U queue %d\n", format_white_space, indent,
229 format_vnet_sw_interface_name, vnm, sw, t->qid);
231 s = format (s, "%U virtio flags:\n", format_white_space, indent);
233 if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
234 s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);
235 foreach_virtio_trace_flags
237 s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
238 format_white_space, indent, t->first_desc_len);
240 s = format (s, "%U flags 0x%02x gso_type %u\n",
241 format_white_space, indent,
242 t->hdr.hdr.flags, t->hdr.hdr.gso_type);
244 if (vui->virtio_net_hdr_sz == 12)
245 s = format (s, "%U num_buff %u",
246 format_white_space, indent, t->hdr.num_buffers);
251 static_always_inline void
252 vhost_user_send_call (vlib_main_t * vm, vhost_user_vring_t * vq)
254 vhost_user_main_t *vum = &vhost_user_main;
256 int fd = UNIX_GET_FD (vq->callfd_idx);
259 rv = write (fd, &x, sizeof (x));
263 ("Error: Could not write to unix socket for callfd %d", fd);
267 vq->n_since_last_int = 0;
268 vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
271 static_always_inline u8
272 vui_is_link_up (vhost_user_intf_t * vui)
274 return vui->admin_up && vui->is_ready;
277 static_always_inline void
278 vhost_user_update_gso_interface_count (vhost_user_intf_t * vui, u8 add)
280 vhost_user_main_t *vum = &vhost_user_main;
290 ASSERT (vum->gso_count > 0);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */