/* SPDX-License-Identifier: Apache-2.0
 * Copyright (c) 2022 Cisco Systems, Inc.
 * Copyright (c) 2022 Intel and/or its affiliates.
 */

#include <vlib/vlib.h>
#include <vlib/pci/pci.h>
#include <vlib/dma/dma.h>
#include <vppinfra/heap.h>
#include <vppinfra/atomics.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <dma_intel/dsa_intel.h>

extern vlib_node_registration_t intel_dsa_node;
VLIB_REGISTER_LOG_CLASS (intel_dsa_log, static) = {
  .class_name = "intel_dsa",
  .subclass_name = "dsa",
};

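/* a DSA channel (device work queue) may be shared by several worker
 * threads; the helpers below implement a test-and-test-and-set spinlock
 * around it and elide locking entirely when at most one thread uses the
 * channel */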
static void
intel_dsa_channel_lock (intel_dsa_channel_t *ch)
{
  u8 expected = 0;

  if (ch->n_threads < 2)
    return;

  /* channel is used by multiple threads so we need to lock it */
  while (!__atomic_compare_exchange_n (&ch->lock, &expected,
				       /* desired */ 1, /* weak */ 0,
				       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    {
      /* spin on a plain load before retrying the CAS */
      while (__atomic_load_n (&ch->lock, __ATOMIC_RELAXED))
	CLIB_PAUSE ();
      expected = 0;
    }
}

static void
intel_dsa_channel_unlock (intel_dsa_channel_t *ch)
{
  if (ch->n_threads < 2)
    return;

  __atomic_store_n (&ch->lock, 0, __ATOMIC_RELEASE);
}

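/* batches are recycled through a per-thread, per-config freelist and are
 * allocated from physmem so the device can address the descriptors and
 * the completion record */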
static vlib_dma_batch_t *
intel_dsa_batch_new (vlib_main_t *vm, struct vlib_dma_config_data *cd)
{
  intel_dsa_main_t *idm = &intel_dsa_main;
  intel_dsa_config_t *idc;
  intel_dsa_batch_t *b;

  /* per-thread entry for this config */
  idc = vec_elt_at_index (idm->dsa_config_heap,
			  cd->private_data + vm->thread_index);

  if (vec_len (idc->freelist) > 0)
    b = vec_pop (idc->freelist);
  else
    {
      clib_spinlock_lock (&idm->lock);
      b = vlib_physmem_alloc (vm, idc->alloc_size);
      clib_spinlock_unlock (&idm->lock);
      /* if no free space in physmem, force quit */
      ASSERT (b != NULL);
      *b = idc->batch_template;
      b->max_transfers = idc->max_transfers;

      u32 def_flags = (INTEL_DSA_OP_MEMMOVE << INTEL_DSA_OP_SHIFT) |
		      INTEL_DSA_FLAG_CACHE_CONTROL;
      if (b->ch->block_on_fault)
	def_flags |= INTEL_DSA_FLAG_BLOCK_ON_FAULT;

      for (int i = 0; i < idc->max_transfers; i++)
	{
	  intel_dsa_desc_t *dsa_desc = b->descs + i;
	  dsa_desc->op_flags = def_flags;
	}
    }

  return &b->batch;
}

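/* MOVDIR64B moves a 64-byte descriptor to the device portal as one
 * atomic store; it is open-coded as raw opcode bytes (destination
 * address in rax, source in rdx, per the 0x02 ModRM byte) so the file
 * also builds with toolchains that lack the intrinsic */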
#if defined(__x86_64__) || defined(i386)
static_always_inline void
__movdir64b (volatile void *dst, const void *src)
{
  asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
		:
		: "a" (dst), "d" (src)
		: "memory");
}
#endif

static_always_inline void
intel_dsa_batch_fallback (vlib_main_t *vm, intel_dsa_batch_t *b,
			  intel_dsa_channel_t *ch)
{
  /* copy with the CPU when the hardware path is unavailable */
  for (u16 i = 0; i < b->batch.n_enq; i++)
    {
      intel_dsa_desc_t *desc = &b->descs[i];
      clib_memcpy_fast (desc->dst, desc->src, desc->size);
    }
  b->status = INTEL_DSA_STATUS_CPU_SUCCESS;
}

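/* a batch holding a single transfer is submitted as one MEMMOVE
 * descriptor; larger batches are submitted through a BATCH descriptor
 * that points at the descriptor array; either way the device reports
 * completion into the batch's completion cache line */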
static int
intel_dsa_batch_submit (vlib_main_t *vm, struct vlib_dma_batch *vb)
{
  intel_dsa_main_t *idm = &intel_dsa_main;
  intel_dsa_batch_t *b = (intel_dsa_batch_t *) vb;
  intel_dsa_channel_t *ch = b->ch;

  if (PREDICT_FALSE (vb->n_enq == 0))
    {
      vec_add1 (idm->dsa_config_heap[b->config_heap_index].freelist, b);
      return 0;
    }

  intel_dsa_channel_lock (ch);
  if (ch->n_enq >= ch->size)
    {
      if (!b->sw_fallback)
	{
	  intel_dsa_channel_unlock (ch);
	  return 0;
	}
      /* skip channel limitation if first pending finished */
      intel_dsa_batch_t *lb = NULL;
      u32 n_pendings =
	vec_len (idm->dsa_threads[vm->thread_index].pending_batches);
      if (n_pendings)
	lb =
	  idm->dsa_threads[vm->thread_index].pending_batches[n_pendings - 1];

      if (!lb || lb->status != INTEL_DSA_STATUS_SUCCESS)
	{
	  intel_dsa_batch_fallback (vm, b, ch);
	  goto done;
	}
    }

  b->status = INTEL_DSA_STATUS_BUSY;
  if (PREDICT_FALSE (vb->n_enq == 1))
    {
      intel_dsa_desc_t *desc = &b->descs[0];
      desc->completion = (u64) &b->completion_cl;
      desc->op_flags |= INTEL_DSA_FLAG_COMPLETION_ADDR_VALID |
			INTEL_DSA_FLAG_REQUEST_COMPLETION;
#if defined(__x86_64__) || defined(i386)
      _mm_sfence (); /* fence before writing desc to device */
      __movdir64b (ch->portal, (void *) desc);
#endif
    }
  else
    {
      /* batch descriptor lives past the last transfer descriptor */
      intel_dsa_desc_t *batch_desc = &b->descs[b->max_transfers];
      batch_desc->op_flags = (INTEL_DSA_OP_BATCH << INTEL_DSA_OP_SHIFT) |
			     INTEL_DSA_FLAG_COMPLETION_ADDR_VALID |
			     INTEL_DSA_FLAG_REQUEST_COMPLETION;
      batch_desc->desc_addr = (void *) (b->descs);
      batch_desc->size = vb->n_enq;
      batch_desc->completion = (u64) &b->completion_cl;
#if defined(__x86_64__) || defined(i386)
      _mm_sfence (); /* fence before writing desc to device */
      __movdir64b (ch->portal, (void *) batch_desc);
#endif
    }

  ch->submitted++;
  ch->n_enq++;

done:
  intel_dsa_channel_unlock (ch);
  vec_add1 (idm->dsa_threads[vm->thread_index].pending_batches, b);
  vlib_node_set_interrupt_pending (vm, intel_dsa_node.index);
  return 1;
}

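/* reject configs the assigned channel cannot honor: unsupported feature
 * bits, too many transfers per batch, or transfers that are too large */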
static int
intel_dsa_check_channel (intel_dsa_channel_t *ch, vlib_dma_config_data_t *cd)
{
  if (!ch)
    {
      dsa_log_error ("no available dsa channel");
      return 1;
    }

  vlib_dma_config_t supported_cfg = {
    .barrier_before_last = 1,
    .sw_fallback = 1,
  };

  if (cd->cfg.features & ~supported_cfg.features)
    {
      dsa_log_error ("unsupported feature requested");
      return 1;
    }

  if (cd->cfg.max_transfers > ch->max_transfers)
    {
      dsa_log_error ("transfer number (%u) too big", cd->cfg.max_transfers);
      return 1;
    }

  if (cd->cfg.max_transfer_size > ch->max_transfer_size)
    {
      dsa_log_error ("transfer size (%u) too big", cd->cfg.max_transfer_size);
      return 1;
    }

  return 0;
}

static int
intel_dsa_config_add_fn (vlib_main_t *vm, vlib_dma_config_data_t *cd)
{
  intel_dsa_main_t *idm = &intel_dsa_main;
  intel_dsa_config_t *idc;
  u32 index, n_threads = vlib_get_n_threads ();

  vec_validate (idm->dsa_config_heap_handle_by_config_index, cd->config_index);
  index = heap_alloc_aligned (
    idm->dsa_config_heap, n_threads, CLIB_CACHE_LINE_BYTES,
    idm->dsa_config_heap_handle_by_config_index[cd->config_index]);

  cd->batch_new_fn = intel_dsa_batch_new;
  cd->private_data = index;

  for (u32 thread = 0; thread < n_threads; thread++)
    {
      intel_dsa_batch_t *idb;
      vlib_dma_batch_t *b;
      idc = vec_elt_at_index (idm->dsa_config_heap, index + thread);

      /* size of physmem allocation for this config */
      idc->max_transfers = cd->cfg.max_transfers;
      idc->alloc_size = sizeof (intel_dsa_batch_t) +
			sizeof (intel_dsa_desc_t) * (idc->max_transfers + 1);
      /* fill batch template */
      idb = &idc->batch_template;
      idb->ch = idm->dsa_threads[thread].ch;
      if (intel_dsa_check_channel (idb->ch, cd))
	return 0;

      dsa_log_debug ("config %d in thread %d using channel %u/%u",
		     cd->config_index, thread, idb->ch->did, idb->ch->qid);
      idb->config_heap_index = index + thread;
      idb->config_index = cd->config_index;
      idb->batch.callback_fn = cd->cfg.callback_fn;
      idb->features = cd->cfg.features;
      b = &idb->batch;
      b->stride = sizeof (intel_dsa_desc_t);
      b->src_ptr_off = STRUCT_OFFSET_OF (intel_dsa_batch_t, descs[0].src);
      b->dst_ptr_off = STRUCT_OFFSET_OF (intel_dsa_batch_t, descs[0].dst);
      b->size_off = STRUCT_OFFSET_OF (intel_dsa_batch_t, descs[0].size);
      b->submit_fn = intel_dsa_batch_submit;
      dsa_log_debug (
	"config %d in thread %d stride %d src/dst/size offset %d-%d-%d",
	cd->config_index, thread, b->stride, b->src_ptr_off, b->dst_ptr_off,
	b->size_off);
    }

  dsa_log_info ("config %u added", cd->private_data);
  return 1;
}

static void
intel_dsa_config_del_fn (vlib_main_t *vm, vlib_dma_config_data_t *cd)
{
  intel_dsa_main_t *idm = &intel_dsa_main;
  intel_dsa_thread_t *t =
    vec_elt_at_index (idm->dsa_threads, vm->thread_index);
  u32 n_pending, n_threads, config_heap_index, n = 0;
  n_threads = vlib_get_n_threads ();

  if (!t->pending_batches)
    goto free_heap;

  n_pending = vec_len (t->pending_batches);
  intel_dsa_batch_t *b;

  /* clean pending list and free list */
  for (u32 i = 0; i < n_pending; i++)
    {
      b = t->pending_batches[i];
      if (b->config_index == cd->config_index)
	{
	  vec_add1 (idm->dsa_config_heap[b->config_heap_index].freelist, b);
	  if (b->status == INTEL_DSA_STATUS_SUCCESS ||
	      b->status == INTEL_DSA_STATUS_BUSY)
	    b->ch->n_enq--;
	}
      else
	t->pending_batches[n++] = b;
    }

  vec_set_len (t->pending_batches, n);

free_heap:
  for (u32 thread = 0; thread < n_threads; thread++)
    {
      config_heap_index = cd->private_data + thread;
      while (vec_len (idm->dsa_config_heap[config_heap_index].freelist) > 0)
	{
	  b = vec_pop (idm->dsa_config_heap[config_heap_index].freelist);
	  vlib_physmem_free (vm, b);
	}
    }

  heap_dealloc (idm->dsa_config_heap,
		idm->dsa_config_heap_handle_by_config_index[cd->config_index]);

  dsa_log_debug ("config %u removed", cd->private_data);
}

static uword
intel_dsa_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
		   vlib_frame_t *frame)
{
  intel_dsa_main_t *idm = &intel_dsa_main;
  intel_dsa_thread_t *t =
    vec_elt_at_index (idm->dsa_threads, vm->thread_index);
  u32 n_pending = 0, n = 0;
  u8 glitch = 0;

  if (!t->pending_batches)
    return 0;

  n_pending = vec_len (t->pending_batches);

  for (u32 i = 0; i < n_pending; i++)
    {
      intel_dsa_batch_t *b = t->pending_batches[i];
      intel_dsa_channel_t *ch = b->ch;

      if ((b->status == INTEL_DSA_STATUS_SUCCESS ||
	   b->status == INTEL_DSA_STATUS_CPU_SUCCESS) &&
	  !glitch)
	{
	  /* callback */
	  if (b->batch.callback_fn)
	    b->batch.callback_fn (vm, &b->batch);

	  /* restore last descriptor fields */
	  if (b->batch.n_enq == 1)
	    {
	      b->descs[0].completion = 0;
	      b->descs[0].op_flags =
		(INTEL_DSA_OP_MEMMOVE << INTEL_DSA_OP_SHIFT) |
		INTEL_DSA_FLAG_CACHE_CONTROL;
	      if (b->ch->block_on_fault)
		b->descs[0].op_flags |= INTEL_DSA_FLAG_BLOCK_ON_FAULT;
	    }
	  /* add to freelist */
	  vec_add1 (idm->dsa_config_heap[b->config_heap_index].freelist, b);

	  intel_dsa_channel_lock (ch);
	  if (b->status == INTEL_DSA_STATUS_SUCCESS)
	    {
	      ch->n_enq--;
	      ch->completed++;
	    }
	  else
	    ch->sw_fallback++;
	  intel_dsa_channel_unlock (ch);

	  b->batch.n_enq = 0;
	  b->status = INTEL_DSA_STATUS_IDLE;
	}
      else if (b->status == INTEL_DSA_STATUS_BUSY)
	{
	  glitch = 1 & b->barrier_before_last;
	  t->pending_batches[n++] = b;
	}
      else if (!glitch)
	{
	  /* fallback to software if exception happened */
	  intel_dsa_batch_fallback (vm, b, ch);
	  glitch = 1 & b->barrier_before_last;
	  t->pending_batches[n++] = b;
	}
      else
	t->pending_batches[n++] = b;
    }
  vec_set_len (t->pending_batches, n);

  if (n)
    {
      vlib_node_set_interrupt_pending (vm, intel_dsa_node.index);
    }

  return n_pending - n;
}

static u8 *
format_dsa_info (u8 *s, va_list *args)
{
  intel_dsa_main_t *idm = &intel_dsa_main;
  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
  intel_dsa_channel_t *ch;
  ch = idm->dsa_threads[vm->thread_index].ch;
  s = format (s, "thread %d dma %u/%u request %-16lld hw %-16lld cpu %-16lld",
	      vm->thread_index, ch->did, ch->qid, ch->submitted, ch->completed,
	      ch->sw_fallback);
  return s;
}

VLIB_REGISTER_NODE (intel_dsa_node) = {
  .function = intel_dsa_node_fn,
  .name = "intel-dsa",
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .vector_size = 4,
};

vlib_dma_backend_t intel_dsa_backend = {
  .name = "Intel DSA",
  .config_add_fn = intel_dsa_config_add_fn,
  .config_del_fn = intel_dsa_config_del_fn,
  .info_fn = format_dsa_info,
};