1 /* SPDX-License-Identifier: Apache-2.0
2 * Copyright (c) 2022 Intel and/or its affiliates.
5 #ifndef __dma_intel_dsa_intel_h__
6 #define __dma_intel_dsa_intel_h__
9 #include <vlib/dma/dma.h>
10 #include <vlib/pci/pci.h>
11 #include <vppinfra/format.h>
25 /* remaining 26 bytes are reserved */
29 STATIC_ASSERT_SIZEOF (intel_dsa_desc_t, 64);
/* Paths used to discover and open DSA dedicated work queues.
 * NOTE(review): these match the Linux idxd driver's char-device and sysfs
 * layout — confirm against the targeted kernel version. */
#define DSA_DEV_PATH "/dev/dsa"
#define SYS_DSA_PATH "/sys/bus/dsa/devices"
36 INTEL_DSA_DEVICE_TYPE_UNKNOWN,
37 INTEL_DSA_DEVICE_TYPE_KERNEL,
38 INTEL_DSA_DEVICE_TYPE_USER,
39 INTEL_DSA_DEVICE_TYPE_MDEV,
40 } intel_dsa_wq_type_t;
/* Descriptor encoding: operation code and control flags packed into the
 * first 32-bit word of a DSA hardware descriptor.
 * NOTE(review): bit positions presumably follow the Intel DSA Architecture
 * Specification — verify against the spec revision targeted by this driver. */
#define INTEL_DSA_OP_SHIFT 24			      /* opcode field starts at bit 24 */
#define INTEL_DSA_FLAG_FENCE (1 << 0)		      /* order after prior descriptors in batch */
#define INTEL_DSA_FLAG_BLOCK_ON_FAULT (1 << 1)	      /* block on page fault instead of failing */
#define INTEL_DSA_FLAG_COMPLETION_ADDR_VALID (1 << 2) /* completion record address is valid */
#define INTEL_DSA_FLAG_REQUEST_COMPLETION (1 << 3)    /* ask device to write a completion record */
#define INTEL_DSA_FLAG_CACHE_CONTROL (1 << 8)	      /* direct destination write to CPU cache */
59 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
60 volatile void *portal; /* portal exposed by dedicated work queue */
64 u32 max_transfer_size; /* maximum size of each transfer */
65 u16 max_transfers; /* maximum number referenced in a batch */
66 u16 n_threads; /* number of threads using this channel */
67 u16 n_enq; /* number of batches currently enqueued */
76 u16 block_on_fault : 1;
  u8 lock; /* spinlock, only used if n_threads > 1 */
81 u8 numa; /* numa node */
82 u8 size; /* size of work queue */
83 u8 did; /* dsa device id */
84 u8 qid; /* work queue id */
85 } intel_dsa_channel_t;
87 typedef struct intel_dsa_batch
89 CLIB_CACHE_LINE_ALIGN_MARK (start);
90 vlib_dma_batch_t batch; /* must be first */
91 intel_dsa_channel_t *ch;
92 u32 config_heap_index;
99 u32 barrier_before_last : 1;
104 CLIB_CACHE_LINE_ALIGN_MARK (completion_cl);
/* Software-visible batch status values kept in the completion cache line.
 * NOTE(review): 0x0/0x1 mirror hardware completion-record status codes;
 * 0xa/0xb appear to be driver-defined extensions — confirm they cannot
 * collide with status values the hardware may write. */
#define INTEL_DSA_STATUS_IDLE	     0x0 /* no batch outstanding */
#define INTEL_DSA_STATUS_SUCCESS     0x1 /* hardware completed the batch */
#define INTEL_DSA_STATUS_BUSY	     0xa /* submitted, completion pending */
#define INTEL_DSA_STATUS_CPU_SUCCESS 0xb /* completed by CPU fallback path */
110 /* to avoid read-modify-write completion is written as 64-byte
111 * DMA FILL operation */
112 CLIB_CACHE_LINE_ALIGN_MARK (descriptors);
113 intel_dsa_desc_t descs[0];
116 STATIC_ASSERT_OFFSET_OF (intel_dsa_batch_t, batch, 0);
120 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
121 intel_dsa_batch_t batch_template;
124 intel_dsa_batch_t **freelist;
125 } intel_dsa_config_t;
129 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
130 intel_dsa_channel_t *ch; /* channel used by this thread */
131 intel_dsa_batch_t **pending_batches;
132 } intel_dsa_thread_t;
136 intel_dsa_channel_t ***channels;
137 intel_dsa_thread_t *dsa_threads;
138 intel_dsa_config_t *dsa_config_heap;
139 uword *dsa_config_heap_handle_by_config_index;
140 /* spin lock protect pmem */
141 clib_spinlock_t lock;
/* Global backend state, the vlib DMA backend registration record, and the
 * format function for printing DSA device/queue addresses. Defined in the
 * corresponding .c file. */
extern intel_dsa_main_t intel_dsa_main;
extern vlib_dma_backend_t intel_dsa_backend;
format_function_t format_intel_dsa_addr;
148 #define dsa_log_debug(f, ...) \
149 vlib_log (VLIB_LOG_LEVEL_DEBUG, intel_dsa_log.class, "%s: " f, __func__, \
152 #define dsa_log_info(f, ...) \
153 vlib_log (VLIB_LOG_LEVEL_INFO, intel_dsa_log.class, "%s: " f, __func__, \
156 #define dsa_log_error(f, ...) \
157 vlib_log (VLIB_LOG_LEVEL_ERR, intel_dsa_log.class, "%s: " f, __func__, \