/*
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _DPAA2_HW_PVT_H_
#define _DPAA2_HW_PVT_H_

#include <rte_eventdev.h>

#include <mc/fsl_mc_sys.h>
#include <fsl_qbman_portal.h>

#define lower_32_bits(x) ((uint32_t)(x))
#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
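/*
 * Note (added for clarity): the double 16-bit shift in upper_32_bits()
 * avoids undefined behaviour when the argument happens to be only 32 bits
 * wide (a single shift by 32 would be undefined in C). For example, with
 * x = 0x000000123456789AULL:
 *	lower_32_bits(x) == 0x3456789A
 *	upper_32_bits(x) == 0x00000012
 */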
#define SVR_LS1080A 0x87030000
#define SVR_LS2080A 0x87010000
#define SVR_LS2088A 0x87090000
#define SVR_LX2160A 0x87360000

#define ETH_VLAN_HLEN 4 /**< VLAN header length */

#define MAX_TX_RING_SLOTS 8
	/**< Maximum number of slots available in TX ring */

#define DPAA2_DQRR_RING_SIZE 16
	/**< Maximum number of slots available in RX ring */

#define MC_PORTAL_INDEX 0
#define NUM_DPIO_REGIONS 2
#define NUM_DQS_PER_QUEUE 2

/* Maximum release/acquire from QBMAN */
#define DPAA2_MBUF_MAX_ACQ_REL 7
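/*
 * Illustrative sketch (not part of the original header): callers that need
 * to acquire or release more buffers than DPAA2_MBUF_MAX_ACQ_REL in one
 * call are expected to split the request into chunks of at most that size.
 * Here 'n', 'bufs' and 'process_chunk' are hypothetical caller-side names:
 *
 *	while (n > 0) {
 *		unsigned int chunk = (n > DPAA2_MBUF_MAX_ACQ_REL) ?
 *				DPAA2_MBUF_MAX_ACQ_REL : n;
 *		process_chunk(bufs, chunk); // acquire/release via the portal
 *		bufs += chunk;
 *		n -= chunk;
 *	}
 */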
#define DPAA2_MBUF_HW_ANNOTATION 64
#define DPAA2_FD_PTA_SIZE 0

#if (DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > RTE_PKTMBUF_HEADROOM
#error "Annotation requirement is more than RTE_PKTMBUF_HEADROOM"
#endif

/* we will re-use the HEADROOM for annotation in RX */
#define DPAA2_HW_BUF_RESERVE 0
#define DPAA2_PACKET_LAYOUT_ALIGN 64 /* changing from 256 */

#define DPAA2_DPCI_MAX_QUEUES 2
struct dpaa2_dpio_dev {
	TAILQ_ENTRY(dpaa2_dpio_dev) next;
		/**< Pointer to next device instance */
	uint16_t index; /**< Index of an instance in the list */
	rte_atomic16_t ref_count;
		/**< How many thread contexts are sharing this. */
	struct fsl_mc_io *dpio; /**< Handle to DPIO portal object */
	struct qbman_swp *sw_portal; /**< SW portal object */
	const struct qbman_result *dqrr[4];
		/**< DQRR entries for this SW portal */
	void *mc_portal; /**< MC portal for configuring this device */
	uintptr_t qbman_portal_ce_paddr;
		/**< Physical address of Cache Enabled Area */
	uintptr_t ce_size; /**< Size of the CE region */
	uintptr_t qbman_portal_ci_paddr;
		/**< Physical address of Cache Inhibited Area */
	uintptr_t ci_size; /**< Size of the CI region */
	struct rte_intr_handle intr_handle; /**< Interrupt related info */
	int32_t epoll_fd; /**< File descriptor created for interrupt polling */
	int32_t hw_id; /**< A unique ID of this DPIO device instance */
};
struct dpaa2_dpbp_dev {
	TAILQ_ENTRY(dpaa2_dpbp_dev) next;
		/**< Pointer to next device instance */
	struct fsl_mc_io dpbp; /**< Handle to DPBP portal object */
	rte_atomic16_t in_use;
	uint32_t dpbp_id; /**< HW ID for DPBP object */
};
struct queue_storage_info_t {
	struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
	struct qbman_result *active_dqs;
};
struct dpaa2_queue;

typedef void (dpaa2_queue_cb_dqrr_t)(struct qbman_swp *swp,
		const struct qbman_fd *fd,
		const struct qbman_result *dq,
		struct dpaa2_queue *rxq,
		struct rte_event *ev);
struct dpaa2_queue {
	struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
	int32_t eventfd; /*!< Event fd of this queue */
	uint32_t fqid; /*!< Unique ID of this queue */
	uint8_t tc_index; /*!< Traffic class identifier */
	uint16_t flow_id; /*!< To be used by DPAA2 framework */
	struct queue_storage_info_t *q_storage;
	struct qbman_result *cscn;
	dpaa2_queue_cb_dqrr_t *cb;
};
struct swp_active_dqs {
	struct qbman_result *global_active_dqs;
	uint64_t reserved[7];
};
#define NUM_MAX_SWP 64

extern struct swp_active_dqs rte_global_active_dqs_list[NUM_MAX_SWP];
struct dpaa2_dpci_dev {
	TAILQ_ENTRY(dpaa2_dpci_dev) next;
		/**< Pointer to next device instance */
	struct fsl_mc_io dpci; /**< Handle to DPCI portal object */
	rte_atomic16_t in_use;
	uint32_t dpci_id; /**< HW ID for DPCI object */
	struct dpaa2_queue queue[DPAA2_DPCI_MAX_QUEUES];
};
/*! Global MCP list */
extern void *(*rte_mcp_ptr_list);
/* Refer to Table 7-3 in SEC BG */
struct qbman_fle {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t length;
	/* FMT must be 00, MSB is final bit */
	uint32_t fin_bpid_offset;
	uint32_t frc;
	uint32_t reserved[3]; /* Not used currently */
};

struct qbman_sge {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t length;
	uint32_t fin_bpid_offset;
	uint32_t frc;
};
/* There are three types of frames: Single, Scatter Gather and Frame Lists */
enum qbman_fd_format {
	qbman_fd_single = 0,
	qbman_fd_list,
	qbman_fd_sg
};
/* Macros to define operations on FD */
#define DPAA2_SET_FD_ADDR(fd, addr) do { \
	fd->simple.addr_lo = lower_32_bits((uint64_t)(addr)); \
	fd->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FD_LEN(fd, length) (fd)->simple.len = length
#define DPAA2_SET_FD_BPID(fd, bpid) ((fd)->simple.bpid_offset |= bpid)
#define DPAA2_SET_FD_IVP(fd) ((fd->simple.bpid_offset |= 0x00004000))
#define DPAA2_SET_FD_OFFSET(fd, offset) \
	((fd->simple.bpid_offset |= (uint32_t)(offset) << 16))
#define DPAA2_SET_FD_INTERNAL_JD(fd, len) fd->simple.frc = (0x80000000 | (len))
#define DPAA2_SET_FD_FRC(fd, frc) fd->simple.frc = frc
#define DPAA2_RESET_FD_CTRL(fd) (fd)->simple.ctrl = 0
#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16))
#define DPAA2_SET_FD_FLC(fd, addr) do { \
	fd->simple.flc_lo = lower_32_bits((uint64_t)(addr)); \
	fd->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) (fle->frc = (0x80000000 | (len)))
#define DPAA2_GET_FLE_ADDR(fle) \
	(uint64_t)((((uint64_t)(fle->addr_hi)) << 32) + fle->addr_lo)
#define DPAA2_SET_FLE_ADDR(fle, addr) do { \
	fle->addr_lo = lower_32_bits((uint64_t)addr); \
	fle->addr_hi = upper_32_bits((uint64_t)addr); \
} while (0)
#define DPAA2_GET_FLE_CTXT(fle) \
	(uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \
		(fle)->reserved[0])
#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
	fle->reserved[0] = lower_32_bits((uint64_t)addr); \
	fle->reserved[1] = upper_32_bits((uint64_t)addr); \
} while (0)
#define DPAA2_SET_FLE_OFFSET(fle, offset) \
	((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid)
#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
#define DPAA2_SET_FLE_FIN(fle) (fle->fin_bpid_offset |= (uint64_t)1 << 31)
#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
	(fd->simple.bpid_offset |= (uint32_t)1 << 28)
#define DPAA2_GET_FD_ADDR(fd) \
	((uint64_t)((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
#define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len)
#define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF))
#define DPAA2_GET_FD_IVP(fd) ((fd->simple.bpid_offset & 0x00004000) >> 14)
#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_SET_FLE_SG_EXT(fle) (fle->fin_bpid_offset |= (uint64_t)1 << 29)
#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
	((fle->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)

#define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
	((struct rte_mbuf *)((uint64_t)(buf) - (meta_data_size)))

#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)

#define DPAA2_FD_SET_FORMAT(fd, format) do { \
	(fd)->simple.bpid_offset &= 0xCFFFFFFF; \
	(fd)->simple.bpid_offset |= (uint32_t)format << 28; \
} while (0)
#define DPAA2_FD_GET_FORMAT(fd) (((fd)->simple.bpid_offset >> 28) & 0x3)

#define DPAA2_SG_SET_FINAL(sg, fin) do { \
	(sg)->fin_bpid_offset &= 0x7FFFFFFF; \
	(sg)->fin_bpid_offset |= (uint32_t)fin << 31; \
} while (0)
#define DPAA2_SG_IS_FINAL(sg) (!!((sg)->fin_bpid_offset >> 31))
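/*
 * Illustrative sketch only (not part of the original header), showing how
 * these macros are typically combined. 'mbuf', 'bpid' and 'meta_data_size'
 * are hypothetical caller-owned values, and the FD is assumed to start out
 * zeroed:
 *
 *	// transmit side: describe an mbuf to the hardware
 *	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
 *	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
 *	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
 *	DPAA2_SET_FD_BPID(fd, bpid);
 *	DPAA2_FD_SET_FORMAT(fd, qbman_fd_single);
 *
 *	// receive side: recover the mbuf that owns the frame buffer
 *	void *buf = (void *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
 *	struct rte_mbuf *m = DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size);
 */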
/* Only Enqueue Error responses will be
 * pushed on FQID_ERR of Enqueue FQ
 */
#define DPAA2_EQ_RESP_ERR_FQ 0
/* All Enqueue responses will be pushed on address
 * set with qbman_eq_desc_set_response
 */
#define DPAA2_EQ_RESP_ALWAYS 1
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
/* todo - this is costly, need to write a fast conversion routine */
static void *dpaa2_mem_ptov(phys_addr_t paddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (paddr >= memseg[i].iova &&
		    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
			return (void *)(memseg[i].addr_64
					+ (paddr - memseg[i].iova));
	}
	return NULL;
}

static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (vaddr >= memseg[i].addr_64 &&
		    vaddr < memseg[i].addr_64 + memseg[i].len)
			return memseg[i].iova
				+ (vaddr - memseg[i].addr_64);
	}
	return (phys_addr_t)(NULL);
}
/*
 * When physical addresses are used as IO virtual addresses, the conversion
 * routines dpaa2_mem_vtop & dpaa2_mem_ptov need to be called. These
 * routines are invoked through the macros below.
 */
#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)
#define DPAA2_OP_VADDR_TO_IOVA(op) (op->phys_addr)

/* Macro to convert a virtual address to an IOVA */
#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr))

/* Macro to convert an IOVA to a virtual address */
#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova))

/* Macro to modify, in place, memory containing an IOVA so that it holds
 * the corresponding virtual address
 */
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
	{_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); }

#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */

#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
#define DPAA2_OP_VADDR_TO_IOVA(op) (op)
#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)

#endif /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
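/*
 * Illustrative sketch only (not part of the original header): code written
 * against these macros is identical in both addressing modes. For example,
 * a descriptor field holding an IOVA can be converted in place before the
 * CPU dereferences it ('sge' here is a hypothetical scatter-gather entry
 * pointer that currently holds an IOVA):
 *
 *	DPAA2_MODIFY_IOVA_TO_VADDR(sge, struct qbman_sge *);
 *	// 'sge' now holds a CPU-usable virtual address; when physical
 *	// addressing is not used the macro expands to nothing, since the
 *	// IOVA already equals the virtual address.
 */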
static inline int check_swp_active_dqs(uint16_t dpio_index)
{
	if (rte_global_active_dqs_list[dpio_index].global_active_dqs != NULL)
		return 1;
	return 0;
}

static inline void clear_swp_active_dqs(uint16_t dpio_index)
{
	rte_global_active_dqs_list[dpio_index].global_active_dqs = NULL;
}

static inline struct qbman_result *get_swp_active_dqs(uint16_t dpio_index)
{
	return rte_global_active_dqs_list[dpio_index].global_active_dqs;
}

static inline void set_swp_active_dqs(uint16_t dpio_index,
				      struct qbman_result *dqs)
{
	rte_global_active_dqs_list[dpio_index].global_active_dqs = dqs;
}
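/*
 * Illustrative sketch only (not part of the original header): a polling
 * datapath would typically record the dequeue storage it hands to the
 * portal and check it before issuing the next pull request, roughly:
 *
 *	if (check_swp_active_dqs(dpio_index)) {
 *		// wait until QBMAN has finished with the previously
 *		// registered storage, then forget it
 *		clear_swp_active_dqs(dpio_index);
 *	}
 *	set_swp_active_dqs(dpio_index, dq_storage);
 *
 * 'dpio_index' and 'dq_storage' are hypothetical caller-owned values.
 */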
struct dpaa2_dpbp_dev *dpaa2_alloc_dpbp_dev(void);
void dpaa2_free_dpbp_dev(struct dpaa2_dpbp_dev *dpbp);
int dpaa2_dpbp_supported(void);

struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void);
void rte_dpaa2_free_dpci_dev(struct dpaa2_dpci_dev *dpci);

#endif /* _DPAA2_HW_PVT_H_ */