/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 * Copyright 2017 NXP
 *
 */

#include <rte_branch_prediction.h>

#include <fsl_usd.h>
#include <process.h>
#include "bman_priv.h"
#include <sys/ioctl.h>

/*
 * Global variables for the max portal/pool numbers supported by this BMan
 * version
 */
u16 bman_ip_rev;
u16 bman_pool_max;
void *bman_ccsr_map;

/*****************/
/* Portal driver */
/*****************/

static __thread int fd = -1;
static __thread struct bm_portal_config pcfg;
static __thread struct dpaa_ioctl_portal_map map = {
        .type = dpaa_portal_bman
};

static int fsl_bman_portal_init(uint32_t idx, int is_shared)
{
        cpu_set_t cpuset;
        struct bman_portal *portal;
        int loop, ret;
        struct dpaa_ioctl_irq_map irq_map;

        /* Verify the thread's cpu-affinity */
        ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                     &cpuset);
        if (ret) {
                error(0, ret, "pthread_getaffinity_np()");
                return ret;
        }
        pcfg.cpu = -1;
        for (loop = 0; loop < CPU_SETSIZE; loop++)
                if (CPU_ISSET(loop, &cpuset)) {
                        if (pcfg.cpu != -1) {
                                pr_err("Thread is not affine to 1 cpu");
                                return -EINVAL;
                        }
                        pcfg.cpu = loop;
                }
        if (pcfg.cpu == -1) {
                pr_err("Bug in getaffinity handling!");
                return -EINVAL;
        }
        /* Allocate and map a bman portal */
        map.index = idx;
        ret = process_portal_map(&map);
        if (ret) {
                error(0, ret, "process_portal_map()");
                return ret;
        }
        /* Make the portal's cache-[enabled|inhibited] regions */
        pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
        pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
        pcfg.is_shared = is_shared;
        pcfg.index = map.index;
        bman_depletion_fill(&pcfg.mask);

        fd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
        if (fd == -1) {
                pr_err("BMan irq init failed");
                process_portal_unmap(&map.addr);
                return -EBUSY;
        }
        /* Use the IRQ FD as a unique IRQ number */
        pcfg.irq = fd;

        portal = bman_create_affine_portal(&pcfg);
        if (!portal) {
                pr_err("Bman portal initialisation failed (%d)",
                       pcfg.cpu);
                process_portal_unmap(&map.addr);
                return -EBUSY;
        }

        /* Set the IRQ number */
        irq_map.type = dpaa_portal_bman;
        irq_map.portal_cinh = map.addr.cinh;
        process_portal_irq_map(fd, &irq_map);
        return 0;
}
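
/*
 * Illustrative usage sketch (not part of this driver): fsl_bman_portal_init()
 * rejects callers that are affine to more than one CPU, so a thread asking
 * for a portal via bman_thread_init() would typically pin itself to a single
 * core first. core_id and handle_error() below are caller-side placeholders:
 *
 *      cpu_set_t cpuset;
 *
 *      CPU_ZERO(&cpuset);
 *      CPU_SET(core_id, &cpuset);
 *      pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
 *      if (bman_thread_init())
 *              handle_error();
 */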

static int fsl_bman_portal_finish(void)
{
        __maybe_unused const struct bm_portal_config *cfg;
        int ret;

        process_portal_irq_unmap(fd);

        cfg = bman_destroy_affine_portal();
        DPAA_BUG_ON(cfg != &pcfg);
        ret = process_portal_unmap(&map.addr);
        if (ret)
                error(0, ret, "process_portal_unmap()");
        return ret;
}

int bman_thread_init(void)
{
        /* Convert from contiguous/virtual cpu numbering to real cpu when
         * calling into the code that is dependent on the device naming.
         */
        return fsl_bman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
}

int bman_thread_finish(void)
{
        return fsl_bman_portal_finish();
}

void bman_thread_irq(void)
{
        qbman_invoke_irq(pcfg.irq);
        /* Now we need to uninhibit interrupts. This is the only code outside
         * the regular portal driver that manipulates any portal register, so
         * rather than breaking that encapsulation I am simply hard-coding the
         * offset to the inhibit register here.
         */
        out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
}
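
/*
 * Illustrative interrupt-handling sketch (an assumption about the caller,
 * not something this file enforces): the portal IRQ fd stored in pcfg.irq by
 * fsl_bman_portal_init() can be waited on, and once it becomes readable
 * bman_thread_irq() is called to service the interrupt and uninhibit portal
 * interrupts again. portal_irq_fd is a placeholder for that fd:
 *
 *      struct pollfd pfd = { .fd = portal_irq_fd, .events = POLLIN };
 *
 *      if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *              bman_thread_irq();
 */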

int bman_init_ccsr(const struct device_node *node)
{
        static int ccsr_map_fd;
        uint64_t phys_addr;
        const uint32_t *bman_addr;
        uint64_t regs_size;

        bman_addr = of_get_address(node, 0, &regs_size, NULL);
        if (!bman_addr) {
                pr_err("of_get_address cannot return BMan address");
                return -EINVAL;
        }
        phys_addr = of_translate_address(node, bman_addr);
        if (!phys_addr) {
                pr_err("of_translate_address failed");
                return -EINVAL;
        }

        ccsr_map_fd = open(BMAN_CCSR_MAP, O_RDWR);
        if (unlikely(ccsr_map_fd < 0)) {
                pr_err("Cannot open /dev/mem for BMan CCSR map");
                return ccsr_map_fd;
        }

        bman_ccsr_map = mmap(NULL, regs_size, PROT_READ |
                             PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
        if (bman_ccsr_map == MAP_FAILED) {
                pr_err("Cannot map BMan CCSR base: "
                       "0x%x, phys: 0x%lx, size: 0x%lx",
                       *bman_addr, phys_addr, regs_size);
                close(ccsr_map_fd);
                return -EINVAL;
        }

        return 0;
}

int bman_global_init(void)
{
        const struct device_node *dt_node;
        static int done;

        if (done)
                return -EBUSY;
        /* Use the device-tree to determine IP revision until something better
         * is devised.
         */
        dt_node = of_find_compatible_node(NULL, NULL, "fsl,bman-portal");
        if (!dt_node) {
                pr_err("No bman portals available for any CPU\n");
                return -ENODEV;
        }
        if (of_device_is_compatible(dt_node, "fsl,bman-portal-1.0") ||
            of_device_is_compatible(dt_node, "fsl,bman-portal-1.0.0")) {
                bman_ip_rev = BMAN_REV10;
                bman_pool_max = 64;
        } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.0") ||
                of_device_is_compatible(dt_node, "fsl,bman-portal-2.0.8")) {
                bman_ip_rev = BMAN_REV20;
                bman_pool_max = 8;
        } else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.0") ||
                of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.1") ||
                of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.2") ||
                of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.3")) {
                bman_ip_rev = BMAN_REV21;
                bman_pool_max = 64;
        } else {
                pr_warn("unknown BMan version in portal node, defaulting "
                        "to rev1.0");
                bman_ip_rev = BMAN_REV10;
                bman_pool_max = 64;
        }

        if (!bman_ip_rev) {
                pr_err("Unknown bman portal version\n");
                return -ENODEV;
        }
        {
                const struct device_node *dn = of_find_compatible_node(NULL,
                                                        NULL, "fsl,bman");
                if (!dn)
                        pr_err("No bman device node available");
                else if (bman_init_ccsr(dn))
                        pr_err("BMan CCSR map failed.");
        }

        done = 1;
        return 0;
}
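
/*
 * Illustrative initialisation order (a sketch of how these entry points fit
 * together, not mandated by this file alone): bman_global_init() runs once
 * per process to probe the device-tree and map CCSR, then each I/O thread,
 * pinned to a single CPU, claims its own affine portal. handle_no_bman() and
 * handle_no_portal() are placeholders for the caller's error handling:
 *
 *      if (bman_global_init())
 *              handle_no_bman();
 *      // per I/O thread:
 *      if (bman_thread_init())
 *              handle_no_portal();
 *      ...
 *      bman_thread_finish();
 */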

#define BMAN_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
u32 bm_pool_free_buffers(u32 bpid)
{
        return in_be32(bman_ccsr_map + BMAN_POOL_CONTENT(bpid));
}

static u32 __generate_thresh(u32 val, int roundup)
{
        u32 e = 0;      /* exponent of the coefficient/exponent encoding */
        int oddbit = 0;

        while (val > 0xff) {
                oddbit = val & 1;
                val >>= 1;
                e++;
                if (roundup && oddbit)
                        val++;
        }
        DPAA_ASSERT(e < 0x10);
        return (val | (e << 8));
}
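
/*
 * Worked example of the encoding above: a threshold is stored as an 8-bit
 * coefficient with a 4-bit exponent in bits 8..11, representing roughly
 * coefficient * 2^exponent. For val = 0x1000, the value is halved five times
 * down to 0x80 (<= 0xff), so e = 5 and the encoded result is
 * 0x80 | (5 << 8) = 0x580, which decodes back to 0x80 << 5 = 0x1000. With
 * roundup set, any odd bits shifted out round the coefficient up instead of
 * truncating it.
 */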

#define POOL_SWDET(n)       (0x0000 + ((n) * 0x04))
#define POOL_HWDET(n)       (0x0100 + ((n) * 0x04))
#define POOL_SWDXT(n)       (0x0200 + ((n) * 0x04))
#define POOL_HWDXT(n)       (0x0300 + ((n) * 0x04))
int bm_pool_set(u32 bpid, const u32 *thresholds)
{
        if (!bman_ccsr_map)
                return -ENODEV;
        if (bpid >= bman_pool_max)
                return -EINVAL;
        out_be32(bman_ccsr_map + POOL_SWDET(bpid),
                 __generate_thresh(thresholds[0], 0));
        out_be32(bman_ccsr_map + POOL_SWDXT(bpid),
                 __generate_thresh(thresholds[1], 1));
        out_be32(bman_ccsr_map + POOL_HWDET(bpid),
                 __generate_thresh(thresholds[2], 0));
        out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
                 __generate_thresh(thresholds[3], 1));
        return 0;
}
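
/*
 * Illustrative call (threshold values and handle_error() are placeholders):
 * thresholds[] is ordered {SWDET, SWDXT, HWDET, HWDXT} to match the register
 * writes above, i.e. the software and hardware depletion entry/exit
 * thresholds for the pool:
 *
 *      const u32 thresholds[4] = { 0x40, 0x80, 0x20, 0x40 };
 *
 *      if (bm_pool_set(bpid, thresholds))
 *              handle_error();  // CCSR not mapped or bpid out of range
 */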

#define BMAN_LOW_DEFAULT_THRESH         0x40
#define BMAN_HIGH_DEFAULT_THRESH        0x80
int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
                             const u32 high_thresh)
{
        if (!bman_ccsr_map)
                return -ENODEV;
        if (bpid >= bman_pool_max)
                return -EINVAL;
        if (low_thresh && high_thresh) {
                out_be32(bman_ccsr_map + POOL_HWDET(bpid),
                         __generate_thresh(low_thresh, 0));
                out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
                         __generate_thresh(high_thresh, 1));
        } else {
                out_be32(bman_ccsr_map + POOL_HWDET(bpid),
                         __generate_thresh(BMAN_LOW_DEFAULT_THRESH, 0));
                out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
                         __generate_thresh(BMAN_HIGH_DEFAULT_THRESH, 1));
        }
        return 0;
}
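
/*
 * Illustrative calls (values are placeholders): non-zero thresholds are
 * programmed directly, while passing 0 for either argument falls back to the
 * BMAN_LOW/HIGH_DEFAULT_THRESH values above:
 *
 *      bm_pool_set_hw_threshold(bpid, 0x40, 0x80);  // explicit thresholds
 *      bm_pool_set_hw_threshold(bpid, 0, 0);        // use the defaults
 */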