/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include <rte_memzone.h>
#include <rte_errno.h>

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_iov_api.h"
#include "ecore_mcp_api.h"
#include "ecore_l2_api.h"

/* Array of memzone pointers */
static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
/* Counter tracking the number of memzones currently allocated */
static uint16_t ecore_mz_count;

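/* Round @n up to the nearest power of two; returns 0 when @n is 0 */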
unsigned long qede_log2_align(unsigned long n)
{
	unsigned long ret = n ? 1 : 0;
	unsigned long _n = n >> 1;

	while (_n) {
		_n >>= 1;
		ret <<= 1;
	}

	if (ret < n)
		ret <<= 1;

	return ret;
}

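/* Integer base-2 logarithm: floor(log2(val)), 0 for val == 0 */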
u32 qede_osal_log2(u32 val)
{
	u32 log = 0;

	while (val >>= 1)
		log++;

	return log;
}

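/* Atomically set bit @nr in the bitmap at @addr */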
inline void qede_set_bit(u32 nr, unsigned long *addr)
{
	__sync_fetch_and_or(addr, (1UL << nr));
}

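/* Atomically clear bit @nr in the bitmap at @addr */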
inline void qede_clr_bit(u32 nr, unsigned long *addr)
{
	__sync_fetch_and_and(addr, ~(1UL << nr));
}

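/* Test bit @nr in the bitmap at @addr, with memory barriers around the read */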
inline bool qede_test_bit(u32 nr, unsigned long *addr)
{
	bool res;

	rte_mb();
	res = ((*addr) & (1UL << nr)) != 0;
	rte_mb();
	return res;
}

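/* Index of the first set bit in @word, or OSAL_BITS_PER_UL if @word is 0 */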
static inline u32 qede_ffb(unsigned long word)
{
	unsigned long first_bit;

	first_bit = __builtin_ffsl(word);
	return first_bit ? (first_bit - 1) : OSAL_BITS_PER_UL;
}

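/* Find the first set bit in a bitmap of @limit bits; returns @limit if none is set */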
inline u32 qede_find_first_bit(unsigned long *addr, u32 limit)
{
	u32 i;
	u32 nwords = 0;
	OSAL_BUILD_BUG_ON(!limit);
	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
	for (i = 0; i < nwords; i++)
		if (addr[i] != 0)
			break;

	return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffb(addr[i]);
}

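/* Index of the first zero bit in @word, or OSAL_BITS_PER_UL if all bits are set */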
static inline u32 qede_ffz(unsigned long word)
{
	unsigned long first_zero;

	first_zero = __builtin_ffsl(~word);
	return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
}

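/* Find the first zero bit in a bitmap of @limit bits; returns @limit if none is clear */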
inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
{
	u32 i;
	u32 nwords = 0;
	OSAL_BUILD_BUG_ON(!limit);
	nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
	for (i = 0; i < nwords && ~(addr[i]) == 0; i++)
		;
	return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
}

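/* Identify this VF as a Linux userspace (DPDK) driver during the acquire handshake */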
void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
			      __rte_unused struct vf_pf_resc_request *resc_req,
			      struct ecore_vf_acquire_sw_info *vf_sw_info)
{
	vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
	vf_sw_info->override_fw_version = 1;
}

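/* Allocate cache-line aligned, IOVA-contiguous DMA memory from a memzone and
 * return its virtual address; the IOVA is returned through @phys.
 */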
void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
			      dma_addr_t *phys, size_t size)
{
	const struct rte_memzone *mz;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	if (ecore_mz_count >= RTE_MAX_MEMZONE) {
		DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
		       RTE_MAX_MEMZONE);
		*phys = 0;
		return OSAL_NULL;
	}

	OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
	snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
					(unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_master_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
	if (!mz) {
		DP_ERR(p_dev, "Unable to allocate DMA memory "
		       "of size %zu bytes - %s\n",
		       size, rte_strerror(rte_errno));
		*phys = 0;
		return OSAL_NULL;
	}
	*phys = mz->iova;
	ecore_mz_mapping[ecore_mz_count++] = mz;
	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "Allocated dma memory size=%zu phys=0x%lx"
		   " virt=%p core=%d\n",
		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
	return mz->addr;
}

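/* Same as osal_dma_alloc_coherent() but with a caller-specified alignment */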
void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
				      dma_addr_t *phys, size_t size, int align)
{
	const struct rte_memzone *mz;
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	if (ecore_mz_count >= RTE_MAX_MEMZONE) {
		DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
		       RTE_MAX_MEMZONE);
		*phys = 0;
		return OSAL_NULL;
	}

	OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
	snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
					(unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_master_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
	if (!mz) {
		DP_ERR(p_dev, "Unable to allocate DMA memory "
		       "of size %zu bytes - %s\n",
		       size, rte_strerror(rte_errno));
		*phys = 0;
		return OSAL_NULL;
	}
	*phys = mz->iova;
	ecore_mz_mapping[ecore_mz_count++] = mz;
	DP_VERBOSE(p_dev, ECORE_MSG_SP,
		   "Allocated aligned dma memory size=%zu phys=0x%lx"
		   " virt=%p core=%d\n",
		   mz->len, (unsigned long)mz->iova, mz->addr, core_id);
	return mz->addr;
}

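/* Free the memzone whose IOVA matches @phys and compact the tracking array */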
void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
{
	uint16_t j;

	for (j = 0 ; j < ecore_mz_count; j++) {
		if (phys == ecore_mz_mapping[j]->iova) {
			DP_VERBOSE(p_dev, ECORE_MSG_SP,
				"Free memzone %s\n", ecore_mz_mapping[j]->name);
			rte_memzone_free(ecore_mz_mapping[j]);
			while (j < ecore_mz_count - 1) {
				ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
				j++;
			}
			ecore_mz_count--;
			return;
		}
	}

	DP_ERR(p_dev, "Unexpected memory free request\n");
}

#ifdef CONFIG_ECORE_ZIPPED_FW
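/* Inflate zlib-compressed firmware data into @unzip_buf; returns the
 * decompressed length in dwords, or 0 on error.
 */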
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
		    u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_ERR(p_hwfn,
			   "zlib init failed, rc = %d\n", rc);
		return 0;
	}

	rc = inflate(p_hwfn->stream, Z_FINISH);
	inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_ERR(p_hwfn,
			   "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg,
			   rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
#endif

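/* Fill MCP protocol statistics; only LAN (L2) statistics are supported */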
void
qede_get_mcp_proto_stats(struct ecore_dev *edev,
			 enum ecore_mcp_protocol_type type,
			 union ecore_mcp_protocol_stats *stats)
{
	struct ecore_eth_stats lan_stats;

	if (type == ECORE_MCP_LAN_STATS) {
		ecore_get_vport_stats(edev, &lan_stats);

		/* @DPDK */
		stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;

		stats->lan_stats.fcs_err = -1;
	} else {
		DP_INFO(edev, "Statistics request type %d not supported\n",
			type);
	}
}

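/* Log the hardware error type and enable clearing of HW attentions */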
void
qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
{
	char err_str[64];

	switch (err_type) {
	case ECORE_HW_ERR_FAN_FAIL:
		strcpy(err_str, "Fan Failure");
		break;
	case ECORE_HW_ERR_MFW_RESP_FAIL:
		strcpy(err_str, "MFW Response Failure");
		break;
	case ECORE_HW_ERR_HW_ATTN:
		strcpy(err_str, "HW Attention");
		break;
	case ECORE_HW_ERR_DMAE_FAIL:
		strcpy(err_str, "DMAE Failure");
		break;
	case ECORE_HW_ERR_RAMROD_FAIL:
		strcpy(err_str, "Ramrod Failure");
		break;
	case ECORE_HW_ERR_FW_ASSERT:
		strcpy(err_str, "FW Assertion");
		break;
	default:
		strcpy(err_str, "Unknown");
	}

	DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
	ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
}

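/* Bitwise CRC-32 (reflected polynomial 0xedb88320) over @length bytes at @ptr,
 * seeded with @crc.
 */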
u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
{
	int i;

	while (length--) {
		crc ^= *ptr++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}