New upstream version 16.11.8
[deb_dpdk.git] / drivers / net / cxgbe / base / t4_hw.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2014-2016 Chelsio Communications.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Chelsio Communications nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <netinet/in.h>
35
36 #include <rte_interrupts.h>
37 #include <rte_log.h>
38 #include <rte_debug.h>
39 #include <rte_pci.h>
40 #include <rte_atomic.h>
41 #include <rte_branch_prediction.h>
42 #include <rte_memory.h>
43 #include <rte_memzone.h>
44 #include <rte_tailq.h>
45 #include <rte_eal.h>
46 #include <rte_alarm.h>
47 #include <rte_ether.h>
48 #include <rte_ethdev.h>
49 #include <rte_atomic.h>
50 #include <rte_malloc.h>
51 #include <rte_random.h>
52 #include <rte_dev.h>
53 #include <rte_byteorder.h>
54
55 #include "common.h"
56 #include "t4_regs.h"
57 #include "t4_regs_values.h"
58 #include "t4fw_interface.h"
59
60 static void init_link_config(struct link_config *lc, unsigned int caps);
61
62 /**
63  * t4_read_mtu_tbl - returns the values in the HW path MTU table
64  * @adap: the adapter
65  * @mtus: where to store the MTU values
66  * @mtu_log: where to store the MTU base-2 log (may be %NULL)
67  *
68  * Reads the HW path MTU table.
69  */
70 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
71 {
72         u32 v;
73         int i;
74
75         for (i = 0; i < NMTUS; ++i) {
76                 t4_write_reg(adap, A_TP_MTU_TABLE,
77                              V_MTUINDEX(0xff) | V_MTUVALUE(i));
78                 v = t4_read_reg(adap, A_TP_MTU_TABLE);
79                 mtus[i] = G_MTUVALUE(v);
80                 if (mtu_log)
81                         mtu_log[i] = G_MTUWIDTH(v);
82         }
83 }
84
85 /**
86  * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
87  * @adap: the adapter
88  * @addr: the indirect TP register address
89  * @mask: specifies the field within the register to modify
90  * @val: new value for the field
91  *
92  * Sets a field of an indirect TP register to the given value.
93  */
94 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
95                             unsigned int mask, unsigned int val)
96 {
97         t4_write_reg(adap, A_TP_PIO_ADDR, addr);
98         val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
99         t4_write_reg(adap, A_TP_PIO_DATA, val);
100 }
101
102 /* The minimum additive increment value for the congestion control table */
103 #define CC_MIN_INCR 2U
104
105 /**
106  * t4_load_mtus - write the MTU and congestion control HW tables
107  * @adap: the adapter
108  * @mtus: the values for the MTU table
109  * @alpha: the values for the congestion control alpha parameter
110  * @beta: the values for the congestion control beta parameter
111  *
112  * Write the HW MTU table with the supplied MTUs and the high-speed
113  * congestion control table with the supplied alpha, beta, and MTUs.
114  * We write the two tables together because the additive increments
115  * depend on the MTUs.
116  */
117 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
118                   const unsigned short *alpha, const unsigned short *beta)
119 {
120         static const unsigned int avg_pkts[NCCTRL_WIN] = {
121                 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
122                 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
123                 28672, 40960, 57344, 81920, 114688, 163840, 229376
124         };
125
126         unsigned int i, w;
127
128         for (i = 0; i < NMTUS; ++i) {
129                 unsigned int mtu = mtus[i];
130                 unsigned int log2 = cxgbe_fls(mtu);
131
132                 if (!(mtu & ((1 << log2) >> 2)))     /* round */
133                         log2--;
134                 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
135                              V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
136
137                 for (w = 0; w < NCCTRL_WIN; ++w) {
138                         unsigned int inc;
139
140                         inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
141                                   CC_MIN_INCR);
142
143                         t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
144                                      (w << 16) | (beta[w] << 13) | inc);
145                 }
146         }
147 }
148
149 /**
150  * t4_wait_op_done_val - wait until an operation is completed
151  * @adapter: the adapter performing the operation
152  * @reg: the register to check for completion
153  * @mask: a single-bit field within @reg that indicates completion
154  * @polarity: the value of the field when the operation is completed
155  * @attempts: number of check iterations
156  * @delay: delay in usecs between iterations
157  * @valp: where to store the value of the register at completion time
158  *
159  * Wait until an operation is completed by checking a bit in a register
160  * up to @attempts times.  If @valp is not NULL the value of the register
161  * at the time it indicated completion is stored there.  Returns 0 if the
162  * operation completes and -EAGAIN otherwise.
163  */
164 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
165                         int polarity, int attempts, int delay, u32 *valp)
166 {
167         while (1) {
168                 u32 val = t4_read_reg(adapter, reg);
169
170                 if (!!(val & mask) == polarity) {
171                         if (valp)
172                                 *valp = val;
173                         return 0;
174                 }
175                 if (--attempts == 0)
176                         return -EAGAIN;
177                 if (delay)
178                         udelay(delay);
179         }
180 }
181
182 /**
183  * t4_set_reg_field - set a register field to a value
184  * @adapter: the adapter to program
185  * @addr: the register address
186  * @mask: specifies the portion of the register to modify
187  * @val: the new value for the register field
188  *
189  * Sets a register field specified by the supplied mask to the
190  * given value.
191  */
192 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
193                       u32 val)
194 {
195         u32 v = t4_read_reg(adapter, addr) & ~mask;
196
197         t4_write_reg(adapter, addr, v | val);
198         (void)t4_read_reg(adapter, addr);      /* flush */
199 }
200
201 /**
202  * t4_read_indirect - read indirectly addressed registers
203  * @adap: the adapter
204  * @addr_reg: register holding the indirect address
205  * @data_reg: register holding the value of the indirect register
206  * @vals: where the read register values are stored
207  * @nregs: how many indirect registers to read
208  * @start_idx: index of first indirect register to read
209  *
210  * Reads registers that are accessed indirectly through an address/data
211  * register pair.
212  */
213 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
214                       unsigned int data_reg, u32 *vals, unsigned int nregs,
215                       unsigned int start_idx)
216 {
217         while (nregs--) {
218                 t4_write_reg(adap, addr_reg, start_idx);
219                 *vals++ = t4_read_reg(adap, data_reg);
220                 start_idx++;
221         }
222 }
223
224 /**
225  * t4_write_indirect - write indirectly addressed registers
226  * @adap: the adapter
227  * @addr_reg: register holding the indirect addresses
228  * @data_reg: register holding the value for the indirect registers
229  * @vals: values to write
230  * @nregs: how many indirect registers to write
231  * @start_idx: address of first indirect register to write
232  *
233  * Writes a sequential block of registers that are accessed indirectly
234  * through an address/data register pair.
235  */
236 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
237                        unsigned int data_reg, const u32 *vals,
238                        unsigned int nregs, unsigned int start_idx)
239 {
240         while (nregs--) {
241                 t4_write_reg(adap, addr_reg, start_idx++);
242                 t4_write_reg(adap, data_reg, *vals++);
243         }
244 }
245
246 /**
247  * t4_report_fw_error - report firmware error
248  * @adap: the adapter
249  *
250  * The adapter firmware can indicate error conditions to the host.
251  * If the firmware has indicated an error, print out the reason for
252  * the firmware error.
253  */
254 static void t4_report_fw_error(struct adapter *adap)
255 {
256         static const char * const reason[] = {
257                 "Crash",                        /* PCIE_FW_EVAL_CRASH */
258                 "During Device Preparation",    /* PCIE_FW_EVAL_PREP */
259                 "During Device Configuration",  /* PCIE_FW_EVAL_CONF */
260                 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
261                 "Unexpected Event",     /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
262                 "Insufficient Airflow",         /* PCIE_FW_EVAL_OVERHEAT */
263                 "Device Shutdown",      /* PCIE_FW_EVAL_DEVICESHUTDOWN */
264                 "Reserved",                     /* reserved */
265         };
266         u32 pcie_fw;
267
268         pcie_fw = t4_read_reg(adap, A_PCIE_FW);
269         if (pcie_fw & F_PCIE_FW_ERR)
270                 pr_err("%s: Firmware reports adapter error: %s\n",
271                        __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
272 }
273
274 /*
275  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
276  */
277 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
278                          u32 mbox_addr)
279 {
280         for ( ; nflit; nflit--, mbox_addr += 8)
281                 *rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
282 }
283
284 /*
285  * Handle a FW assertion reported in a mailbox.
286  */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	/* Pull the full FW_DEBUG_CMD out of the mailbox (size is in
	 * 8-byte flits) and log the assertion's location and values.
	 */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
		be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}
296
297 #define X_CIM_PF_NOACCESS 0xeeeeeeee
298
299 /*
 300  * If the Host OS Driver needs locking around accesses to the mailbox, this
301  * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
302  */
303 /* makes single-statement usage a bit cleaner ... */
304 #ifdef T4_OS_NEEDS_MBOX_LOCKING
305 #define T4_OS_MBOX_LOCKING(x) x
306 #else
307 #define T4_OS_MBOX_LOCKING(x) do {} while (0)
308 #endif
309
310 /**
311  * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
312  * @adap: the adapter
313  * @mbox: index of the mailbox to use
314  * @cmd: the command to write
315  * @size: command length in bytes
316  * @rpl: where to optionally store the reply
317  * @sleep_ok: if true we may sleep while awaiting command completion
318  * @timeout: time to wait for command to finish before timing out
319  *           (negative implies @sleep_ok=false)
320  *
321  * Sends the given command to FW through the selected mailbox and waits
322  * for the FW to execute the command.  If @rpl is not %NULL it is used to
323  * store the FW's reply to the command.  The command and its optional
324  * reply are of the same length.  Some FW commands like RESET and
325  * INITIALIZE can take a considerable amount of time to execute.
326  * @sleep_ok determines whether we may sleep while awaiting the response.
327  * If sleeping is allowed we use progressive backoff otherwise we spin.
328  * Note that passing in a negative @timeout is an alternate mechanism
329  * for specifying @sleep_ok=false.  This is useful when a higher level
330  * interface allows for specification of @timeout but not @sleep_ok ...
331  *
332  * Returns 0 on success or a negative errno on failure.  A
333  * failure can happen either because we are not able to execute the
334  * command or FW executes it but signals an error.  In the latter case
335  * the return value is the error code indicated by FW (negated).
336  */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
			    const void __attribute__((__may_alias__)) *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms;
	unsigned int delay_idx;
	/* Scratch copy of the command; must be freed on every exit path */
	__be64 *temp = (__be64 *)malloc(size * sizeof(char));
	__be64 *p = temp;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	struct mbox_entry entry;
	u32 pcie_fw = 0;

	if (!temp)
		return -ENOMEM;

	/* Mailbox commands are multiples of 16 bytes, up to MBOX_LEN */
	if ((size & 15) || size > MBOX_LEN) {
		free(temp);
		return -EINVAL;
	}

	/* NOTE(review): the bzero is redundant — the memcpy below overwrites
	 * all @size bytes — but it is harmless.
	 */
	bzero(p, size);
	memcpy(p, (const __be64 *)cmd, size);

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/*
		 * If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_os_atomic_list_del(&entry, &adap->mbox_list,
					      &adap->mbox_lock);
			t4_report_fw_error(adap);
			free(temp);
			return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			rte_delay_ms(ms);
		}
	}
#endif /* T4_OS_NEEDS_MBOX_LOCKING */

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
							 &adap->mbox_list,
							 &adap->mbox_lock));
		t4_report_fw_error(adap);
		free(temp);
		return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		dev_err(adap, "found VALID command in mbox %u: "
			"%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
			"%016llx %016llx %016llx %016llx\n", __func__,  (mbox),
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));

	/* Hand the mailbox to the firmware ... */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			/* NOTE(review): the non-sleeping path also calls
			 * msleep() here (unlike the rte_delay_ms() used in
			 * the locking loop above) — looks intentional in
			 * this codebase but worth confirming upstream.
			 */
			msleep(ms);
		}

		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* Ownership back but no valid message yet: release
			 * the mailbox and keep polling.
			 */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			CXGBE_DEBUG_MBOX(adap,
			"%s: mbox %u: %016llx %016llx %016llx %016llx "
			"%016llx %016llx %016llx %016llx\n", __func__,  (mbox),
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));

			CXGBE_DEBUG_MBOX(adap,
				"command %#x completed in %d ms (%ssleeping)\n",
				*(const u8 *)cmd,
				i + ms, sleep_ok ? "" : "non-");

			/* A FW_DEBUG_CMD reply means the firmware hit an
			 * assertion rather than answering our command.
			 */
			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl) {
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			}
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			T4_OS_MBOX_LOCKING(
				t4_os_atomic_list_del(&entry, &adap->mbox_list,
						      &adap->mbox_lock));
			free(temp);
			/* Return the (negated) FW return value */
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	dev_err(adap, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
						 &adap->mbox_list,
						 &adap->mbox_lock));
	t4_report_fw_error(adap);
	free(temp);
	return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
}
570
/* Convenience wrapper: issue a mailbox command with the default
 * firmware command timeout.
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
				       FW_CMD_MAX_TIMEOUT);
}
577
578 /**
579  * t4_get_regs_len - return the size of the chips register set
580  * @adapter: the adapter
581  *
582  * Returns the size of the chip's BAR0 register space.
583  */
584 unsigned int t4_get_regs_len(struct adapter *adapter)
585 {
586         unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
587
588         switch (chip_version) {
589         case CHELSIO_T5:
590                 return T5_REGMAP_SIZE;
591         }
592
593         dev_err(adapter,
594                 "Unsupported chip version %d\n", chip_version);
595         return 0;
596 }
597
598 /**
599  * t4_get_regs - read chip registers into provided buffer
600  * @adap: the adapter
601  * @buf: register buffer
602  * @buf_size: size (in bytes) of register buffer
603  *
604  * If the provided register buffer isn't large enough for the chip's
605  * full register range, the register dump will be truncated to the
606  * register buffer's size.
607  */
608 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
609 {
610         static const unsigned int t5_reg_ranges[] = {
611                 0x1008, 0x10c0,
612                 0x10cc, 0x10f8,
613                 0x1100, 0x1100,
614                 0x110c, 0x1148,
615                 0x1180, 0x1184,
616                 0x1190, 0x1194,
617                 0x11a0, 0x11a4,
618                 0x11b0, 0x11b4,
619                 0x11fc, 0x123c,
620                 0x1280, 0x173c,
621                 0x1800, 0x18fc,
622                 0x3000, 0x3028,
623                 0x3060, 0x30b0,
624                 0x30b8, 0x30d8,
625                 0x30e0, 0x30fc,
626                 0x3140, 0x357c,
627                 0x35a8, 0x35cc,
628                 0x35ec, 0x35ec,
629                 0x3600, 0x5624,
630                 0x56cc, 0x56ec,
631                 0x56f4, 0x5720,
632                 0x5728, 0x575c,
633                 0x580c, 0x5814,
634                 0x5890, 0x589c,
635                 0x58a4, 0x58ac,
636                 0x58b8, 0x58bc,
637                 0x5940, 0x59c8,
638                 0x59d0, 0x59dc,
639                 0x59fc, 0x5a18,
640                 0x5a60, 0x5a70,
641                 0x5a80, 0x5a9c,
642                 0x5b94, 0x5bfc,
643                 0x6000, 0x6020,
644                 0x6028, 0x6040,
645                 0x6058, 0x609c,
646                 0x60a8, 0x614c,
647                 0x7700, 0x7798,
648                 0x77c0, 0x78fc,
649                 0x7b00, 0x7b58,
650                 0x7b60, 0x7b84,
651                 0x7b8c, 0x7c54,
652                 0x7d00, 0x7d38,
653                 0x7d40, 0x7d80,
654                 0x7d8c, 0x7ddc,
655                 0x7de4, 0x7e04,
656                 0x7e10, 0x7e1c,
657                 0x7e24, 0x7e38,
658                 0x7e40, 0x7e44,
659                 0x7e4c, 0x7e78,
660                 0x7e80, 0x7edc,
661                 0x7ee8, 0x7efc,
662                 0x8dc0, 0x8de0,
663                 0x8df8, 0x8e04,
664                 0x8e10, 0x8e84,
665                 0x8ea0, 0x8f84,
666                 0x8fc0, 0x9058,
667                 0x9060, 0x9060,
668                 0x9068, 0x90f8,
669                 0x9400, 0x9408,
670                 0x9410, 0x9470,
671                 0x9600, 0x9600,
672                 0x9608, 0x9638,
673                 0x9640, 0x96f4,
674                 0x9800, 0x9808,
675                 0x9820, 0x983c,
676                 0x9850, 0x9864,
677                 0x9c00, 0x9c6c,
678                 0x9c80, 0x9cec,
679                 0x9d00, 0x9d6c,
680                 0x9d80, 0x9dec,
681                 0x9e00, 0x9e6c,
682                 0x9e80, 0x9eec,
683                 0x9f00, 0x9f6c,
684                 0x9f80, 0xa020,
685                 0xd004, 0xd004,
686                 0xd010, 0xd03c,
687                 0xdfc0, 0xdfe0,
688                 0xe000, 0x1106c,
689                 0x11074, 0x11088,
690                 0x1109c, 0x1117c,
691                 0x11190, 0x11204,
692                 0x19040, 0x1906c,
693                 0x19078, 0x19080,
694                 0x1908c, 0x190e8,
695                 0x190f0, 0x190f8,
696                 0x19100, 0x19110,
697                 0x19120, 0x19124,
698                 0x19150, 0x19194,
699                 0x1919c, 0x191b0,
700                 0x191d0, 0x191e8,
701                 0x19238, 0x19290,
702                 0x193f8, 0x19428,
703                 0x19430, 0x19444,
704                 0x1944c, 0x1946c,
705                 0x19474, 0x19474,
706                 0x19490, 0x194cc,
707                 0x194f0, 0x194f8,
708                 0x19c00, 0x19c08,
709                 0x19c10, 0x19c60,
710                 0x19c94, 0x19ce4,
711                 0x19cf0, 0x19d40,
712                 0x19d50, 0x19d94,
713                 0x19da0, 0x19de8,
714                 0x19df0, 0x19e10,
715                 0x19e50, 0x19e90,
716                 0x19ea0, 0x19f24,
717                 0x19f34, 0x19f34,
718                 0x19f40, 0x19f50,
719                 0x19f90, 0x19fb4,
720                 0x19fc4, 0x19fe4,
721                 0x1a000, 0x1a004,
722                 0x1a010, 0x1a06c,
723                 0x1a0b0, 0x1a0e4,
724                 0x1a0ec, 0x1a0f8,
725                 0x1a100, 0x1a108,
726                 0x1a114, 0x1a120,
727                 0x1a128, 0x1a130,
728                 0x1a138, 0x1a138,
729                 0x1a190, 0x1a1c4,
730                 0x1a1fc, 0x1a1fc,
731                 0x1e008, 0x1e00c,
732                 0x1e040, 0x1e044,
733                 0x1e04c, 0x1e04c,
734                 0x1e284, 0x1e290,
735                 0x1e2c0, 0x1e2c0,
736                 0x1e2e0, 0x1e2e0,
737                 0x1e300, 0x1e384,
738                 0x1e3c0, 0x1e3c8,
739                 0x1e408, 0x1e40c,
740                 0x1e440, 0x1e444,
741                 0x1e44c, 0x1e44c,
742                 0x1e684, 0x1e690,
743                 0x1e6c0, 0x1e6c0,
744                 0x1e6e0, 0x1e6e0,
745                 0x1e700, 0x1e784,
746                 0x1e7c0, 0x1e7c8,
747                 0x1e808, 0x1e80c,
748                 0x1e840, 0x1e844,
749                 0x1e84c, 0x1e84c,
750                 0x1ea84, 0x1ea90,
751                 0x1eac0, 0x1eac0,
752                 0x1eae0, 0x1eae0,
753                 0x1eb00, 0x1eb84,
754                 0x1ebc0, 0x1ebc8,
755                 0x1ec08, 0x1ec0c,
756                 0x1ec40, 0x1ec44,
757                 0x1ec4c, 0x1ec4c,
758                 0x1ee84, 0x1ee90,
759                 0x1eec0, 0x1eec0,
760                 0x1eee0, 0x1eee0,
761                 0x1ef00, 0x1ef84,
762                 0x1efc0, 0x1efc8,
763                 0x1f008, 0x1f00c,
764                 0x1f040, 0x1f044,
765                 0x1f04c, 0x1f04c,
766                 0x1f284, 0x1f290,
767                 0x1f2c0, 0x1f2c0,
768                 0x1f2e0, 0x1f2e0,
769                 0x1f300, 0x1f384,
770                 0x1f3c0, 0x1f3c8,
771                 0x1f408, 0x1f40c,
772                 0x1f440, 0x1f444,
773                 0x1f44c, 0x1f44c,
774                 0x1f684, 0x1f690,
775                 0x1f6c0, 0x1f6c0,
776                 0x1f6e0, 0x1f6e0,
777                 0x1f700, 0x1f784,
778                 0x1f7c0, 0x1f7c8,
779                 0x1f808, 0x1f80c,
780                 0x1f840, 0x1f844,
781                 0x1f84c, 0x1f84c,
782                 0x1fa84, 0x1fa90,
783                 0x1fac0, 0x1fac0,
784                 0x1fae0, 0x1fae0,
785                 0x1fb00, 0x1fb84,
786                 0x1fbc0, 0x1fbc8,
787                 0x1fc08, 0x1fc0c,
788                 0x1fc40, 0x1fc44,
789                 0x1fc4c, 0x1fc4c,
790                 0x1fe84, 0x1fe90,
791                 0x1fec0, 0x1fec0,
792                 0x1fee0, 0x1fee0,
793                 0x1ff00, 0x1ff84,
794                 0x1ffc0, 0x1ffc8,
795                 0x30000, 0x30030,
796                 0x30038, 0x30038,
797                 0x30040, 0x30040,
798                 0x30100, 0x30144,
799                 0x30190, 0x301a0,
800                 0x301a8, 0x301b8,
801                 0x301c4, 0x301c8,
802                 0x301d0, 0x301d0,
803                 0x30200, 0x30318,
804                 0x30400, 0x304b4,
805                 0x304c0, 0x3052c,
806                 0x30540, 0x3061c,
807                 0x30800, 0x30828,
808                 0x30834, 0x30834,
809                 0x308c0, 0x30908,
810                 0x30910, 0x309ac,
811                 0x30a00, 0x30a14,
812                 0x30a1c, 0x30a2c,
813                 0x30a44, 0x30a50,
814                 0x30a74, 0x30a74,
815                 0x30a7c, 0x30afc,
816                 0x30b08, 0x30c24,
817                 0x30d00, 0x30d00,
818                 0x30d08, 0x30d14,
819                 0x30d1c, 0x30d20,
820                 0x30d3c, 0x30d3c,
821                 0x30d48, 0x30d50,
822                 0x31200, 0x3120c,
823                 0x31220, 0x31220,
824                 0x31240, 0x31240,
825                 0x31600, 0x3160c,
826                 0x31a00, 0x31a1c,
827                 0x31e00, 0x31e20,
828                 0x31e38, 0x31e3c,
829                 0x31e80, 0x31e80,
830                 0x31e88, 0x31ea8,
831                 0x31eb0, 0x31eb4,
832                 0x31ec8, 0x31ed4,
833                 0x31fb8, 0x32004,
834                 0x32200, 0x32200,
835                 0x32208, 0x32240,
836                 0x32248, 0x32280,
837                 0x32288, 0x322c0,
838                 0x322c8, 0x322fc,
839                 0x32600, 0x32630,
840                 0x32a00, 0x32abc,
841                 0x32b00, 0x32b10,
842                 0x32b20, 0x32b30,
843                 0x32b40, 0x32b50,
844                 0x32b60, 0x32b70,
845                 0x33000, 0x33028,
846                 0x33030, 0x33048,
847                 0x33060, 0x33068,
848                 0x33070, 0x3309c,
849                 0x330f0, 0x33128,
850                 0x33130, 0x33148,
851                 0x33160, 0x33168,
852                 0x33170, 0x3319c,
853                 0x331f0, 0x33238,
854                 0x33240, 0x33240,
855                 0x33248, 0x33250,
856                 0x3325c, 0x33264,
857                 0x33270, 0x332b8,
858                 0x332c0, 0x332e4,
859                 0x332f8, 0x33338,
860                 0x33340, 0x33340,
861                 0x33348, 0x33350,
862                 0x3335c, 0x33364,
863                 0x33370, 0x333b8,
864                 0x333c0, 0x333e4,
865                 0x333f8, 0x33428,
866                 0x33430, 0x33448,
867                 0x33460, 0x33468,
868                 0x33470, 0x3349c,
869                 0x334f0, 0x33528,
870                 0x33530, 0x33548,
871                 0x33560, 0x33568,
872                 0x33570, 0x3359c,
873                 0x335f0, 0x33638,
874                 0x33640, 0x33640,
875                 0x33648, 0x33650,
876                 0x3365c, 0x33664,
877                 0x33670, 0x336b8,
878                 0x336c0, 0x336e4,
879                 0x336f8, 0x33738,
880                 0x33740, 0x33740,
881                 0x33748, 0x33750,
882                 0x3375c, 0x33764,
883                 0x33770, 0x337b8,
884                 0x337c0, 0x337e4,
885                 0x337f8, 0x337fc,
886                 0x33814, 0x33814,
887                 0x3382c, 0x3382c,
888                 0x33880, 0x3388c,
889                 0x338e8, 0x338ec,
890                 0x33900, 0x33928,
891                 0x33930, 0x33948,
892                 0x33960, 0x33968,
893                 0x33970, 0x3399c,
894                 0x339f0, 0x33a38,
895                 0x33a40, 0x33a40,
896                 0x33a48, 0x33a50,
897                 0x33a5c, 0x33a64,
898                 0x33a70, 0x33ab8,
899                 0x33ac0, 0x33ae4,
900                 0x33af8, 0x33b10,
901                 0x33b28, 0x33b28,
902                 0x33b3c, 0x33b50,
903                 0x33bf0, 0x33c10,
904                 0x33c28, 0x33c28,
905                 0x33c3c, 0x33c50,
906                 0x33cf0, 0x33cfc,
907                 0x34000, 0x34030,
908                 0x34038, 0x34038,
909                 0x34040, 0x34040,
910                 0x34100, 0x34144,
911                 0x34190, 0x341a0,
912                 0x341a8, 0x341b8,
913                 0x341c4, 0x341c8,
914                 0x341d0, 0x341d0,
915                 0x34200, 0x34318,
916                 0x34400, 0x344b4,
917                 0x344c0, 0x3452c,
918                 0x34540, 0x3461c,
919                 0x34800, 0x34828,
920                 0x34834, 0x34834,
921                 0x348c0, 0x34908,
922                 0x34910, 0x349ac,
923                 0x34a00, 0x34a14,
924                 0x34a1c, 0x34a2c,
925                 0x34a44, 0x34a50,
926                 0x34a74, 0x34a74,
927                 0x34a7c, 0x34afc,
928                 0x34b08, 0x34c24,
929                 0x34d00, 0x34d00,
930                 0x34d08, 0x34d14,
931                 0x34d1c, 0x34d20,
932                 0x34d3c, 0x34d3c,
933                 0x34d48, 0x34d50,
934                 0x35200, 0x3520c,
935                 0x35220, 0x35220,
936                 0x35240, 0x35240,
937                 0x35600, 0x3560c,
938                 0x35a00, 0x35a1c,
939                 0x35e00, 0x35e20,
940                 0x35e38, 0x35e3c,
941                 0x35e80, 0x35e80,
942                 0x35e88, 0x35ea8,
943                 0x35eb0, 0x35eb4,
944                 0x35ec8, 0x35ed4,
945                 0x35fb8, 0x36004,
946                 0x36200, 0x36200,
947                 0x36208, 0x36240,
948                 0x36248, 0x36280,
949                 0x36288, 0x362c0,
950                 0x362c8, 0x362fc,
951                 0x36600, 0x36630,
952                 0x36a00, 0x36abc,
953                 0x36b00, 0x36b10,
954                 0x36b20, 0x36b30,
955                 0x36b40, 0x36b50,
956                 0x36b60, 0x36b70,
957                 0x37000, 0x37028,
958                 0x37030, 0x37048,
959                 0x37060, 0x37068,
960                 0x37070, 0x3709c,
961                 0x370f0, 0x37128,
962                 0x37130, 0x37148,
963                 0x37160, 0x37168,
964                 0x37170, 0x3719c,
965                 0x371f0, 0x37238,
966                 0x37240, 0x37240,
967                 0x37248, 0x37250,
968                 0x3725c, 0x37264,
969                 0x37270, 0x372b8,
970                 0x372c0, 0x372e4,
971                 0x372f8, 0x37338,
972                 0x37340, 0x37340,
973                 0x37348, 0x37350,
974                 0x3735c, 0x37364,
975                 0x37370, 0x373b8,
976                 0x373c0, 0x373e4,
977                 0x373f8, 0x37428,
978                 0x37430, 0x37448,
979                 0x37460, 0x37468,
980                 0x37470, 0x3749c,
981                 0x374f0, 0x37528,
982                 0x37530, 0x37548,
983                 0x37560, 0x37568,
984                 0x37570, 0x3759c,
985                 0x375f0, 0x37638,
986                 0x37640, 0x37640,
987                 0x37648, 0x37650,
988                 0x3765c, 0x37664,
989                 0x37670, 0x376b8,
990                 0x376c0, 0x376e4,
991                 0x376f8, 0x37738,
992                 0x37740, 0x37740,
993                 0x37748, 0x37750,
994                 0x3775c, 0x37764,
995                 0x37770, 0x377b8,
996                 0x377c0, 0x377e4,
997                 0x377f8, 0x377fc,
998                 0x37814, 0x37814,
999                 0x3782c, 0x3782c,
1000                 0x37880, 0x3788c,
1001                 0x378e8, 0x378ec,
1002                 0x37900, 0x37928,
1003                 0x37930, 0x37948,
1004                 0x37960, 0x37968,
1005                 0x37970, 0x3799c,
1006                 0x379f0, 0x37a38,
1007                 0x37a40, 0x37a40,
1008                 0x37a48, 0x37a50,
1009                 0x37a5c, 0x37a64,
1010                 0x37a70, 0x37ab8,
1011                 0x37ac0, 0x37ae4,
1012                 0x37af8, 0x37b10,
1013                 0x37b28, 0x37b28,
1014                 0x37b3c, 0x37b50,
1015                 0x37bf0, 0x37c10,
1016                 0x37c28, 0x37c28,
1017                 0x37c3c, 0x37c50,
1018                 0x37cf0, 0x37cfc,
1019                 0x38000, 0x38030,
1020                 0x38038, 0x38038,
1021                 0x38040, 0x38040,
1022                 0x38100, 0x38144,
1023                 0x38190, 0x381a0,
1024                 0x381a8, 0x381b8,
1025                 0x381c4, 0x381c8,
1026                 0x381d0, 0x381d0,
1027                 0x38200, 0x38318,
1028                 0x38400, 0x384b4,
1029                 0x384c0, 0x3852c,
1030                 0x38540, 0x3861c,
1031                 0x38800, 0x38828,
1032                 0x38834, 0x38834,
1033                 0x388c0, 0x38908,
1034                 0x38910, 0x389ac,
1035                 0x38a00, 0x38a14,
1036                 0x38a1c, 0x38a2c,
1037                 0x38a44, 0x38a50,
1038                 0x38a74, 0x38a74,
1039                 0x38a7c, 0x38afc,
1040                 0x38b08, 0x38c24,
1041                 0x38d00, 0x38d00,
1042                 0x38d08, 0x38d14,
1043                 0x38d1c, 0x38d20,
1044                 0x38d3c, 0x38d3c,
1045                 0x38d48, 0x38d50,
1046                 0x39200, 0x3920c,
1047                 0x39220, 0x39220,
1048                 0x39240, 0x39240,
1049                 0x39600, 0x3960c,
1050                 0x39a00, 0x39a1c,
1051                 0x39e00, 0x39e20,
1052                 0x39e38, 0x39e3c,
1053                 0x39e80, 0x39e80,
1054                 0x39e88, 0x39ea8,
1055                 0x39eb0, 0x39eb4,
1056                 0x39ec8, 0x39ed4,
1057                 0x39fb8, 0x3a004,
1058                 0x3a200, 0x3a200,
1059                 0x3a208, 0x3a240,
1060                 0x3a248, 0x3a280,
1061                 0x3a288, 0x3a2c0,
1062                 0x3a2c8, 0x3a2fc,
1063                 0x3a600, 0x3a630,
1064                 0x3aa00, 0x3aabc,
1065                 0x3ab00, 0x3ab10,
1066                 0x3ab20, 0x3ab30,
1067                 0x3ab40, 0x3ab50,
1068                 0x3ab60, 0x3ab70,
1069                 0x3b000, 0x3b028,
1070                 0x3b030, 0x3b048,
1071                 0x3b060, 0x3b068,
1072                 0x3b070, 0x3b09c,
1073                 0x3b0f0, 0x3b128,
1074                 0x3b130, 0x3b148,
1075                 0x3b160, 0x3b168,
1076                 0x3b170, 0x3b19c,
1077                 0x3b1f0, 0x3b238,
1078                 0x3b240, 0x3b240,
1079                 0x3b248, 0x3b250,
1080                 0x3b25c, 0x3b264,
1081                 0x3b270, 0x3b2b8,
1082                 0x3b2c0, 0x3b2e4,
1083                 0x3b2f8, 0x3b338,
1084                 0x3b340, 0x3b340,
1085                 0x3b348, 0x3b350,
1086                 0x3b35c, 0x3b364,
1087                 0x3b370, 0x3b3b8,
1088                 0x3b3c0, 0x3b3e4,
1089                 0x3b3f8, 0x3b428,
1090                 0x3b430, 0x3b448,
1091                 0x3b460, 0x3b468,
1092                 0x3b470, 0x3b49c,
1093                 0x3b4f0, 0x3b528,
1094                 0x3b530, 0x3b548,
1095                 0x3b560, 0x3b568,
1096                 0x3b570, 0x3b59c,
1097                 0x3b5f0, 0x3b638,
1098                 0x3b640, 0x3b640,
1099                 0x3b648, 0x3b650,
1100                 0x3b65c, 0x3b664,
1101                 0x3b670, 0x3b6b8,
1102                 0x3b6c0, 0x3b6e4,
1103                 0x3b6f8, 0x3b738,
1104                 0x3b740, 0x3b740,
1105                 0x3b748, 0x3b750,
1106                 0x3b75c, 0x3b764,
1107                 0x3b770, 0x3b7b8,
1108                 0x3b7c0, 0x3b7e4,
1109                 0x3b7f8, 0x3b7fc,
1110                 0x3b814, 0x3b814,
1111                 0x3b82c, 0x3b82c,
1112                 0x3b880, 0x3b88c,
1113                 0x3b8e8, 0x3b8ec,
1114                 0x3b900, 0x3b928,
1115                 0x3b930, 0x3b948,
1116                 0x3b960, 0x3b968,
1117                 0x3b970, 0x3b99c,
1118                 0x3b9f0, 0x3ba38,
1119                 0x3ba40, 0x3ba40,
1120                 0x3ba48, 0x3ba50,
1121                 0x3ba5c, 0x3ba64,
1122                 0x3ba70, 0x3bab8,
1123                 0x3bac0, 0x3bae4,
1124                 0x3baf8, 0x3bb10,
1125                 0x3bb28, 0x3bb28,
1126                 0x3bb3c, 0x3bb50,
1127                 0x3bbf0, 0x3bc10,
1128                 0x3bc28, 0x3bc28,
1129                 0x3bc3c, 0x3bc50,
1130                 0x3bcf0, 0x3bcfc,
1131                 0x3c000, 0x3c030,
1132                 0x3c038, 0x3c038,
1133                 0x3c040, 0x3c040,
1134                 0x3c100, 0x3c144,
1135                 0x3c190, 0x3c1a0,
1136                 0x3c1a8, 0x3c1b8,
1137                 0x3c1c4, 0x3c1c8,
1138                 0x3c1d0, 0x3c1d0,
1139                 0x3c200, 0x3c318,
1140                 0x3c400, 0x3c4b4,
1141                 0x3c4c0, 0x3c52c,
1142                 0x3c540, 0x3c61c,
1143                 0x3c800, 0x3c828,
1144                 0x3c834, 0x3c834,
1145                 0x3c8c0, 0x3c908,
1146                 0x3c910, 0x3c9ac,
1147                 0x3ca00, 0x3ca14,
1148                 0x3ca1c, 0x3ca2c,
1149                 0x3ca44, 0x3ca50,
1150                 0x3ca74, 0x3ca74,
1151                 0x3ca7c, 0x3cafc,
1152                 0x3cb08, 0x3cc24,
1153                 0x3cd00, 0x3cd00,
1154                 0x3cd08, 0x3cd14,
1155                 0x3cd1c, 0x3cd20,
1156                 0x3cd3c, 0x3cd3c,
1157                 0x3cd48, 0x3cd50,
1158                 0x3d200, 0x3d20c,
1159                 0x3d220, 0x3d220,
1160                 0x3d240, 0x3d240,
1161                 0x3d600, 0x3d60c,
1162                 0x3da00, 0x3da1c,
1163                 0x3de00, 0x3de20,
1164                 0x3de38, 0x3de3c,
1165                 0x3de80, 0x3de80,
1166                 0x3de88, 0x3dea8,
1167                 0x3deb0, 0x3deb4,
1168                 0x3dec8, 0x3ded4,
1169                 0x3dfb8, 0x3e004,
1170                 0x3e200, 0x3e200,
1171                 0x3e208, 0x3e240,
1172                 0x3e248, 0x3e280,
1173                 0x3e288, 0x3e2c0,
1174                 0x3e2c8, 0x3e2fc,
1175                 0x3e600, 0x3e630,
1176                 0x3ea00, 0x3eabc,
1177                 0x3eb00, 0x3eb10,
1178                 0x3eb20, 0x3eb30,
1179                 0x3eb40, 0x3eb50,
1180                 0x3eb60, 0x3eb70,
1181                 0x3f000, 0x3f028,
1182                 0x3f030, 0x3f048,
1183                 0x3f060, 0x3f068,
1184                 0x3f070, 0x3f09c,
1185                 0x3f0f0, 0x3f128,
1186                 0x3f130, 0x3f148,
1187                 0x3f160, 0x3f168,
1188                 0x3f170, 0x3f19c,
1189                 0x3f1f0, 0x3f238,
1190                 0x3f240, 0x3f240,
1191                 0x3f248, 0x3f250,
1192                 0x3f25c, 0x3f264,
1193                 0x3f270, 0x3f2b8,
1194                 0x3f2c0, 0x3f2e4,
1195                 0x3f2f8, 0x3f338,
1196                 0x3f340, 0x3f340,
1197                 0x3f348, 0x3f350,
1198                 0x3f35c, 0x3f364,
1199                 0x3f370, 0x3f3b8,
1200                 0x3f3c0, 0x3f3e4,
1201                 0x3f3f8, 0x3f428,
1202                 0x3f430, 0x3f448,
1203                 0x3f460, 0x3f468,
1204                 0x3f470, 0x3f49c,
1205                 0x3f4f0, 0x3f528,
1206                 0x3f530, 0x3f548,
1207                 0x3f560, 0x3f568,
1208                 0x3f570, 0x3f59c,
1209                 0x3f5f0, 0x3f638,
1210                 0x3f640, 0x3f640,
1211                 0x3f648, 0x3f650,
1212                 0x3f65c, 0x3f664,
1213                 0x3f670, 0x3f6b8,
1214                 0x3f6c0, 0x3f6e4,
1215                 0x3f6f8, 0x3f738,
1216                 0x3f740, 0x3f740,
1217                 0x3f748, 0x3f750,
1218                 0x3f75c, 0x3f764,
1219                 0x3f770, 0x3f7b8,
1220                 0x3f7c0, 0x3f7e4,
1221                 0x3f7f8, 0x3f7fc,
1222                 0x3f814, 0x3f814,
1223                 0x3f82c, 0x3f82c,
1224                 0x3f880, 0x3f88c,
1225                 0x3f8e8, 0x3f8ec,
1226                 0x3f900, 0x3f928,
1227                 0x3f930, 0x3f948,
1228                 0x3f960, 0x3f968,
1229                 0x3f970, 0x3f99c,
1230                 0x3f9f0, 0x3fa38,
1231                 0x3fa40, 0x3fa40,
1232                 0x3fa48, 0x3fa50,
1233                 0x3fa5c, 0x3fa64,
1234                 0x3fa70, 0x3fab8,
1235                 0x3fac0, 0x3fae4,
1236                 0x3faf8, 0x3fb10,
1237                 0x3fb28, 0x3fb28,
1238                 0x3fb3c, 0x3fb50,
1239                 0x3fbf0, 0x3fc10,
1240                 0x3fc28, 0x3fc28,
1241                 0x3fc3c, 0x3fc50,
1242                 0x3fcf0, 0x3fcfc,
1243                 0x40000, 0x4000c,
1244                 0x40040, 0x40050,
1245                 0x40060, 0x40068,
1246                 0x4007c, 0x4008c,
1247                 0x40094, 0x400b0,
1248                 0x400c0, 0x40144,
1249                 0x40180, 0x4018c,
1250                 0x40200, 0x40254,
1251                 0x40260, 0x40264,
1252                 0x40270, 0x40288,
1253                 0x40290, 0x40298,
1254                 0x402ac, 0x402c8,
1255                 0x402d0, 0x402e0,
1256                 0x402f0, 0x402f0,
1257                 0x40300, 0x4033c,
1258                 0x403f8, 0x403fc,
1259                 0x41304, 0x413c4,
1260                 0x41400, 0x4140c,
1261                 0x41414, 0x4141c,
1262                 0x41480, 0x414d0,
1263                 0x44000, 0x44054,
1264                 0x4405c, 0x44078,
1265                 0x440c0, 0x44174,
1266                 0x44180, 0x441ac,
1267                 0x441b4, 0x441b8,
1268                 0x441c0, 0x44254,
1269                 0x4425c, 0x44278,
1270                 0x442c0, 0x44374,
1271                 0x44380, 0x443ac,
1272                 0x443b4, 0x443b8,
1273                 0x443c0, 0x44454,
1274                 0x4445c, 0x44478,
1275                 0x444c0, 0x44574,
1276                 0x44580, 0x445ac,
1277                 0x445b4, 0x445b8,
1278                 0x445c0, 0x44654,
1279                 0x4465c, 0x44678,
1280                 0x446c0, 0x44774,
1281                 0x44780, 0x447ac,
1282                 0x447b4, 0x447b8,
1283                 0x447c0, 0x44854,
1284                 0x4485c, 0x44878,
1285                 0x448c0, 0x44974,
1286                 0x44980, 0x449ac,
1287                 0x449b4, 0x449b8,
1288                 0x449c0, 0x449fc,
1289                 0x45000, 0x45004,
1290                 0x45010, 0x45030,
1291                 0x45040, 0x45060,
1292                 0x45068, 0x45068,
1293                 0x45080, 0x45084,
1294                 0x450a0, 0x450b0,
1295                 0x45200, 0x45204,
1296                 0x45210, 0x45230,
1297                 0x45240, 0x45260,
1298                 0x45268, 0x45268,
1299                 0x45280, 0x45284,
1300                 0x452a0, 0x452b0,
1301                 0x460c0, 0x460e4,
1302                 0x47000, 0x4703c,
1303                 0x47044, 0x4708c,
1304                 0x47200, 0x47250,
1305                 0x47400, 0x47408,
1306                 0x47414, 0x47420,
1307                 0x47600, 0x47618,
1308                 0x47800, 0x47814,
1309                 0x48000, 0x4800c,
1310                 0x48040, 0x48050,
1311                 0x48060, 0x48068,
1312                 0x4807c, 0x4808c,
1313                 0x48094, 0x480b0,
1314                 0x480c0, 0x48144,
1315                 0x48180, 0x4818c,
1316                 0x48200, 0x48254,
1317                 0x48260, 0x48264,
1318                 0x48270, 0x48288,
1319                 0x48290, 0x48298,
1320                 0x482ac, 0x482c8,
1321                 0x482d0, 0x482e0,
1322                 0x482f0, 0x482f0,
1323                 0x48300, 0x4833c,
1324                 0x483f8, 0x483fc,
1325                 0x49304, 0x493c4,
1326                 0x49400, 0x4940c,
1327                 0x49414, 0x4941c,
1328                 0x49480, 0x494d0,
1329                 0x4c000, 0x4c054,
1330                 0x4c05c, 0x4c078,
1331                 0x4c0c0, 0x4c174,
1332                 0x4c180, 0x4c1ac,
1333                 0x4c1b4, 0x4c1b8,
1334                 0x4c1c0, 0x4c254,
1335                 0x4c25c, 0x4c278,
1336                 0x4c2c0, 0x4c374,
1337                 0x4c380, 0x4c3ac,
1338                 0x4c3b4, 0x4c3b8,
1339                 0x4c3c0, 0x4c454,
1340                 0x4c45c, 0x4c478,
1341                 0x4c4c0, 0x4c574,
1342                 0x4c580, 0x4c5ac,
1343                 0x4c5b4, 0x4c5b8,
1344                 0x4c5c0, 0x4c654,
1345                 0x4c65c, 0x4c678,
1346                 0x4c6c0, 0x4c774,
1347                 0x4c780, 0x4c7ac,
1348                 0x4c7b4, 0x4c7b8,
1349                 0x4c7c0, 0x4c854,
1350                 0x4c85c, 0x4c878,
1351                 0x4c8c0, 0x4c974,
1352                 0x4c980, 0x4c9ac,
1353                 0x4c9b4, 0x4c9b8,
1354                 0x4c9c0, 0x4c9fc,
1355                 0x4d000, 0x4d004,
1356                 0x4d010, 0x4d030,
1357                 0x4d040, 0x4d060,
1358                 0x4d068, 0x4d068,
1359                 0x4d080, 0x4d084,
1360                 0x4d0a0, 0x4d0b0,
1361                 0x4d200, 0x4d204,
1362                 0x4d210, 0x4d230,
1363                 0x4d240, 0x4d260,
1364                 0x4d268, 0x4d268,
1365                 0x4d280, 0x4d284,
1366                 0x4d2a0, 0x4d2b0,
1367                 0x4e0c0, 0x4e0e4,
1368                 0x4f000, 0x4f03c,
1369                 0x4f044, 0x4f08c,
1370                 0x4f200, 0x4f250,
1371                 0x4f400, 0x4f408,
1372                 0x4f414, 0x4f420,
1373                 0x4f600, 0x4f618,
1374                 0x4f800, 0x4f814,
1375                 0x50000, 0x50084,
1376                 0x50090, 0x500cc,
1377                 0x50400, 0x50400,
1378                 0x50800, 0x50884,
1379                 0x50890, 0x508cc,
1380                 0x50c00, 0x50c00,
1381                 0x51000, 0x5101c,
1382                 0x51300, 0x51308,
1383         };
1384
1385         u32 *buf_end = (u32 *)((char *)buf + buf_size);
1386         const unsigned int *reg_ranges;
1387         int reg_ranges_size, range;
1388         unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
1389
1390         /* Select the right set of register ranges to dump depending on the
1391          * adapter chip type.
1392          */
1393         switch (chip_version) {
1394         case CHELSIO_T5:
1395                 reg_ranges = t5_reg_ranges;
1396                 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
1397                 break;
1398
1399         default:
1400                 dev_err(adap,
1401                         "Unsupported chip version %d\n", chip_version);
1402                 return;
1403         }
1404
1405         /* Clear the register buffer and insert the appropriate register
1406          * values selected by the above register ranges.
1407          */
1408         memset(buf, 0, buf_size);
1409         for (range = 0; range < reg_ranges_size; range += 2) {
1410                 unsigned int reg = reg_ranges[range];
1411                 unsigned int last_reg = reg_ranges[range + 1];
1412                 u32 *bufp = (u32 *)((char *)buf + reg);
1413
1414                 /* Iterate across the register range filling in the register
1415                  * buffer but don't write past the end of the register buffer.
1416                  */
1417                 while (reg <= last_reg && bufp < buf_end) {
1418                         *bufp++ = t4_read_reg(adap, reg);
1419                         reg += sizeof(u32);
1420                 }
1421         }
1422 }
1423
1424 /* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
1425 #define EEPROM_DELAY            10              /* 10us per poll spin */
1426 #define EEPROM_MAX_POLL         5000            /* x 5000 == 50ms */
1427
1428 #define EEPROM_STAT_ADDR        0x7bfc
1429
1430 /**
1431  * Small utility function to wait till any outstanding VPD Access is complete.
1432  * We have a per-adapter state variable "VPD Busy" to indicate when we have a
1433  * VPD Access in flight.  This allows us to handle the problem of having a
1434  * previous VPD Access time out and prevent an attempt to inject a new VPD
1435  * Request before any in-flight VPD request has completed.
1436  */
1437 static int t4_seeprom_wait(struct adapter *adapter)
1438 {
1439         unsigned int base = adapter->params.pci.vpd_cap_addr;
1440         int max_poll;
1441
1442         /* If no VPD Access is in flight, we can just return success right
1443          * away.
1444          */
1445         if (!adapter->vpd_busy)
1446                 return 0;
1447
1448         /* Poll the VPD Capability Address/Flag register waiting for it
1449          * to indicate that the operation is complete.
1450          */
1451         max_poll = EEPROM_MAX_POLL;
1452         do {
1453                 u16 val;
1454
1455                 udelay(EEPROM_DELAY);
1456                 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
1457
1458                 /* If the operation is complete, mark the VPD as no longer
1459                  * busy and return success.
1460                  */
1461                 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
1462                         adapter->vpd_busy = 0;
1463                         return 0;
1464                 }
1465         } while (--max_poll);
1466
1467         /* Failure!  Note that we leave the VPD Busy status set in order to
1468          * avoid pushing a new VPD Access request into the VPD Capability till
1469          * the current operation eventually succeeds.  It's a bug to issue a
1470          * new request when an existing request is in flight and will result
1471          * in corrupt hardware state.
1472          */
1473         return -ETIMEDOUT;
1474 }
1475
1476 /**
1477  * t4_seeprom_read - read a serial EEPROM location
1478  * @adapter: adapter to read
1479  * @addr: EEPROM virtual address
1480  * @data: where to store the read data
1481  *
1482  * Read a 32-bit word from a location in serial EEPROM using the card's PCI
1483  * VPD capability.  Note that this function must be called with a virtual
1484  * address.
1485  */
1486 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
1487 {
1488         unsigned int base = adapter->params.pci.vpd_cap_addr;
1489         int ret;
1490
1491         /* VPD Accesses must alway be 4-byte aligned!
1492          */
1493         if (addr >= EEPROMVSIZE || (addr & 3))
1494                 return -EINVAL;
1495
1496         /* Wait for any previous operation which may still be in flight to
1497          * complete.
1498          */
1499         ret = t4_seeprom_wait(adapter);
1500         if (ret) {
1501                 dev_err(adapter, "VPD still busy from previous operation\n");
1502                 return ret;
1503         }
1504
1505         /* Issue our new VPD Read request, mark the VPD as being busy and wait
1506          * for our request to complete.  If it doesn't complete, note the
1507          * error and return it to our caller.  Note that we do not reset the
1508          * VPD Busy status!
1509          */
1510         t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
1511         adapter->vpd_busy = 1;
1512         adapter->vpd_flag = PCI_VPD_ADDR_F;
1513         ret = t4_seeprom_wait(adapter);
1514         if (ret) {
1515                 dev_err(adapter, "VPD read of address %#x failed\n", addr);
1516                 return ret;
1517         }
1518
1519         /* Grab the returned data, swizzle it into our endianness and
1520          * return success.
1521          */
1522         t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
1523         *data = le32_to_cpu(*data);
1524         return 0;
1525 }
1526
1527 /**
1528  * t4_seeprom_write - write a serial EEPROM location
1529  * @adapter: adapter to write
1530  * @addr: virtual EEPROM address
1531  * @data: value to write
1532  *
1533  * Write a 32-bit word to a location in serial EEPROM using the card's PCI
1534  * VPD capability.  Note that this function must be called with a virtual
1535  * address.
1536  */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;
	u32 stats_reg = 0;
	int max_poll;

	/* VPD accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/* Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		dev_err(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/* Issue our new VPD Write request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	/* Setting PCI_VPD_ADDR_F together with the address requests a write
	 * (the flag is cleared by hardware when the write completes).
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		dev_err(adapter, "VPD write of address %#x failed\n", addr);
		return ret;
	}

	/* Reset PCI_VPD_DATA register after a transaction and wait for our
	 * request to complete. If it doesn't complete, return error.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
	max_poll = EEPROM_MAX_POLL;
	do {
		udelay(EEPROM_DELAY);
		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
	} while ((stats_reg & 0x1) && --max_poll);	/* bit 0: write in progress */
	if (!max_poll)
		return -ETIMEDOUT;

	/* Return success! */
	return 0;
}
1590
1591 /**
1592  * t4_seeprom_wp - enable/disable EEPROM write protection
1593  * @adapter: the adapter
1594  * @enable: whether to enable or disable write protection
1595  *
1596  * Enables or disables write protection on the serial EEPROM.
1597  */
1598 int t4_seeprom_wp(struct adapter *adapter, int enable)
1599 {
1600         return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
1601 }
1602
1603 /**
1604  * t4_config_rss_range - configure a portion of the RSS mapping table
1605  * @adapter: the adapter
1606  * @mbox: mbox to use for the FW command
1607  * @viid: virtual interface whose RSS subtable is to be written
1608  * @start: start entry in the table to write
1609  * @n: how many table entries to write
1610  * @rspq: values for the "response queue" (Ingress Queue) lookup table
1611  * @nrspq: number of values in @rspq
1612  *
1613  * Programs the selected part of the VI's RSS mapping table with the
1614  * provided values.  If @nrspq < @n the supplied values are used repeatedly
1615  * until the full table range is populated.
1616  *
1617  * The caller must ensure the values in @rspq are in the range allowed for
1618  * @viid.
1619  */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = 0;
			qbuf[1] = 0;
			qbuf[2] = 0;
			/* Wrap back to the start of @rspq when we run off its
			 * end, so a short table is used repeatedly (this is
			 * the @nrspq < @n case described in the header).
			 */
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}

	return 0;
}
1701
1702 /**
1703  * t4_config_vi_rss - configure per VI RSS settings
1704  * @adapter: the adapter
1705  * @mbox: mbox to use for the FW command
1706  * @viid: the VI id
1707  * @flags: RSS flags
1708  * @defq: id of the default RSS queue for the VI.
1709  *
1710  * Configures VI-specific RSS properties.
1711  */
1712 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
1713                      unsigned int flags, unsigned int defq)
1714 {
1715         struct fw_rss_vi_config_cmd c;
1716
1717         memset(&c, 0, sizeof(c));
1718         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
1719                                    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
1720                                    V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
1721         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
1722         c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
1723                         V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
1724         return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
1725 }
1726
1727 /**
1728  * init_cong_ctrl - initialize congestion control parameters
1729  * @a: the alpha values for congestion control
1730  * @b: the beta values for congestion control
1731  *
1732  * Initialize the congestion control parameters.
1733  */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	/* Per-window congestion control parameters for the 32 windows:
	 * additive values in @alpha and corresponding values in @beta.
	 */
	static const unsigned short alpha[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1,
		2, 3, 4, 5, 6, 7, 8, 9, 10,
		14, 17, 21, 25, 30, 35, 45, 60, 80,
		100, 200, 300, 400, 500
	};
	static const unsigned short beta[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0,
		1, 1, 2, 2, 3, 3, 3, 3, 4,
		4, 4, 4, 4, 5, 5, 5, 5, 5,
		5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < 32; i++) {
		a[i] = alpha[i];
		b[i] = beta[i];
	}
}
1791
/* Initialize the common header of firmware command @var of type
 * FW_<cmd>_CMD: mark it as a REQUEST with READ or WRITE intent (@rd_wr)
 * and fill in the 16-byte command length.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
			F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
1797
1798 int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
1799 {
1800         u32 cclk_param, cclk_val;
1801         int ret;
1802
1803         /*
1804          * Ask firmware for the Core Clock since it knows how to translate the
1805          * Reference Clock ('V2') VPD field into a Core Clock value ...
1806          */
1807         cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
1808                       V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
1809         ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
1810                               1, &cclk_param, &cclk_val);
1811         if (ret) {
1812                 dev_err(adapter, "%s: error in fetching from coreclock - %d\n",
1813                         __func__, ret);
1814                 return ret;
1815         }
1816
1817         p->cclk = cclk_val;
1818         dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
1819         return 0;
1820 }
1821
/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes (standard SPI serial-flash command set) */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};
1835
1836 /**
1837  * sf1_read - read data from the serial flash
1838  * @adapter: the adapter
1839  * @byte_cnt: number of bytes to read
1840  * @cont: whether another operation will be chained
1841  * @lock: whether to lock SF for PL access only
1842  * @valp: where to store the read data
1843  *
1844  * Reads up to 4 bytes of data from the serial flash.  The location of
1845  * the read needs to be specified prior to calling this by issuing the
1846  * appropriate commands to the serial flash.
1847  */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	/* The SF interface transfers at most 4 bytes per operation. */
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* Refuse to start while a previous SF operation is still busy. */
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* Kick off the read; BYTECNT is encoded as (count - 1). */
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	/* Wait for BUSY to clear, then fetch the data word. */
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}
1864
1865 /**
1866  * sf1_write - write data to the serial flash
1867  * @adapter: the adapter
1868  * @byte_cnt: number of bytes to write
1869  * @cont: whether another operation will be chained
1870  * @lock: whether to lock SF for PL access only
1871  * @val: value to write
1872  *
1873  * Writes up to 4 bytes of data to the serial flash.  The location of
1874  * the write needs to be specified prior to calling this by issuing the
1875  * appropriate commands to the serial flash.
1876  */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	/* The SF interface transfers at most 4 bytes per operation. */
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* Refuse to start while a previous SF operation is still busy. */
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	/* Stage the data, then issue the write (V_OP(1) selects write);
	 * BYTECNT is encoded as (count - 1).
	 */
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}
1889
1890 /**
1891  * t4_read_flash - read words from serial flash
1892  * @adapter: the adapter
1893  * @addr: the start address for the read
1894  * @nwords: how many 32-bit words to read
1895  * @data: where to store the read data
1896  * @byte_oriented: whether to store data as bytes or as words
1897  *
1898  * Read the specified number of 32-bit words from the serial flash.
1899  * If @byte_oriented is set the read data is stored as a byte array
1900  * (i.e., big-endian), otherwise as 32-bit words in the platform's
1901  * natural endianness.
1902  */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* The whole transfer must fit inside the flash and start on a
	 * 32-bit boundary.
	 */
	if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
	    (addr & 3))
		return -EINVAL;

	/* Build the Fast Read command word: the SF_RD_DATA_FAST opcode in
	 * the low byte, the (byte-swapped) flash address above it.
	 */
	addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;

	ret = sf1_write(adapter, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	/* Fast Read needs a dummy byte after the address before real data
	 * arrives; consume it (the loop below overwrites *data anyway).
	 */
	ret = sf1_read(adapter, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Keep the transfer chained (cont) until the last word. */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = cpu_to_be32(*data);
	}
	return 0;
}
1933
1934 /**
1935  * t4_get_fw_version - read the firmware version
1936  * @adapter: the adapter
1937  * @vers: where to place the version
1938  *
1939  * Reads the FW version from flash.
1940  */
1941 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
1942 {
1943         return t4_read_flash(adapter, FLASH_FW_START +
1944                              offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
1945 }
1946
1947 /**
1948  * t4_get_tp_version - read the TP microcode version
1949  * @adapter: the adapter
1950  * @vers: where to place the version
1951  *
1952  * Reads the TP microcode version from flash.
1953  */
1954 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
1955 {
1956         return t4_read_flash(adapter, FLASH_FW_START +
1957                              offsetof(struct fw_hdr, tp_microcode_ver),
1958                              1, vers, 0);
1959 }
1960
/* All port speed capabilities we may advertise, plus auto-negotiation. */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1964
1965 /**
1966  * t4_link_l1cfg - apply link configuration to MAC/PHY
1967  * @phy: the PHY to setup
1968  * @mac: the MAC to setup
1969  * @lc: the requested link configuration
1970  *
1971  * Set up a port's MAC and PHY according to a desired link configuration.
1972  * - If the PHY can auto-negotiate first decide what to advertise, then
1973  *   enable/disable auto-negotiation as desired, and reset.
1974  * - If the PHY does not auto-negotiate just reset it.
1975  * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1976  *   otherwise do it later based on the outcome of auto-negotiation.
1977  */
1978 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
1979                   struct link_config *lc)
1980 {
1981         struct fw_port_cmd c;
1982         unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
1983
1984         lc->link_ok = 0;
1985         if (lc->requested_fc & PAUSE_RX)
1986                 fc |= FW_PORT_CAP_FC_RX;
1987         if (lc->requested_fc & PAUSE_TX)
1988                 fc |= FW_PORT_CAP_FC_TX;
1989
1990         memset(&c, 0, sizeof(c));
1991         c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
1992                                      F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
1993                                      V_FW_PORT_CMD_PORTID(port));
1994         c.action_to_len16 =
1995                 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
1996                             FW_LEN16(c));
1997
1998         if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1999                 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
2000                                              fc);
2001                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
2002         } else if (lc->autoneg == AUTONEG_DISABLE) {
2003                 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
2004                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
2005         } else {
2006                 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
2007         }
2008
2009         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2010 }
2011
2012 /**
2013  * t4_flash_cfg_addr - return the address of the flash configuration file
2014  * @adapter: the adapter
2015  *
2016  * Return the address within the flash where the Firmware Configuration
2017  * File is stored, or an error if the device FLASH is too small to contain
2018  * a Firmware Configuration File.
2019  */
2020 int t4_flash_cfg_addr(struct adapter *adapter)
2021 {
2022         /*
2023          * If the device FLASH isn't large enough to hold a Firmware
2024          * Configuration File, return an error.
2025          */
2026         if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
2027                 return -ENOSPC;
2028
2029         return FLASH_CFG_START;
2030 }
2031
2032 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2033
2034 /**
2035  * t4_intr_enable - enable interrupts
2036  * @adapter: the adapter whose interrupts should be enabled
2037  *
2038  * Enable PF-specific interrupts for the calling function and the top-level
2039  * interrupt concentrator for global interrupts.  Interrupts are already
2040  * enabled at each module, here we just enable the roots of the interrupt
2041  * hierarchies.
2042  *
2043  * Note: this function should be called only when the driver manages
2044  * non PF-specific interrupts from the various HW modules.  Only one PCI
2045  * function at a time should be doing this.
2046  */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	/* Which PF is the driver running on? */
	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));

	/* These extra doorbell/context causes are only enabled on T4/T5. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	/* Enable the PF-specific causes ... */
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* ... and route top-level interrupts to this PF. */
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}
2064
2065 /**
2066  * t4_intr_disable - disable interrupts
2067  * @adapter: the adapter whose interrupts should be disabled
2068  *
2069  * Disable interrupts.  We only disable the top-level interrupt
2070  * concentrators.  The caller must be a PCI function managing global
2071  * interrupts.
2072  */
2073 void t4_intr_disable(struct adapter *adapter)
2074 {
2075         u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
2076
2077         t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2078         t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2079 }
2080
2081 /**
2082  * t4_get_port_type_description - return Port Type string description
2083  * @port_type: firmware Port Type enumeration
2084  */
2085 const char *t4_get_port_type_description(enum fw_port_type port_type)
2086 {
2087         static const char * const port_type_description[] = {
2088                 "Fiber_XFI",
2089                 "Fiber_XAUI",
2090                 "BT_SGMII",
2091                 "BT_XFI",
2092                 "BT_XAUI",
2093                 "KX4",
2094                 "CX4",
2095                 "KX",
2096                 "KR",
2097                 "SFP",
2098                 "BP_AP",
2099                 "BP4_AP",
2100                 "QSFP_10G",
2101                 "QSA",
2102                 "QSFP",
2103                 "BP40_BA",
2104         };
2105
2106         if (port_type < ARRAY_SIZE(port_type_description))
2107                 return port_type_description[port_type];
2108         return "UNKNOWN";
2109 }
2110
2111 /**
2112  * t4_get_mps_bg_map - return the buffer groups associated with a port
2113  * @adap: the adapter
2114  * @idx: the port index
2115  *
2116  * Returns a bitmap indicating which MPS buffer groups are associated
2117  * with the given port.  Bit i is set if buffer group i is used by the
2118  * port.
2119  */
2120 unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
2121 {
2122         u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
2123
2124         if (n == 0)
2125                 return idx == 0 ? 0xf : 0;
2126         if (n == 1)
2127                 return idx < 2 ? (3 << (2 * idx)) : 0;
2128         return 1 << idx;
2129 }
2130
2131 /**
2132  * t4_get_port_stats - collect port statistics
2133  * @adap: the adapter
2134  * @idx: the port index
2135  * @p: the stats structure to fill
2136  *
2137  * Collect statistics related to the given port from HW.
2138  */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

/* Read one 64-bit per-port counter; the register base differs on T4. */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
		      (is_t4(adap->params.chip) ? \
		       PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
		       T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
/* Read one 64-bit common (non-per-port) counter. */
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop             = GET_STAT(TX_PORT_DROP);
	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);

	/* On T5+ the hardware may be configured to include pause frames in
	 * the frame/byte counters; back them out so the reported TX stats
	 * exclude pause traffic (a pause frame is 64 bytes).
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX) {
			p->tx_frames -= p->tx_pause;
			p->tx_octets -= p->tx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment as above, for the RX direction. */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX) {
			p->rx_frames -= p->rx_pause;
			p->rx_octets -= p->rx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Only read buffer-group counters for groups this port owns. */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
2233
2234 /**
2235  * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
2236  * @adap: The adapter
2237  * @idx: The port
2238  * @stats: Current stats to fill
2239  * @offset: Previous stats snapshot
2240  */
2241 void t4_get_port_stats_offset(struct adapter *adap, int idx,
2242                               struct port_stats *stats,
2243                               struct port_stats *offset)
2244 {
2245         u64 *s, *o;
2246         unsigned int i;
2247
2248         t4_get_port_stats(adap, idx, stats);
2249         for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
2250              i < (sizeof(struct port_stats) / sizeof(u64));
2251              i++, s++, o++)
2252                 *s -= *o;
2253 }
2254
2255 /**
2256  * t4_clr_port_stats - clear port statistics
2257  * @adap: the adapter
2258  * @idx: the port index
2259  *
2260  * Clear HW statistics for the given port.
2261  */
void t4_clr_port_stats(struct adapter *adap, int idx)
{
	unsigned int i;
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 port_base_addr;

	/* The per-port statistics registers live at a chip-dependent base. */
	if (is_t4(adap->params.chip))
		port_base_addr = PORT_BASE(idx);
	else
		port_base_addr = T5_PORT_BASE(idx);

	/* Walk the TX counter register range (stride 8 per counter). */
	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	/* Walk the RX counter register range. */
	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	/* Clear drop/truncate counters only for the buffer groups that
	 * belong to this port.
	 */
	for (i = 0; i < 4; i++)
		if (bgmap & (1 << i)) {
			t4_write_reg(adap,
				     A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     i * 8, 0);
			t4_write_reg(adap,
				     A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     i * 8, 0);
		}
}
2289
2290 /**
2291  * t4_fw_hello - establish communication with FW
2292  * @adap: the adapter
2293  * @mbox: mailbox to use for the FW command
2294  * @evt_mbox: mailbox to receive async FW events
2295  * @master: specifies the caller's willingness to be the device master
2296  * @state: returns the current device state (if non-NULL)
2297  *
2298  * Issues a command to establish communication with FW.  Returns either
2299  * an error (negative integer) or the mailbox of the Master PF.
2300  */
2301 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2302                 enum dev_master master, enum dev_state *state)
2303 {
2304         int ret;
2305         struct fw_hello_cmd c;
2306         u32 v;
2307         unsigned int master_mbox;
2308         int retries = FW_CMD_HELLO_RETRIES;
2309
2310 retry:
2311         memset(&c, 0, sizeof(c));
2312         INIT_CMD(c, HELLO, WRITE);
2313         c.err_to_clearinit = cpu_to_be32(
2314                         V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2315                         V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2316                         V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2317                                                 M_FW_HELLO_CMD_MBMASTER) |
2318                         V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2319                         V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
2320                         F_FW_HELLO_CMD_CLEARINIT);
2321
2322         /*
2323          * Issue the HELLO command to the firmware.  If it's not successful
2324          * but indicates that we got a "busy" or "timeout" condition, retry
2325          * the HELLO until we exhaust our retry limit.  If we do exceed our
2326          * retry limit, check to see if the firmware left us any error
2327          * information and report that if so ...
2328          */
2329         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2330         if (ret != FW_SUCCESS) {
2331                 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2332                         goto retry;
2333                 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
2334                         t4_report_fw_error(adap);
2335                 return ret;
2336         }
2337
2338         v = be32_to_cpu(c.err_to_clearinit);
2339         master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
2340         if (state) {
2341                 if (v & F_FW_HELLO_CMD_ERR)
2342                         *state = DEV_STATE_ERR;
2343                 else if (v & F_FW_HELLO_CMD_INIT)
2344                         *state = DEV_STATE_INIT;
2345                 else
2346                         *state = DEV_STATE_UNINIT;
2347         }
2348
2349         /*
2350          * If we're not the Master PF then we need to wait around for the
2351          * Master PF Driver to finish setting up the adapter.
2352          *
2353          * Note that we also do this wait if we're a non-Master-capable PF and
2354          * there is no current Master PF; a Master PF may show up momentarily
2355          * and we wouldn't want to fail pointlessly.  (This can happen when an
2356          * OS loads lots of different drivers rapidly at the same time).  In
2357          * this case, the Master PF returned by the firmware will be
2358          * M_PCIE_FW_MASTER so the test below will work ...
2359          */
2360         if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
2361             master_mbox != mbox) {
2362                 int waiting = FW_CMD_HELLO_TIMEOUT;
2363
2364                 /*
2365                  * Wait for the firmware to either indicate an error or
2366                  * initialized state.  If we see either of these we bail out
2367                  * and report the issue to the caller.  If we exhaust the
2368                  * "hello timeout" and we haven't exhausted our retries, try
2369                  * again.  Otherwise bail with a timeout error.
2370                  */
2371                 for (;;) {
2372                         u32 pcie_fw;
2373
2374                         msleep(50);
2375                         waiting -= 50;
2376
2377                         /*
2378                          * If neither Error nor Initialialized are indicated
2379                          * by the firmware keep waiting till we exaust our
2380                          * timeout ... and then retry if we haven't exhausted
2381                          * our retries ...
2382                          */
2383                         pcie_fw = t4_read_reg(adap, A_PCIE_FW);
2384                         if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
2385                                 if (waiting <= 0) {
2386                                         if (retries-- > 0)
2387                                                 goto retry;
2388
2389                                         return -ETIMEDOUT;
2390                                 }
2391                                 continue;
2392                         }
2393
2394                         /*
2395                          * We either have an Error or Initialized condition
2396                          * report errors preferentially.
2397                          */
2398                         if (state) {
2399                                 if (pcie_fw & F_PCIE_FW_ERR)
2400                                         *state = DEV_STATE_ERR;
2401                                 else if (pcie_fw & F_PCIE_FW_INIT)
2402                                         *state = DEV_STATE_INIT;
2403                         }
2404
2405                         /*
2406                          * If we arrived before a Master PF was selected and
2407                          * there's not a valid Master PF, grab its identity
2408                          * for our caller.
2409                          */
2410                         if (master_mbox == M_PCIE_FW_MASTER &&
2411                             (pcie_fw & F_PCIE_FW_MASTER_VLD))
2412                                 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
2413                         break;
2414                 }
2415         }
2416
2417         return master_mbox;
2418 }
2419
2420 /**
2421  * t4_fw_bye - end communication with FW
2422  * @adap: the adapter
2423  * @mbox: mailbox to use for the FW command
2424  *
2425  * Issues a command to terminate communication with FW.
2426  */
2427 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2428 {
2429         struct fw_bye_cmd c;
2430
2431         memset(&c, 0, sizeof(c));
2432         INIT_CMD(c, BYE, WRITE);
2433         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2434 }
2435
2436 /**
2437  * t4_fw_reset - issue a reset to FW
2438  * @adap: the adapter
2439  * @mbox: mailbox to use for the FW command
2440  * @reset: specifies the type of reset to perform
2441  *
2442  * Issues a reset command of the specified type to FW.
2443  */
2444 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2445 {
2446         struct fw_reset_cmd c;
2447
2448         memset(&c, 0, sizeof(c));
2449         INIT_CMD(c, RESET, WRITE);
2450         c.val = cpu_to_be32(reset);
2451         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2452 }
2453
/**
 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state.  The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * M_PCIE_FW_MASTER).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing.  The only way out of this state is to RESTART the firmware
 * ...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		/* PIORST/PIORSTMODE select the reset type; HALT tells the
		 * firmware to stop rather than restart afterwards.
		 */
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		/* UPCRST holds the uP in reset; PCIE_FW.HALT records the
		 * HALT state for the next firmware start (see t4_fw_restart).
		 */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}
2513
/**
 * t4_fw_restart - restart the firmware by taking the uP out of RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by t4_fw_halt().  On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET.  This will cause the
 *    firmware to start up from its start vector.  And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			/* Release the uP first so it can process the RESET
			 * mailbox command; give it a moment to come up.
			 */
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		/* Direct chip reset; the 2s delay gives the chip time to
		 * come back out of reset before callers touch it.
		 */
		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for the firmware to
		 * clear PCIE_FW.HALT, indicating it has restarted.
		 */
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return FW_SUCCESS;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
2577
/**
 * t4_fixup_host_params_compat - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 * @chip_compat: maintain compatibility with designated chip
 *
 * Various registers in the chip contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * @chip_compat is used to limit the set of changes that are made
 * to be compatible with the indicated chip release.  This is used by
 * drivers to maintain compatibility with chip register settings when
 * the drivers haven't [yet] been updated with new chip support.
 */
int t4_fixup_host_params_compat(struct adapter *adap,
				unsigned int page_size,
				unsigned int cache_line_size,
				enum chip_type chip_compat)
{
	/* log2 of the host page size (page_size is assumed to be a power
	 * of two -- TODO confirm callers guarantee this).
	 */
	unsigned int page_shift = cxgbe_fls(page_size) - 1;
	/* SGE encodes the host page size as log2(page_size) - 10 */
	unsigned int sge_hps = page_shift - 10;
	/* Egress status page is either 64 or 128 bytes */
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	/* Free List alignment: at least 32 bytes, otherwise the cache line */
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;

	/* Program the same host page size for all eight PFs. */
	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
		     V_HOSTPAGESIZEPF0(sge_hps) |
		     V_HOSTPAGESIZEPF1(sge_hps) |
		     V_HOSTPAGESIZEPF2(sge_hps) |
		     V_HOSTPAGESIZEPF3(sge_hps) |
		     V_HOSTPAGESIZEPF4(sge_hps) |
		     V_HOSTPAGESIZEPF5(sge_hps) |
		     V_HOSTPAGESIZEPF6(sge_hps) |
		     V_HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip) || is_t4(chip_compat))
		/* T4 has a single combined Padding/Packing Boundary. */
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(fl_align_log -
						  X_INGPADBOUNDARY_SHIFT) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
	else {
		/*
		 * T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.  For T5 the smallest
		 * Padding Boundary which we can select is 32 bytes which is
		 * larger than any known Memory Controller Line Size so we'll
		 * use that.
		 */

		/*
		 * N.B. T5 has a different interpretation of the "0" value for
		 * the Packing Boundary.  This corresponds to 16 bytes instead
		 * of the expected 32 bytes.  We never have a Packing Boundary
		 * less than 32 bytes so we can't use that special value but
		 * on the other hand, if we wanted 32 bytes, the best we can
		 * really do is 64 bytes ...
		 */
		if (fl_align <= 32) {
			fl_align = 64;
			fl_align_log = 6;
		}
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(X_INGPCIEBOUNDARY_32B) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
		t4_set_reg_field(adap, A_SGE_CONTROL2,
				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
				 V_INGPACKBOUNDARY(fl_align_log -
						   X_INGPACKBOUNDARY_SHIFT));
	}

	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * The first four entries are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
	/* Round the 1500/9000-byte MTU buffer sizes up to fl_align. */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
		     & ~(fl_align - 1));
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
		     & ~(fl_align - 1));

	/* TDDP page size is encoded relative to 4KB (page_shift - 12). */
	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));

	return 0;
}
2694
/**
 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 *
 * Various registers in T4 contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * This routine makes changes which are compatible with T4 chips.
 *
 * Thin wrapper: delegates to t4_fixup_host_params_compat() with
 * @chip_compat fixed to T4_LAST_REV.
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
					   T4_LAST_REV);
}
2713
2714 /**
2715  * t4_fw_initialize - ask FW to initialize the device
2716  * @adap: the adapter
2717  * @mbox: mailbox to use for the FW command
2718  *
2719  * Issues a command to FW to partially initialize the device.  This
2720  * performs initialization that generally doesn't depend on user input.
2721  */
2722 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
2723 {
2724         struct fw_initialize_cmd c;
2725
2726         memset(&c, 0, sizeof(c));
2727         INIT_CMD(c, INITIALIZE, WRITE);
2728         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2729 }
2730
2731 /**
2732  * t4_query_params_rw - query FW or device parameters
2733  * @adap: the adapter
2734  * @mbox: mailbox to use for the FW command
2735  * @pf: the PF
2736  * @vf: the VF
2737  * @nparams: the number of parameters
2738  * @params: the parameter names
2739  * @val: the parameter values
2740  * @rw: Write and read flag
2741  *
2742  * Reads the value of FW or device parameters.  Up to 7 parameters can be
2743  * queried at once.
2744  */
2745 static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
2746                               unsigned int pf, unsigned int vf,
2747                               unsigned int nparams, const u32 *params,
2748                               u32 *val, int rw)
2749 {
2750         unsigned int i;
2751         int ret;
2752         struct fw_params_cmd c;
2753         __be32 *p = &c.param[0].mnem;
2754
2755         if (nparams > 7)
2756                 return -EINVAL;
2757
2758         memset(&c, 0, sizeof(c));
2759         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
2760                                   F_FW_CMD_REQUEST | F_FW_CMD_READ |
2761                                   V_FW_PARAMS_CMD_PFN(pf) |
2762                                   V_FW_PARAMS_CMD_VFN(vf));
2763         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2764
2765         for (i = 0; i < nparams; i++) {
2766                 *p++ = cpu_to_be32(*params++);
2767                 if (rw)
2768                         *p = cpu_to_be32(*(val + i));
2769                 p++;
2770         }
2771
2772         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2773         if (ret == 0)
2774                 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2775                         *val++ = be32_to_cpu(*p);
2776         return ret;
2777 }
2778
/**
 * t4_query_params - query FW or device parameters (read-only)
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters (at most 7)
 * @params: the parameter names
 * @val: filled in with the parameter values on success
 *
 * Convenience wrapper around t4_query_params_rw() with @rw disabled.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}
2785
2786 /**
2787  * t4_set_params_timeout - sets FW or device parameters
2788  * @adap: the adapter
2789  * @mbox: mailbox to use for the FW command
2790  * @pf: the PF
2791  * @vf: the VF
2792  * @nparams: the number of parameters
2793  * @params: the parameter names
2794  * @val: the parameter values
2795  * @timeout: the timeout time
2796  *
2797  * Sets the value of FW or device parameters.  Up to 7 parameters can be
2798  * specified at once.
2799  */
2800 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
2801                           unsigned int pf, unsigned int vf,
2802                           unsigned int nparams, const u32 *params,
2803                           const u32 *val, int timeout)
2804 {
2805         struct fw_params_cmd c;
2806         __be32 *p = &c.param[0].mnem;
2807
2808         if (nparams > 7)
2809                 return -EINVAL;
2810
2811         memset(&c, 0, sizeof(c));
2812         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
2813                                   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2814                                   V_FW_PARAMS_CMD_PFN(pf) |
2815                                   V_FW_PARAMS_CMD_VFN(vf));
2816         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2817
2818         while (nparams--) {
2819                 *p++ = cpu_to_be32(*params++);
2820                 *p++ = cpu_to_be32(*val++);
2821         }
2822
2823         return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
2824 }
2825
/**
 * t4_set_params - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters (at most 7)
 * @params: the parameter names
 * @val: the parameter values
 *
 * Convenience wrapper around t4_set_params_timeout() using the
 * default FW_CMD_MAX_TIMEOUT.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}
2833
2834 /**
2835  * t4_alloc_vi_func - allocate a virtual interface
2836  * @adap: the adapter
2837  * @mbox: mailbox to use for the FW command
2838  * @port: physical port associated with the VI
2839  * @pf: the PF owning the VI
2840  * @vf: the VF owning the VI
2841  * @nmac: number of MAC addresses needed (1 to 5)
2842  * @mac: the MAC addresses of the VI
2843  * @rss_size: size of RSS table slice associated with this VI
2844  * @portfunc: which Port Application Function MAC Address is desired
2845  * @idstype: Intrusion Detection Type
2846  *
2847  * Allocates a virtual interface for the given physical port.  If @mac is
2848  * not %NULL it contains the MAC addresses of the VI as assigned by FW.
2849  * @mac should be large enough to hold @nmac Ethernet addresses, they are
2850  * stored consecutively so the space needed is @nmac * 6 bytes.
2851  * Returns a negative error number or the non-negative VI id.
2852  */
2853 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
2854                      unsigned int port, unsigned int pf, unsigned int vf,
2855                      unsigned int nmac, u8 *mac, unsigned int *rss_size,
2856                      unsigned int portfunc, unsigned int idstype)
2857 {
2858         int ret;
2859         struct fw_vi_cmd c;
2860
2861         memset(&c, 0, sizeof(c));
2862         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
2863                                   F_FW_CMD_WRITE | F_FW_CMD_EXEC |
2864                                   V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
2865         c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
2866         c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
2867                                      V_FW_VI_CMD_FUNC(portfunc));
2868         c.portid_pkd = V_FW_VI_CMD_PORTID(port);
2869         c.nmac = nmac - 1;
2870
2871         ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2872         if (ret)
2873                 return ret;
2874
2875         if (mac) {
2876                 memcpy(mac, c.mac, sizeof(c.mac));
2877                 switch (nmac) {
2878                 case 5:
2879                         memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2880                         /* FALLTHROUGH */
2881                 case 4:
2882                         memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2883                         /* FALLTHROUGH */
2884                 case 3:
2885                         memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2886                         /* FALLTHROUGH */
2887                 case 2:
2888                         memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
2889                         /* FALLTHROUGH */
2890                 }
2891         }
2892         if (rss_size)
2893                 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
2894         return G_FW_VI_CMD_VIID(cpu_to_be16(c.type_to_viid));
2895 }
2896
/**
 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Backwards compatible and convenience routine to allocate a Virtual
 * Interface with an Ethernet Port Application Function and Intrusion
 * Detection System disabled.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
				FW_VI_FUNC_ETH, 0);
}
2919
2920 /**
2921  * t4_free_vi - free a virtual interface
2922  * @adap: the adapter
2923  * @mbox: mailbox to use for the FW command
2924  * @pf: the PF owning the VI
2925  * @vf: the VF owning the VI
2926  * @viid: virtual interface identifiler
2927  *
2928  * Free a previously allocated virtual interface.
2929  */
2930 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2931                unsigned int vf, unsigned int viid)
2932 {
2933         struct fw_vi_cmd c;
2934
2935         memset(&c, 0, sizeof(c));
2936         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
2937                                   F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) |
2938                                   V_FW_VI_CMD_VFN(vf));
2939         c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
2940         c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
2941
2942         return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2943 }
2944
2945 /**
2946  * t4_set_rxmode - set Rx properties of a virtual interface
2947  * @adap: the adapter
2948  * @mbox: mailbox to use for the FW command
2949  * @viid: the VI id
2950  * @mtu: the new MTU or -1
2951  * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2952  * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2953  * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2954  * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
2955  *          -1 no change
2956  * @sleep_ok: if true we may sleep while awaiting command completion
2957  *
2958  * Sets Rx properties of a virtual interface.
2959  */
2960 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2961                   int mtu, int promisc, int all_multi, int bcast, int vlanex,
2962                   bool sleep_ok)
2963 {
2964         struct fw_vi_rxmode_cmd c;
2965
2966         /* convert to FW values */
2967         if (mtu < 0)
2968                 mtu = M_FW_VI_RXMODE_CMD_MTU;
2969         if (promisc < 0)
2970                 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
2971         if (all_multi < 0)
2972                 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
2973         if (bcast < 0)
2974                 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
2975         if (vlanex < 0)
2976                 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
2977
2978         memset(&c, 0, sizeof(c));
2979         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
2980                                    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2981                                    V_FW_VI_RXMODE_CMD_VIID(viid));
2982         c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2983         c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
2984                             V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2985                             V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2986                             V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2987                             V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
2988         return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2989 }
2990
/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address if
 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 * latter case the address is added persistently if @persist is %true.
 *
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	/* First (and only) exact-match entry used in this command */
	struct fw_vi_mac_exact *p = c.u.exact;
	int max_mac_addr = adap->params.arch.mps_tcam_size;

	if (idx < 0)                             /* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	/* One exact-match entry => length of 1 (16-byte units) */
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		/*
		 * The firmware writes back the index it actually used.  An
		 * index at or beyond the MPS TCAM size indicates no free
		 * entry was available, which we report as -ENOMEM.
		 */
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}
3043
3044 /**
3045  * t4_enable_vi_params - enable/disable a virtual interface
3046  * @adap: the adapter
3047  * @mbox: mailbox to use for the FW command
3048  * @viid: the VI id
3049  * @rx_en: 1=enable Rx, 0=disable Rx
3050  * @tx_en: 1=enable Tx, 0=disable Tx
3051  * @dcb_en: 1=enable delivery of Data Center Bridging messages.
3052  *
3053  * Enables/disables a virtual interface.  Note that setting DCB Enable
3054  * only makes sense when enabling a Virtual Interface ...
3055  */
3056 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
3057                         unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
3058 {
3059         struct fw_vi_enable_cmd c;
3060
3061         memset(&c, 0, sizeof(c));
3062         c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
3063                                    F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3064                                    V_FW_VI_ENABLE_CMD_VIID(viid));
3065         c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
3066                                      V_FW_VI_ENABLE_CMD_EEN(tx_en) |
3067                                      V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
3068                                      FW_LEN16(c));
3069         return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3070 }
3071
3072 /**
3073  * t4_enable_vi - enable/disable a virtual interface
3074  * @adap: the adapter
3075  * @mbox: mailbox to use for the FW command
3076  * @viid: the VI id
3077  * @rx_en: 1=enable Rx, 0=disable Rx
3078  * @tx_en: 1=enable Tx, 0=disable Tx
3079  *
3080  * Enables/disables a virtual interface.  Note that setting DCB Enable
3081  * only makes sense when enabling a Virtual Interface ...
3082  */
3083 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3084                  bool rx_en, bool tx_en)
3085 {
3086         return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
3087 }
3088
3089 /**
3090  * t4_iq_start_stop - enable/disable an ingress queue and its FLs
3091  * @adap: the adapter
3092  * @mbox: mailbox to use for the FW command
3093  * @start: %true to enable the queues, %false to disable them
3094  * @pf: the PF owning the queues
3095  * @vf: the VF owning the queues
3096  * @iqid: ingress queue id
3097  * @fl0id: FL0 queue id or 0xffff if no attached FL0
3098  * @fl1id: FL1 queue id or 0xffff if no attached FL1
3099  *
3100  * Starts or stops an ingress queue and its associated FLs, if any.
3101  */
3102 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
3103                      unsigned int pf, unsigned int vf, unsigned int iqid,
3104                      unsigned int fl0id, unsigned int fl1id)
3105 {
3106         struct fw_iq_cmd c;
3107
3108         memset(&c, 0, sizeof(c));
3109         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
3110                                   F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
3111                                   V_FW_IQ_CMD_VFN(vf));
3112         c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
3113                                        V_FW_IQ_CMD_IQSTOP(!start) |
3114                                        FW_LEN16(c));
3115         c.iqid = cpu_to_be16(iqid);
3116         c.fl0id = cpu_to_be16(fl0id);
3117         c.fl1id = cpu_to_be16(fl1id);
3118         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3119 }
3120
3121 /**
3122  * t4_iq_free - free an ingress queue and its FLs
3123  * @adap: the adapter
3124  * @mbox: mailbox to use for the FW command
3125  * @pf: the PF owning the queues
3126  * @vf: the VF owning the queues
3127  * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
3128  * @iqid: ingress queue id
3129  * @fl0id: FL0 queue id or 0xffff if no attached FL0
3130  * @fl1id: FL1 queue id or 0xffff if no attached FL1
3131  *
3132  * Frees an ingress queue and its associated FLs, if any.
3133  */
3134 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3135                unsigned int vf, unsigned int iqtype, unsigned int iqid,
3136                unsigned int fl0id, unsigned int fl1id)
3137 {
3138         struct fw_iq_cmd c;
3139
3140         memset(&c, 0, sizeof(c));
3141         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
3142                                   F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
3143                                   V_FW_IQ_CMD_VFN(vf));
3144         c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
3145         c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
3146         c.iqid = cpu_to_be16(iqid);
3147         c.fl0id = cpu_to_be16(fl0id);
3148         c.fl1id = cpu_to_be16(fl1id);
3149         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3150 }
3151
3152 /**
3153  * t4_eth_eq_free - free an Ethernet egress queue
3154  * @adap: the adapter
3155  * @mbox: mailbox to use for the FW command
3156  * @pf: the PF owning the queue
3157  * @vf: the VF owning the queue
3158  * @eqid: egress queue id
3159  *
3160  * Frees an Ethernet egress queue.
3161  */
3162 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
3163                    unsigned int vf, unsigned int eqid)
3164 {
3165         struct fw_eq_eth_cmd c;
3166
3167         memset(&c, 0, sizeof(c));
3168         c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
3169                                   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
3170                                   V_FW_EQ_ETH_CMD_PFN(pf) |
3171                                   V_FW_EQ_ETH_CMD_VFN(vf));
3172         c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
3173         c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
3174         return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3175 }
3176
/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 * Returns 0 on success or -EINVAL for an unrecognized opcode/action.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	/*
	 * This might be a port command ... this simplifies the following
	 * conditionals ...  We can get away with pre-dereferencing
	 * action_to_len16 because it's in the first 16 bytes and all messages
	 * will be at least that long.
	 */
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* link/module state change message */
		int speed = 0, fc = 0, i;
		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;
		struct link_config *lc;
		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);

		/* Decode pause and link-speed capability bits from the
		 * reported link status word.
		 */
		if (stat & F_FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & F_FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = ETH_SPEED_NUM_100M;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = ETH_SPEED_NUM_1G;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = ETH_SPEED_NUM_10G;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
			speed = ETH_SPEED_NUM_40G;

		/* Find the port that owns Tx channel @chan.
		 * NOTE(review): assumes some port matches; with no match pi
		 * ends up as the last port (or NULL if there are no ports,
		 * which would crash below) -- confirm against callers.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}
		lc = &pi->link_cfg;

		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, i);
		}
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			if (!link_ok && lc->link_ok) {
				/* NOTE(review): reason[] has 8 entries;
				 * assumes LINKDNRC is a 3-bit field -- confirm
				 * against the firmware interface definitions.
				 */
				static const char * const reason[] = {
					"Link Down",
					"Remote Fault",
					"Auto-negotiation Failure",
					"Reserved",
					"Insufficient Airflow",
					"Unable To Determine Reason",
					"No RX Signal Detected",
					"Reserved",
				};
				unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat);

				dev_warn(adap, "Port %d link down, reason: %s\n",
					 chan, reason[rc]);
			}
			/* Cache the new link state in the port's SW state. */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
		}
	} else {
		dev_warn(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}
3261
3262 void t4_reset_link_config(struct adapter *adap, int idx)
3263 {
3264         struct port_info *pi = adap2pinfo(adap, idx);
3265         struct link_config *lc = &pi->link_cfg;
3266
3267         lc->link_ok = 0;
3268         lc->requested_speed = 0;
3269         lc->requested_fc = 0;
3270         lc->speed = 0;
3271         lc->fc = 0;
3272 }
3273
3274 /**
3275  * init_link_config - initialize a link's SW state
3276  * @lc: structure holding the link state
3277  * @caps: link capabilities
3278  *
3279  * Initializes the SW state maintained for each link, including the link's
3280  * capabilities and default speed/flow-control/autonegotiation settings.
3281  */
3282 static void init_link_config(struct link_config *lc,
3283                              unsigned int caps)
3284 {
3285         lc->supported = caps;
3286         lc->requested_speed = 0;
3287         lc->speed = 0;
3288         lc->requested_fc = 0;
3289         lc->fc = 0;
3290         if (lc->supported & FW_PORT_CAP_ANEG) {
3291                 lc->advertising = lc->supported & ADVERT_MASK;
3292                 lc->autoneg = AUTONEG_ENABLE;
3293         } else {
3294                 lc->advertising = 0;
3295                 lc->autoneg = AUTONEG_DISABLE;
3296         }
3297 }
3298
3299 /**
3300  * t4_wait_dev_ready - wait till to reads of registers work
3301  *
3302  * Right after the device is RESET is can take a small amount of time
3303  * for it to respond to register reads.  Until then, all reads will
3304  * return either 0xff...ff or 0xee...ee.  Return an error if reads
3305  * don't work within a reasonable time frame.
3306  */
3307 static int t4_wait_dev_ready(struct adapter *adapter)
3308 {
3309         u32 whoami;
3310
3311         whoami = t4_read_reg(adapter, A_PL_WHOAMI);
3312
3313         if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
3314                 return 0;
3315
3316         msleep(500);
3317         whoami = t4_read_reg(adapter, A_PL_WHOAMI);
3318         if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
3319                 return 0;
3320
3321         dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
3322                 whoami);
3323         return -EIO;
3324 }
3325
/* Describes one explicitly-supported serial Flash part: the JEDEC
 * vendor/model ID as returned by the Read ID command, and the part's
 * total size.  NOTE(review): despite the "_mb" suffix, size_mb is
 * stored in bytes (e.g. "4 << 20" in the table that uses it).
 */
struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};
3330
3331 int t4_get_flash_params(struct adapter *adapter)
3332 {
3333         /*
3334          * Table for non-standard supported Flash parts.  Note, all Flash
3335          * parts must have 64KB sectors.
3336          */
3337         static struct flash_desc supported_flash[] = {
3338                 { 0x00150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
3339         };
3340
3341         int ret;
3342         u32 flashid = 0;
3343         unsigned int part, manufacturer;
3344         unsigned int density, size = 0;
3345
3346         /**
3347          * Issue a Read ID Command to the Flash part.  We decode supported
3348          * Flash parts and their sizes from this.  There's a newer Query
3349          * Command which can retrieve detailed geometry information but
3350          * many Flash parts don't support it.
3351          */
3352         ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
3353         if (!ret)
3354                 ret = sf1_read(adapter, 3, 0, 1, &flashid);
3355         t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
3356         if (ret < 0)
3357                 return ret;
3358
3359         /**
3360          * Check to see if it's one of our non-standard supported Flash parts.
3361          */
3362         for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
3363                 if (supported_flash[part].vendor_and_model_id == flashid) {
3364                         adapter->params.sf_size =
3365                                 supported_flash[part].size_mb;
3366                         adapter->params.sf_nsec =
3367                                 adapter->params.sf_size / SF_SEC_SIZE;
3368                         goto found;
3369                 }
3370         }
3371
3372         /**
3373          * Decode Flash part size.  The code below looks repetative with
3374          * common encodings, but that's not guaranteed in the JEDEC
3375          * specification for the Read JADEC ID command.  The only thing that
3376          * we're guaranteed by the JADEC specification is where the
3377          * Manufacturer ID is in the returned result.  After that each
3378          * Manufacturer ~could~ encode things completely differently.
3379          * Note, all Flash parts must have 64KB sectors.
3380          */
3381         manufacturer = flashid & 0xff;
3382         switch (manufacturer) {
3383         case 0x20: { /* Micron/Numonix */
3384                 /**
3385                  * This Density -> Size decoding table is taken from Micron
3386                  * Data Sheets.
3387                  */
3388                 density = (flashid >> 16) & 0xff;
3389                 switch (density) {
3390                 case 0x14:
3391                         size = 1 << 20; /* 1MB */
3392                         break;
3393                 case 0x15:
3394                         size = 1 << 21; /* 2MB */
3395                         break;
3396                 case 0x16:
3397                         size = 1 << 22; /* 4MB */
3398                         break;
3399                 case 0x17:
3400                         size = 1 << 23; /* 8MB */
3401                         break;
3402                 case 0x18:
3403                         size = 1 << 24; /* 16MB */
3404                         break;
3405                 case 0x19:
3406                         size = 1 << 25; /* 32MB */
3407                         break;
3408                 case 0x20:
3409                         size = 1 << 26; /* 64MB */
3410                         break;
3411                 case 0x21:
3412                         size = 1 << 27; /* 128MB */
3413                         break;
3414                 case 0x22:
3415                         size = 1 << 28; /* 256MB */
3416                         break;
3417                 }
3418                 break;
3419         }
3420
3421         case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
3422                 /**
3423                  * This Density -> Size decoding table is taken from ISSI
3424                  * Data Sheets.
3425                  */
3426                 density = (flashid >> 16) & 0xff;
3427                 switch (density) {
3428                 case 0x16:
3429                         size = 1 << 25; /* 32MB */
3430                         break;
3431                 case 0x17:
3432                         size = 1 << 26; /* 64MB */
3433                         break;
3434                 }
3435                 break;
3436         }
3437
3438         case 0xc2: { /* Macronix */
3439                 /**
3440                  * This Density -> Size decoding table is taken from Macronix
3441                  * Data Sheets.
3442                  */
3443                 density = (flashid >> 16) & 0xff;
3444                 switch (density) {
3445                 case 0x17:
3446                         size = 1 << 23; /* 8MB */
3447                         break;
3448                 case 0x18:
3449                         size = 1 << 24; /* 16MB */
3450                         break;
3451                 }
3452                 break;
3453         }
3454
3455         case 0xef: { /* Winbond */
3456                 /**
3457                  * This Density -> Size decoding table is taken from Winbond
3458                  * Data Sheets.
3459                  */
3460                 density = (flashid >> 16) & 0xff;
3461                 switch (density) {
3462                 case 0x17:
3463                         size = 1 << 23; /* 8MB */
3464                         break;
3465                 case 0x18:
3466                         size = 1 << 24; /* 16MB */
3467                         break;
3468                 }
3469                 break;
3470         }
3471         }
3472
3473         /* If we didn't recognize the FLASH part, that's no real issue: the
3474          * Hardware/Software contract says that Hardware will _*ALWAYS*_
3475          * use a FLASH part which is at least 4MB in size and has 64KB
3476          * sectors.  The unrecognized FLASH part is likely to be much larger
3477          * than 4MB, but that's all we really need.
3478          */
3479         if (size == 0) {
3480                 dev_warn(adapter,
3481                          "Unknown Flash Part, ID = %#x, assuming 4MB\n",
3482                          flashid);
3483                 size = 1 << 22;
3484         }
3485
3486         /**
3487          * Store decoded Flash size and fall through into vetting code.
3488          */
3489         adapter->params.sf_size = size;
3490         adapter->params.sf_nsec = size / SF_SEC_SIZE;
3491
3492 found:
3493         /*
3494          * We should reject adapters with FLASHes which are too small. So, emit
3495          * a warning.
3496          */
3497         if (adapter->params.sf_size < FLASH_MIN_SIZE)
3498                 dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
3499                          flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
3500
3501         return 0;
3502 }
3503
3504 static void set_pcie_completion_timeout(struct adapter *adapter,
3505                                         u8 range)
3506 {
3507         u32 pcie_cap;
3508         u16 val;
3509
3510         pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
3511         if (pcie_cap) {
3512                 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
3513                 val &= 0xfff0;
3514                 val |= range;
3515                 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
3516         }
3517 }
3518
/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.  Returns 0 on success or a negative
 * error if the device never becomes ready or is unsupported.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	u32 pl_rev;

	/* The device must answer register reads before we can probe it. */
	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
	adapter->params.pci.device_id = adapter->pdev->id.device_id;
	adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;

	/*
	 * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
	 * ADAPTER (VERSION << 4 | REVISION)
	 */
	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T5:
		/* Fill in the T5 architecture constants (MPS TCAM and
		 * replication sizes, channel and VF counts).
		 */
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
		adapter->params.arch.mps_tcam_size =
						NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.vfcount = 128;
		break;
	default:
		/* Only T5 devices are handled by this version. */
		dev_err(adapter, "%s: Device %d is not supported\n",
			__func__, adapter->params.pci.device_id);
		return -EINVAL;
	}

	adapter->params.pci.vpd_cap_addr =
		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = t4_get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
			-ret);
		return ret;
	}

	adapter->params.cim_la_size = CIMLA_SIZE;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
3587
/**
 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID.  These are passed back in return value
 * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.  If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" register may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
		      enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/*
	 * T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/*
	 * Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.hps + 10;
	page_size = 1 << page_shift;

	/*
	 * Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
			      adapter->params.sge.eq_qpp :
			      adapter->params.sge.iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/*
	 * Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/*
	 * If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
3679
3680 /**
3681  * t4_init_sge_params - initialize adap->params.sge
3682  * @adapter: the adapter
3683  *
3684  * Initialize various fields of the adapter's SGE Parameters structure.
3685  */
3686 int t4_init_sge_params(struct adapter *adapter)
3687 {
3688         struct sge_params *sge_params = &adapter->params.sge;
3689         u32 hps, qpp;
3690         unsigned int s_hps, s_qpp;
3691
3692         /*
3693          * Extract the SGE Page Size for our PF.
3694          */
3695         hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
3696         s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
3697                  adapter->pf);
3698         sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
3699
3700         /*
3701          * Extract the SGE Egress and Ingess Queues Per Page for our PF.
3702          */
3703         s_qpp = (S_QUEUESPERPAGEPF0 +
3704                  (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
3705         qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
3706         sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
3707         qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
3708         sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
3709
3710         return 0;
3711 }
3712
/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 * Always returns 0.
 */
int t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;

	/* Cache the TP timer and delayed-ACK resolutions. */
	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	adap->params.tp.tre = G_TIMERRESOLUTION(v);
	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/*
	 * Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 &adap->params.tp.vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 &adap->params.tp.ingress_config, 1,
			 A_TP_INGRESS_CONFIG);

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       F_PROTOCOL);

	/*
	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 * A shift of -1 marks the field as unavailable.
	 */
	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}
3762
3763 /**
3764  * t4_filter_field_shift - calculate filter field shift
3765  * @adap: the adapter
3766  * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
3767  *
3768  * Return the shift position of a filter field within the Compressed
3769  * Filter Tuple.  The filter field is specified via its selection bit
3770  * within TP_VLAN_PRI_MAL (filter mode).  E.g. F_VLAN.
3771  */
3772 int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
3773 {
3774         unsigned int filter_mode = adap->params.tp.vlan_pri_map;
3775         unsigned int sel;
3776         int field_shift;
3777
3778         if ((filter_mode & filter_sel) == 0)
3779                 return -1;
3780
3781         for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
3782                 switch (filter_mode & sel) {
3783                 case F_FCOE:
3784                         field_shift += W_FT_FCOE;
3785                         break;
3786                 case F_PORT:
3787                         field_shift += W_FT_PORT;
3788                         break;
3789                 case F_VNIC_ID:
3790                         field_shift += W_FT_VNIC_ID;
3791                         break;
3792                 case F_VLAN:
3793                         field_shift += W_FT_VLAN;
3794                         break;
3795                 case F_TOS:
3796                         field_shift += W_FT_TOS;
3797                         break;
3798                 case F_PROTOCOL:
3799                         field_shift += W_FT_PROTOCOL;
3800                         break;
3801                 case F_ETHERTYPE:
3802                         field_shift += W_FT_ETHERTYPE;
3803                         break;
3804                 case F_MACMATCH:
3805                         field_shift += W_FT_MACMATCH;
3806                         break;
3807                 case F_MPSHITTYPE:
3808                         field_shift += W_FT_MPSHITTYPE;
3809                         break;
3810                 case F_FRAGMENTATION:
3811                         field_shift += W_FT_FRAGMENTATION;
3812                         break;
3813                 }
3814         }
3815         return field_shift;
3816 }
3817
3818 int t4_init_rss_mode(struct adapter *adap, int mbox)
3819 {
3820         int i, ret;
3821         struct fw_rss_vi_config_cmd rvc;
3822
3823         memset(&rvc, 0, sizeof(rvc));
3824
3825         for_each_port(adap, i) {
3826                 struct port_info *p = adap2pinfo(adap, i);
3827
3828                 rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
3829                                        F_FW_CMD_REQUEST | F_FW_CMD_READ |
3830                                        V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
3831                 rvc.retval_len16 = htonl(FW_LEN16(rvc));
3832                 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
3833                 if (ret)
3834                         return ret;
3835                 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
3836         }
3837         return 0;
3838 }
3839
3840 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3841 {
3842         u8 addr[6];
3843         int ret, i, j = 0;
3844         struct fw_port_cmd c;
3845
3846         memset(&c, 0, sizeof(c));
3847
3848         for_each_port(adap, i) {
3849                 unsigned int rss_size = 0;
3850                 struct port_info *p = adap2pinfo(adap, i);
3851
3852                 while ((adap->params.portvec & (1 << j)) == 0)
3853                         j++;
3854
3855                 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
3856                                              F_FW_CMD_REQUEST | F_FW_CMD_READ |
3857                                              V_FW_PORT_CMD_PORTID(j));
3858                 c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(
3859                                                 FW_PORT_ACTION_GET_PORT_INFO) |
3860                                                 FW_LEN16(c));
3861                 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3862                 if (ret)
3863                         return ret;
3864
3865                 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
3866                 if (ret < 0)
3867                         return ret;
3868
3869                 p->viid = ret;
3870                 p->tx_chan = j;
3871                 p->rss_size = rss_size;
3872                 t4_os_set_hw_addr(adap, i, addr);
3873
3874                 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
3875                 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
3876                                 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
3877                 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
3878                 p->mod_type = FW_PORT_MOD_TYPE_NA;
3879
3880                 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
3881                 j++;
3882         }
3883         return 0;
3884 }