/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2007-2013 Broadcom Corporation.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bnx2x.h"
#include "ecore_init.h"

/**** Exe Queue interfaces ****/

/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @sc:         driver handle (unused)
 * @o:          pointer to the object
 * @exe_len:    length of an execution chunk
 * @owner:      pointer to the owner
 * @validate:   validate function pointer
 * @remove:     remove function pointer
 * @optimize:   optimize function pointer
 * @exec:       execute function pointer
 * @get:        get function pointer
 */
static void
ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
                     struct ecore_exe_queue_obj *o,
                     int exe_len,
                     union ecore_qable_obj *owner,
                     exe_q_validate validate,
                     exe_q_remove remove,
                     exe_q_optimize optimize, exe_q_execute exec, exe_q_get get)
{
        ECORE_MEMSET(o, 0, sizeof(*o));

        ECORE_LIST_INIT(&o->exe_queue);
        ECORE_LIST_INIT(&o->pending_comp);

        ECORE_SPIN_LOCK_INIT(&o->lock, sc);

        o->exe_chunk_len = exe_len;
        o->owner = owner;

        /* Owner specific callbacks */
        o->validate = validate;
        o->remove = remove;
        o->optimize = optimize;
        o->execute = exec;
        o->get = get;

        ECORE_MSG("Setup the execution queue with the chunk length of %d",
                  exe_len);
}
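
/* A minimal wiring sketch: this is how the VLAN-MAC object later in this
 * file registers its owner callbacks (exact call-site arguments may differ):
 *
 *	ecore_exe_queue_init(sc, &o->exe_queue, 1, qable_obj,
 *			     ecore_validate_vlan_mac,
 *			     ecore_remove_vlan_mac,
 *			     ecore_optimize_vlan_mac,
 *			     ecore_execute_vlan_mac,
 *			     ecore_exeq_get_mac);
 */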

static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
                                      struct ecore_exeq_elem *elem)
{
        ECORE_MSG("Deleting an exe_queue element");
        ECORE_FREE(sc, elem, sizeof(*elem));
}

static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
{
        struct ecore_exeq_elem *elem;
        int cnt = 0;

        ECORE_SPIN_LOCK_BH(&o->lock);

        ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
                                  struct ecore_exeq_elem)
                cnt++;

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return cnt;
}

/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @sc:         driver handle
 * @o:          queue
 * @elem:       new element to add
 * @restore:    true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static int ecore_exe_queue_add(struct bnx2x_softc *sc,
                               struct ecore_exe_queue_obj *o,
                               struct ecore_exeq_elem *elem, int restore)
{
        int rc;

        ECORE_SPIN_LOCK_BH(&o->lock);

        if (!restore) {
                /* Try to optimize this element away against a queued
                 * opposite command.
                 */
                rc = o->optimize(sc, o->owner, elem);
                if (rc)
                        goto free_and_exit;

                /* Check if this request is ok */
                rc = o->validate(sc, o->owner, elem);
                if (rc) {
                        ECORE_MSG("Preamble failed: %d", rc);
                        goto free_and_exit;
                }
        }

        /* If so, add it to the execution queue */
        ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return ECORE_SUCCESS;

free_and_exit:
        ecore_exe_queue_free_elem(sc, elem);

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return rc;
}

static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
                                            struct ecore_exe_queue_obj *o)
{
        struct ecore_exeq_elem *elem;

        while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
                elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
                                              struct ecore_exeq_elem, link);

                ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
                ecore_exe_queue_free_elem(sc, elem);
        }
}

static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
                                                 struct ecore_exe_queue_obj *o)
{
        ECORE_SPIN_LOCK_BH(&o->lock);

        __ecore_exe_queue_reset_pending(sc, o);

        ECORE_SPIN_UNLOCK_BH(&o->lock);
}

/**
 * ecore_exe_queue_step - execute one execution chunk atomically
 *
 * @sc:                 driver handle
 * @o:                  queue
 * @ramrod_flags:       flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static int ecore_exe_queue_step(struct bnx2x_softc *sc,
                                struct ecore_exe_queue_obj *o,
                                unsigned long *ramrod_flags)
{
        struct ecore_exeq_elem *elem, spacer;
        int cur_len = 0, rc;

        ECORE_MEMSET(&spacer, 0, sizeof(spacer));
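
        /* 'spacer' is a dummy element: it is briefly linked into pending_comp
         * below so that neither list is ever observed empty while a real
         * element is moved between exe_queue and pending_comp.
         */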

        /* Next step should not be performed until the current is finished,
         * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
         * properly clear object internals without sending any command to the
         * FW, which also implies there won't be any completion to clear the
         * 'pending' list.
         */
        if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
                if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
                        ECORE_MSG("RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
                        __ecore_exe_queue_reset_pending(sc, o);
                } else {
                        return ECORE_PENDING;
                }
        }

        /* Run through the pending commands list and create a next
         * execution chunk.
         */
        while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
                elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
                                              struct ecore_exeq_elem, link);
                ECORE_DBG_BREAK_IF(!elem->cmd_len);

                if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
                        cur_len += elem->cmd_len;
                        /* Prevent both lists from being empty while moving an
                         * element. This allows calling
                         * ecore_exe_queue_empty() without locking.
                         */
                        ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
                        mb();
                        ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
                        ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
                        ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
                } else {
                        break;
                }
        }

        /* Sanity check */
        if (!cur_len)
                return ECORE_SUCCESS;

        rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
        if (rc < 0)
                /* In case of an error return the commands back to the queue
                 * and reset the pending_comp.
                 */
                ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
        else if (!rc)
                /* If zero is returned, there are no outstanding pending
                 * completions and we may dismiss the pending list.
                 */
                __ecore_exe_queue_reset_pending(sc, o);

        return rc;
}

static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
{
        int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);

        /* Don't reorder!!! */
        mb();

        return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
}

static struct ecore_exeq_elem *
ecore_exe_queue_alloc_elem(struct bnx2x_softc *sc __rte_unused)
{
        ECORE_MSG("Allocating a new exe_queue element");
        return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
}

/************************ raw_obj functions ***********************************/
static int ecore_raw_check_pending(struct ecore_raw_obj *o)
{
        /* The double negation normalizes the value returned by
         * ECORE_TEST_BIT to 0 or 1, so it is never truncated regardless of
         * the platform's int definition.
         *
         * Note we cannot simply define the function's return value type
         * to match the type returned by ECORE_TEST_BIT, as it varies by
         * platform/implementation.
         */
        return !!ECORE_TEST_BIT(o->state, o->pstate);
}
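
/* For example, if ECORE_TEST_BIT returned a 64-bit value with only bit 35
 * set, a plain truncation to a 32-bit int would yield 0, while the !! above
 * yields 1.
 */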

static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
        ECORE_SMP_MB_BEFORE_CLEAR_BIT();
        ECORE_CLEAR_BIT(o->state, o->pstate);
        ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
        ECORE_SMP_MB_BEFORE_CLEAR_BIT();
        ECORE_SET_BIT(o->state, o->pstate);
        ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

/**
 * ecore_state_wait - wait until the given bit (state) is cleared
 *
 * @sc:         device handle
 * @state:      state which is to be cleared
 * @pstate:     state buffer
 */
static int ecore_state_wait(struct bnx2x_softc *sc, int state,
                            unsigned long *pstate)
{
        /* can take a while if any port is running */
        int cnt = 5000;
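
        /* Assuming ECORE_WAIT delays about 1 ms per iteration, this amounts
         * to roughly 5 seconds in total (matching the figure quoted for
         * ecore_wait_vlan_mac() below).
         */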

        if (CHIP_REV_IS_EMUL(sc))
                cnt *= 20;

        ECORE_MSG("waiting for state to become %d", state);

        ECORE_MIGHT_SLEEP();
        while (cnt--) {
                bnx2x_intr_legacy(sc, 1);
                if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
                        ECORE_MSG("exit (cnt %d)", 5000 - cnt);
#endif
                        return ECORE_SUCCESS;
                }

                ECORE_WAIT(sc, delay_us);

                if (sc->panic)
                        return ECORE_IO;
        }

        /* timeout! */
        PMD_DRV_LOG(ERR, "timeout waiting for state %d", state);
#ifdef ECORE_STOP_ON_ERROR
        ecore_panic();
#endif

        return ECORE_TIMEOUT;
}

static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw)
{
        return ecore_state_wait(sc, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        ECORE_DBG_BREAK_IF(!mp);

        return mp->get_entry(mp, offset);
}

static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        ECORE_DBG_BREAK_IF(!mp);

        return mp->get(mp, 1);
}

static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        return mp->put_entry(mp, offset);
}

static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        return mp->put(mp, 1);
}

/**
 * __ecore_vlan_mac_h_write_trylock - try taking the writer lock on the vlan
 * mac head list
 *
 * @sc:         device handle
 * @o:          vlan_mac object
 *
 * @details Non-blocking implementation; should be called under execution
 *          queue lock.
 */
static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
                                            struct ecore_vlan_mac_obj *o)
{
        if (o->head_reader) {
                ECORE_MSG("vlan_mac_lock writer - There are readers; Busy");
                return ECORE_BUSY;
        }

        ECORE_MSG("vlan_mac_lock writer - Taken");
        return ECORE_SUCCESS;
}

/**
 * __ecore_vlan_mac_h_exec_pending - execute a step that previously couldn't
 * run because the vlan mac head list lock was taken
 *
 * @sc:         device handle
 * @o:          vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
{
        int rc;
        unsigned long ramrod_flags = o->saved_ramrod_flags;

        ECORE_MSG("vlan_mac_lock execute pending command with ramrod flags %lu",
                  ramrod_flags);
        o->head_exe_request = FALSE;
        o->saved_ramrod_flags = 0;
        rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
        if (rc != ECORE_SUCCESS) {
                PMD_DRV_LOG(ERR,
                            "execution of pending commands failed with rc %d",
                            rc);
#ifdef ECORE_STOP_ON_ERROR
                ecore_panic();
#endif
        }
}

/**
 * __ecore_vlan_mac_h_pend - defer an execution step that couldn't run because
 * the vlan mac head list lock was taken
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 * @ramrod_flags:       ramrod flags of the missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
                                    struct ecore_vlan_mac_obj *o,
                                    unsigned long ramrod_flags)
{
        o->head_exe_request = TRUE;
        o->saved_ramrod_flags = ramrod_flags;
        ECORE_MSG("Placing pending execution with ramrod flags %lu",
                  ramrod_flags);
}

/**
 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
{
        /* It's possible a new pending execution was added since this writer
         * executed. If so, execute again. [Ad infinitum]
         */
        while (o->head_exe_request) {
                ECORE_MSG("vlan_mac_lock - writer release encountered a pending request");
                __ecore_vlan_mac_h_exec_pending(sc, o);
        }
}

/**
 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
                                   struct ecore_vlan_mac_obj *o)
{
        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        __ecore_vlan_mac_h_write_unlock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
                                        struct ecore_vlan_mac_obj *o)
{
        /* If we got here, we're holding lock --> no WRITER exists */
        o->head_reader++;
        ECORE_MSG("vlan_mac_lock - locked reader - number %d", o->head_reader);

        return ECORE_SUCCESS;
}

/**
 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
                                      struct ecore_vlan_mac_obj *o)
{
        int rc;

        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        rc = __ecore_vlan_mac_h_read_lock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

        return rc;
}

/**
 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader, possibly releasing and reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
                                           struct ecore_vlan_mac_obj *o)
{
        if (!o->head_reader) {
                PMD_DRV_LOG(ERR,
                            "Need to release vlan mac reader lock, but lock isn't taken");
#ifdef ECORE_STOP_ON_ERROR
                ecore_panic();
#endif
        } else {
                o->head_reader--;
                PMD_DRV_LOG(INFO,
                            "vlan_mac_lock - decreased readers to %d",
                            o->head_reader);
        }

        /* It's possible a new pending execution was added, and that this
         * reader was last - if so we need to execute the command.
         */
        if (!o->head_reader && o->head_exe_request) {
                PMD_DRV_LOG(INFO,
                            "vlan_mac_lock - reader release encountered a pending request");

                /* Writer release will do the trick */
                __ecore_vlan_mac_h_write_unlock(sc, o);
        }
}

/**
 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
                                  struct ecore_vlan_mac_obj *o)
{
        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        __ecore_vlan_mac_h_read_unlock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}
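
/* A typical reader sequence, sketched here for illustration -
 * ecore_get_n_elements() below follows this pattern:
 *
 *	if (ecore_vlan_mac_h_read_lock(sc, o) == ECORE_SUCCESS) {
 *		ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
 *					  struct ecore_vlan_mac_registry_elem)
 *			... inspect pos->u ...
 *		ecore_vlan_mac_h_read_unlock(sc, o);
 *	}
 */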

/**
 * ecore_get_n_elements - get n elements from the vlan mac head list
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 * @n:                  maximum number of elements to get
 * @base:               base address for element placement
 * @stride:             stride between elements (in bytes)
 * @size:               size of a single element (in bytes)
 */
static int ecore_get_n_elements(struct bnx2x_softc *sc,
                                struct ecore_vlan_mac_obj *o, int n,
                                uint8_t *base, uint8_t stride, uint8_t size)
{
        struct ecore_vlan_mac_registry_elem *pos;
        uint8_t *next = base;
        int counter = 0, read_lock;

        ECORE_MSG("get_n_elements - taking vlan_mac_lock (reader)");
        read_lock = ecore_vlan_mac_h_read_lock(sc, o);
        if (read_lock != ECORE_SUCCESS)
                PMD_DRV_LOG(ERR,
                            "get_n_elements failed to get vlan mac reader lock; Access without lock");

        /* traverse the list */
        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem) {
                if (counter < n) {
                        ECORE_MEMCPY(next, &pos->u, size);
                        counter++;
                        ECORE_MSG("copied element number %d to address %p element was:",
                                  counter, next);
                        next += stride + size;
                }
        }

        if (read_lock == ECORE_SUCCESS) {
                ECORE_MSG("get_n_elements - releasing vlan_mac_lock (reader)");
                ecore_vlan_mac_h_read_unlock(sc, o);
        }

        return counter * ETH_ALEN;
}

/* check_add() callbacks */
static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
                               struct ecore_vlan_mac_obj *o,
                               union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
                  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
                  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

        if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
                return ECORE_INVAL;

        /* Check if a requested MAC already exists */
        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return ECORE_EXISTS;

        return ECORE_SUCCESS;
}

/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *
ecore_check_mac_del(struct bnx2x_softc *sc __rte_unused,
                    struct ecore_vlan_mac_obj *o,
                    union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
                  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
                  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return pos;

        return NULL;
}

/* check_move() callback */
static int ecore_check_move(struct bnx2x_softc *sc,
                            struct ecore_vlan_mac_obj *src_o,
                            struct ecore_vlan_mac_obj *dst_o,
                            union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;
        int rc;

        /* Check if we can delete the requested configuration from the first
         * object.
         */
        pos = src_o->check_del(sc, src_o, data);

        /* Check if the configuration can be added */
        rc = dst_o->check_add(sc, dst_o, data);

        /* If this classification cannot be added (it is already set)
         * or cannot be deleted - return an error.
         */
        if (rc || !pos)
                return FALSE;

        return TRUE;
}

static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc,
                                       __rte_unused struct ecore_vlan_mac_obj *src_o,
                                       __rte_unused struct ecore_vlan_mac_obj *dst_o,
                                       __rte_unused union ecore_classification_ramrod_data *data)
{
        return FALSE;
}

static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
{
        struct ecore_raw_obj *raw = &o->raw;
        uint8_t rx_tx_flag = 0;

        if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
            (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

        if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
            (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

        return rx_tx_flag;
}

static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
                                 int add, unsigned char *dev_addr, int index)
{
        uint32_t wb_data[2];
        uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
                              NIG_REG_LLH0_FUNC_MEM;

        if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
                return;

        if (index > ECORE_LLH_CAM_MAX_PF_LINE)
                return;

        ECORE_MSG("Going to %s LLH configuration at entry %d",
                  (add ? "ADD" : "DELETE"), index);

        if (add) {
                /* LLH_FUNC_MEM is a uint64_t WB register */
                reg_offset += 8 * index;

                wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
                              (dev_addr[4] << 8) | dev_addr[5]);
                wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

                ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
        }

        REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
                    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add);
}
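
/* Byte-layout example for the write above: for dev_addr aa:bb:cc:dd:ee:ff,
 * wb_data[0] = 0xccddeeff and wb_data[1] = 0x0000aabb, i.e. the two most
 * significant MAC bytes go into wb_data[1] of the 64-bit wide-bus register.
 */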

/**
 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @o:          queue for which we want to configure this rule
 * @add:        if TRUE the command is an ADD command, DEL otherwise
 * @opcode:     CLASSIFY_RULE_OPCODE_XXX
 * @hdr:        pointer to a header to setup
 */
static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o,
                                          int add, int opcode,
                                          struct eth_classify_cmd_header *hdr)
{
        struct ecore_raw_obj *raw = &o->raw;

        hdr->client_id = raw->cl_id;
        hdr->func_id = raw->func_id;

        /* RX and/or TX (internal switching) configuration? */
        hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o);

        if (add)
                hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

        hdr->cmd_general_data |=
                (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:        connection id
 * @type:       ECORE_FILTER_XXX_PENDING
 * @hdr:        pointer to header to setup
 * @rule_cnt:   number of rules in this ramrod data
 *
 * Currently we always configure one rule, and the echo field is set to
 * contain a CID and an opcode type.
 */
static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
                                            struct eth_classify_header *hdr,
                                            int rule_cnt)
{
        hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
                                      (type << ECORE_SWCID_SHIFT));
        hdr->rule_cnt = (uint8_t)rule_cnt;
}
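
/* On the completion path both fields can be recovered from the echo value;
 * a sketch, assuming 'echo' has already been converted back to CPU order:
 *
 *	cid  = echo & ECORE_SWCID_MASK;
 *	type = echo >> ECORE_SWCID_SHIFT;
 */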

/* hw_config() callbacks */
static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
                                 struct ecore_vlan_mac_obj *o,
                                 struct ecore_exeq_elem *elem, int rule_idx,
                                 __rte_unused int cam_offset)
{
        struct ecore_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
                (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
        unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
        uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;

        /* Set an LLH CAM entry: currently only iSCSI and ETH MACs are
         * relevant. In addition, the current implementation is tuned for a
         * single ETH MAC.
         *
         * When a PF configuration with multiple unicast ETH MACs in switch
         * independent mode is required (NetQ, multiple netdev MACs, etc.),
         * consider better utilisation of the 8 per-function MAC entries in
         * the LLH register. There are also NIG_REG_P[01]_LLH_FUNC_MEM2
         * registers that bring the total number of CAM entries to 16.
         *
         * Currently we won't configure NIG for MACs other than a primary ETH
         * MAC and iSCSI L2 MAC.
         *
         * If this MAC is moving from one Queue to another, no need to change
         * NIG configuration.
         */
        if (cmd != ECORE_VLAN_MAC_MOVE) {
                if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
                        ecore_set_mac_in_nig(sc, add, mac,
                                             ECORE_LLH_CAM_ISCSI_ETH_LINE);
                else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
                        ecore_set_mac_in_nig(sc, add, mac,
                                             ECORE_LLH_CAM_ETH_LINE);
        }

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                ECORE_MEMSET(data, 0, sizeof(*data));

        /* Setup a command header */
        ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
                                      &rule_entry->mac.header);

        ECORE_MSG("About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
                  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
                  mac[4], mac[5], raw->cl_id);

        /* Set the MAC itself */
        ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                              &rule_entry->mac.mac_mid,
                              &rule_entry->mac.mac_lsb, mac);
        rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac;

        /* MOVE: Add a rule that will add this MAC to the target Queue */
        if (cmd == ECORE_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data.vlan_mac.target_obj,
                                              TRUE, CLASSIFY_RULE_OPCODE_MAC,
                                              &rule_entry->mac.header);

                /* Set the MAC itself */
                ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                                      &rule_entry->mac.mac_mid,
                                      &rule_entry->mac.mac_lsb, mac);
                rule_entry->mac.inner_mac =
                        elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
        }

        /* Set the ramrod data header */
        ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e1x - set a MAC configuration ramrod header
 *
 * @o:          vlan_mac object
 * @type:       ECORE_FILTER_XXX_PENDING
 * @cam_offset: offset in the CAM memory
 * @hdr:        pointer to a header to setup
 *
 * E1H
 */
static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj *o,
                                             int type, int cam_offset,
                                             struct mac_configuration_hdr *hdr)
{
        struct ecore_raw_obj *r = &o->raw;

        hdr->length = 1;
        hdr->offset = (uint8_t)cam_offset;
        hdr->client_id = ECORE_CPU_TO_LE16(0xff);
        hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
                                      (type << ECORE_SWCID_SHIFT));
}

static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj *o,
                                             int add, int opcode, uint8_t *mac,
                                             uint16_t vlan_id,
                                             struct mac_configuration_entry *cfg_entry)
{
        struct ecore_raw_obj *r = &o->raw;
        uint32_t cl_bit_vec = (1 << r->cl_id);

        cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
        cfg_entry->pf_id = r->func_id;
        cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);

        if (add) {
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                               T_ETH_MAC_COMMAND_SET);
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
                               opcode);

                /* Set a MAC in a ramrod data */
                ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
                                      &cfg_entry->middle_mac_addr,
                                      &cfg_entry->lsb_mac_addr, mac);
        } else {
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                               T_ETH_MAC_COMMAND_INVALIDATE);
        }
}

static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc __rte_unused,
                                         struct ecore_vlan_mac_obj *o,
                                         int type, int cam_offset,
                                         int add, uint8_t *mac,
                                         uint16_t vlan_id, int opcode,
                                         struct mac_configuration_cmd *config)
{
        struct mac_configuration_entry *cfg_entry = &config->config_table[0];

        ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr);
        ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
                                         cfg_entry);

        ECORE_MSG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
                  (add ? "setting" : "clearing"),
                  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
                  o->raw.cl_id, cam_offset);
}

/**
 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @sc:         device handle
 * @o:          ecore_vlan_mac_obj
 * @elem:       ecore_exeq_elem
 * @rule_idx:   rule_idx
 * @cam_offset: cam_offset
 */
static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc,
                                  struct ecore_vlan_mac_obj *o,
                                  struct ecore_exeq_elem *elem,
                                  __rte_unused int rule_idx, int cam_offset)
{
        struct ecore_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)(raw->rdata);
        /* The 57711 does not support the MOVE command,
         * so it's either ADD or DEL.
         */
        int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
                  TRUE : FALSE;

        /* Reset the ramrod data buffer */
        ECORE_MEMSET(config, 0, sizeof(*config));

        ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.mac.mac, 0,
                                     ETH_VLAN_FILTER_ANY_VLAN, config);
}

/**
 * ecore_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @sc:         device handle
 * @p:          command parameters
 * @ppos:       pointer to the cookie
 *
 * Reconfigure the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * The cookie (*ppos) should be given back in the next call to make the
 * function handle the next element. If *ppos is set to NULL it restarts the
 * iterator. If *ppos comes back as NULL, the last element has been handled.
 */
static int ecore_vlan_mac_restore(struct bnx2x_softc *sc,
                                  struct ecore_vlan_mac_ramrod_params *p,
                                  struct ecore_vlan_mac_registry_elem **ppos)
{
        struct ecore_vlan_mac_registry_elem *pos;
        struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;

        /* If the list is empty - there is nothing to do here */
        if (ECORE_LIST_IS_EMPTY(&o->head)) {
                *ppos = NULL;
                return 0;
        }

        /* Make a step... */
        if (*ppos == NULL)
                *ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
                                               struct ecore_vlan_mac_registry_elem,
                                               link);
        else
                *ppos = ECORE_LIST_NEXT(*ppos, link,
                                        struct ecore_vlan_mac_registry_elem);

        pos = *ppos;

        /* If it's the last step - return NULL */
        if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
                *ppos = NULL;

        /* Prepare a 'user_req' */
        ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));

        /* Set the command */
        p->user_req.cmd = ECORE_VLAN_MAC_ADD;

        /* Set vlan_mac_flags */
        p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

        /* Set a restore bit */
        ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);

        return ecore_config_vlan_mac(sc, p);
}
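
/* A minimal restore-loop sketch, assuming 'p' has been prepared by the
 * caller with RAMROD_COMP_WAIT set in p.ramrod_flags:
 *
 *	struct ecore_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = ecore_vlan_mac_restore(sc, &p, &pos);
 *		if (rc != ECORE_SUCCESS)
 *			break;
 *	} while (pos);
 *
 * The loop ends once the cookie comes back NULL (last element handled) or a
 * call fails.
 */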

/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o,
                                                  struct ecore_exeq_elem *elem)
{
        struct ecore_exeq_elem *pos;
        struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

        /* Check pending for execution commands */
        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
                                  struct ecore_exeq_elem)
                if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
                                  sizeof(*data)) &&
                    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
                        return pos;

        return NULL;
}

/**
 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @sc:         device handle
 * @qo:         ecore_qable_obj
 * @elem:       ecore_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
                                       union ecore_qable_obj *qo,
                                       struct ecore_exeq_elem *elem)
{
        struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
        struct ecore_exe_queue_obj *exeq = &o->exe_queue;
        int rc;

        /* Check the registry */
        rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
        if (rc) {
                ECORE_MSG("ADD command is not allowed considering current registry state.");
                return rc;
        }

        /* Check if there is a pending ADD command for this
         * MAC/VLAN/VLAN-MAC. Return an error if there is.
         */
        if (exeq->get(exeq, elem)) {
                ECORE_MSG("There is a pending ADD command already");
                return ECORE_EXISTS;
        }

        /* Consume the credit, unless asked not to */
        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              o->get_credit(o)))
                return ECORE_INVAL;

        return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @sc:         device handle
 * @qo:         qable object to check
 * @elem:       element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
                                       union ecore_qable_obj *qo,
                                       struct ecore_exeq_elem *elem)
{
        struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
        struct ecore_vlan_mac_registry_elem *pos;
        struct ecore_exe_queue_obj *exeq = &o->exe_queue;
        struct ecore_exeq_elem query_elem;

        /* If this classification cannot be deleted (it doesn't exist)
         * - return ECORE_EXISTS.
         */
        pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
        if (!pos) {
                ECORE_MSG("DEL command is not allowed considering current registry state");
                return ECORE_EXISTS;
        }

        /* Check if there are pending DEL or MOVE commands for this
         * MAC/VLAN/VLAN-MAC. Return an error if so.
         */
        ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

        /* Check for MOVE commands */
        query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
        if (exeq->get(exeq, &query_elem)) {
                PMD_DRV_LOG(ERR, "There is a pending MOVE command already");
                return ECORE_INVAL;
        }

        /* Check for DEL commands */
        if (exeq->get(exeq, elem)) {
                ECORE_MSG("There is a pending DEL command already");
                return ECORE_EXISTS;
        }

        /* Return the credit to the credit pool, unless asked not to */
        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              o->put_credit(o))) {
                PMD_DRV_LOG(ERR, "Failed to return a credit");
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @sc:         device handle
 * @qo:         qable object to check (source)
 * @elem:       element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
                                        union ecore_qable_obj *qo,
                                        struct ecore_exeq_elem *elem)
{
        struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
        struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
        struct ecore_exeq_elem query_elem;
        struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
        struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

        /* Check if we can perform this operation based on the current registry
         * state.
         */
        if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
                ECORE_MSG("MOVE command is not allowed considering current registry state");
                return ECORE_INVAL;
        }

        /* Check if there is an already pending DEL or MOVE command for the
         * source object or an ADD command for the destination object. Return
         * an error if so.
         */
        ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

        /* Check DEL on source */
        query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
        if (src_exeq->get(src_exeq, &query_elem)) {
                PMD_DRV_LOG(ERR,
                            "There is a pending DEL command on the source queue already");
                return ECORE_INVAL;
        }

        /* Check MOVE on source */
        if (src_exeq->get(src_exeq, elem)) {
                ECORE_MSG("There is a pending MOVE command already");
                return ECORE_EXISTS;
        }

        /* Check ADD on destination */
        query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
        if (dest_exeq->get(dest_exeq, &query_elem)) {
                PMD_DRV_LOG(ERR,
                            "There is a pending ADD command on the destination queue already");
                return ECORE_INVAL;
        }

        /* Consume the credit, unless asked not to */
        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              dest_o->get_credit(dest_o)))
                return ECORE_INVAL;

        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              src_o->put_credit(src_o))) {
                /* Return the credit taken from dest... */
                dest_o->put_credit(dest_o);
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

static int ecore_validate_vlan_mac(struct bnx2x_softc *sc,
                                   union ecore_qable_obj *qo,
                                   struct ecore_exeq_elem *elem)
{
        switch (elem->cmd_data.vlan_mac.cmd) {
        case ECORE_VLAN_MAC_ADD:
                return ecore_validate_vlan_mac_add(sc, qo, elem);
        case ECORE_VLAN_MAC_DEL:
                return ecore_validate_vlan_mac_del(sc, qo, elem);
        case ECORE_VLAN_MAC_MOVE:
                return ecore_validate_vlan_mac_move(sc, qo, elem);
        default:
                return ECORE_INVAL;
        }
}

static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc,
                                 union ecore_qable_obj *qo,
                                 struct ecore_exeq_elem *elem)
{
        int rc = 0;

        /* If consumption wasn't required, nothing to do */
        if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                           &elem->cmd_data.vlan_mac.vlan_mac_flags))
                return ECORE_SUCCESS;

        switch (elem->cmd_data.vlan_mac.cmd) {
        case ECORE_VLAN_MAC_ADD:
        case ECORE_VLAN_MAC_MOVE:
                rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
                break;
        case ECORE_VLAN_MAC_DEL:
                rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
                break;
        default:
                return ECORE_INVAL;
        }

        if (rc != TRUE)
                return ECORE_INVAL;

        return ECORE_SUCCESS;
}

/**
 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @sc:         device handle
 * @o:          ecore_vlan_mac_obj
 */
static int ecore_wait_vlan_mac(struct bnx2x_softc *sc,
                               struct ecore_vlan_mac_obj *o)
{
        int cnt = 5000, rc;
        struct ecore_exe_queue_obj *exeq = &o->exe_queue;
        struct ecore_raw_obj *raw = &o->raw;

        while (cnt--) {
                /* Wait for the current command to complete */
                rc = raw->wait_comp(sc, raw);
                if (rc)
                        return rc;

                /* Wait until there are no pending commands */
                if (!ecore_exe_queue_empty(exeq))
                        ECORE_WAIT(sc, 1000);
                else
                        return ECORE_SUCCESS;
        }

        return ECORE_TIMEOUT;
}

static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
                                         struct ecore_vlan_mac_obj *o,
                                         unsigned long *ramrod_flags)
{
        int rc = ECORE_SUCCESS;

        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

        ECORE_MSG("vlan_mac_execute_step - trying to take writer lock");
        rc = __ecore_vlan_mac_h_write_trylock(sc, o);

        if (rc != ECORE_SUCCESS) {
                __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);

                /* The calling function should not differentiate between this
                 * case and the case in which there is already a pending
                 * ramrod.
                 */
                rc = ECORE_PENDING;
        } else {
                rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
        }
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

        return rc;
}

/**
 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @sc:                 device handle
 * @o:                  ecore_vlan_mac_obj
 * @cqe:                completion element we are handling
 * @ramrod_flags:       if RAMROD_CONT is set, schedule the next execution
 *                      chunk
 */
static int ecore_complete_vlan_mac(struct bnx2x_softc *sc,
                                   struct ecore_vlan_mac_obj *o,
                                   union event_ring_elem *cqe,
                                   unsigned long *ramrod_flags)
{
        struct ecore_raw_obj *r = &o->raw;
        int rc;

        /* Reset pending list */
        ecore_exe_queue_reset_pending(sc, &o->exe_queue);

        /* Clear pending */
        r->clear_pending(r);

        /* If the ramrod failed this is most likely a SW bug */
        if (cqe->message.error)
                return ECORE_INVAL;

        /* Run the next bulk of pending commands if requested */
        if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
                rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
                if (rc < 0)
                        return rc;
        }

        /* If there is more work to do return PENDING */
        if (!ecore_exe_queue_empty(&o->exe_queue))
                return ECORE_PENDING;

        return ECORE_SUCCESS;
}
1390
1391 /**
1392  * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1393  *
1394  * @sc:         device handle
1395  * @o:          ecore_qable_obj
1396  * @elem:       ecore_exeq_elem
1397  */
1398 static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
1399                                    union ecore_qable_obj *qo,
1400                                    struct ecore_exeq_elem *elem)
1401 {
1402         struct ecore_exeq_elem query, *pos;
1403         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1404         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1405
1406         ECORE_MEMCPY(&query, elem, sizeof(query));
1407
1408         switch (elem->cmd_data.vlan_mac.cmd) {
1409         case ECORE_VLAN_MAC_ADD:
1410                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1411                 break;
1412         case ECORE_VLAN_MAC_DEL:
1413                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1414                 break;
1415         default:
1416                 /* Don't handle anything other than ADD or DEL */
1417                 return 0;
1418         }
1419
1420         /* If we found the appropriate element - delete it */
1421         pos = exeq->get(exeq, &query);
1422         if (pos) {
1423
1424                 /* Return the credit of the optimized command */
1425                 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1426                                     &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1427                         if (query.cmd_data.vlan_mac.cmd ==
1428                             ECORE_VLAN_MAC_ADD) {
1429                                 if (!o->put_credit(o)) {
1430                                         PMD_DRV_LOG(ERR, "Failed to return the credit for the optimized ADD command");
1431                                         return ECORE_INVAL;
1432                                 }
1433                         } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1434                                 PMD_DRV_LOG(ERR, "Failed to recover the credit from the optimized DEL command");
1435                                 return ECORE_INVAL;
1436                         }
1437                 }
1438
1439                 ECORE_MSG("Optimizing %s command",
1440                           (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1441                           "ADD" : "DEL");
1442
1443                 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1444                 ecore_exe_queue_free_elem(sc, pos);
1445                 return 1;
1446         }
1447
1448         return 0;
1449 }
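
/*
 * Illustrative trace (editorial note, not from the original sources):
 * assume the execution queue already holds a pending ADD for MAC
 * aa:bb:cc:dd:ee:ff and a DEL for the same MAC arrives.  The DEL's
 * "query" is built with the inverted command (ADD), exeq->get() finds
 * the pending ADD, the CAM credit that ADD consumed in 'validate' is
 * returned via put_credit(), and both commands cancel out without any
 * ramrod ever being sent to the FW.
 */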
1450
1451 /**
1452  * ecore_vlan_mac_get_registry_elem - prepare a registry element
1453  *
1454  * @sc:      device handle
1455  * @o:       VLAN-MAC object whose registry and CAM pool are used
1456  * @elem:    execution queue element describing the command
1457  * @restore: TRUE for the RESTORE flow (no new allocation is done)
1458  * @re:      output - the prepared registry element
1459  *
1460  * Prepares a registry element according to the current command request.
1461  */
1462 static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
1463                                             struct ecore_vlan_mac_obj *o,
1464                                             struct ecore_exeq_elem *elem,
1465                                             int restore, struct
1466                                             ecore_vlan_mac_registry_elem
1467                                             **re)
1468 {
1469         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1470         struct ecore_vlan_mac_registry_elem *reg_elem;
1471
1472         /* Allocate a new registry element if needed. */
1473         if (!restore &&
1474             ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1475                 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1476                 if (!reg_elem)
1477                         return ECORE_NOMEM;
1478
1479                 /* Get a new CAM offset */
1480                 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1481                         /* This shall never happen, because we have checked the
1482                          * CAM availability in the 'validate'.
1483                          */
1484                         ECORE_DBG_BREAK_IF(1);
1485                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1486                         return ECORE_INVAL;
1487                 }
1488
1489                 ECORE_MSG("Got cam offset %d", reg_elem->cam_offset);
1490
1491                 /* Set the VLAN-MAC data */
1492                 ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1493                              sizeof(reg_elem->u));
1494
1495                 /* Copy the flags (needed for DEL and RESTORE flows) */
1496                 reg_elem->vlan_mac_flags =
1497                     elem->cmd_data.vlan_mac.vlan_mac_flags;
1498         } else                  /* DEL, RESTORE */
1499                 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1500
1501         *re = reg_elem;
1502         return ECORE_SUCCESS;
1503 }
1504
1505 /**
1506  * ecore_execute_vlan_mac - execute VLAN-MAC command
1507  *
1508  * @sc:           device handle
1509  * @qo:           ecore_qable_obj containing the VLAN-MAC object
1510  * @exe_chunk:    list of commands to execute in this chunk
1511  * @ramrod_flags: execution flags (RAMROD_RESTORE, RAMROD_DRV_CLR_ONLY, ...)
1512  *
1513  * Builds the ramrod data from the chunk and sends the ramrod to the FW.
1514  */
1515 static int ecore_execute_vlan_mac(struct bnx2x_softc *sc,
1516                                   union ecore_qable_obj *qo,
1517                                   ecore_list_t *exe_chunk,
1518                                   unsigned long *ramrod_flags)
1519 {
1520         struct ecore_exeq_elem *elem;
1521         struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1522         struct ecore_raw_obj *r = &o->raw;
1523         int rc, idx = 0;
1524         int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1525         int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1526         struct ecore_vlan_mac_registry_elem *reg_elem;
1527         enum ecore_vlan_mac_cmd cmd;
1528
1529         /* If DRIVER_ONLY execution is requested, clean up the registry
1530          * and exit. Otherwise send a ramrod to the FW.
1531          */
1532         if (!drv_only) {
1533
1534                 /* Set pending */
1535                 r->set_pending(r);
1536
1537                 /* Fill the ramrod data */
1538                 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1539                                           struct ecore_exeq_elem) {
1540                         cmd = elem->cmd_data.vlan_mac.cmd;
1541                         /* We will add to the target object in MOVE command, so
1542                          * change the object for a CAM search.
1543                          */
1544                         if (cmd == ECORE_VLAN_MAC_MOVE)
1545                                 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1546                         else
1547                                 cam_obj = o;
1548
1549                         rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1550                                                               elem, restore,
1551                                                               &reg_elem);
1552                         if (rc)
1553                                 goto error_exit;
1554
1555                         ECORE_DBG_BREAK_IF(!reg_elem);
1556
1557                         /* Push a new entry into the registry */
1558                         if (!restore &&
1559                             ((cmd == ECORE_VLAN_MAC_ADD) ||
1560                              (cmd == ECORE_VLAN_MAC_MOVE)))
1561                                 ECORE_LIST_PUSH_HEAD(&reg_elem->link,
1562                                                      &cam_obj->head);
1563
1564                         /* Configure a single command in a ramrod data buffer */
1565                         o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset);
1566
1567                         /* MOVE command consumes 2 entries in the ramrod data */
1568                         if (cmd == ECORE_VLAN_MAC_MOVE)
1569                                 idx += 2;
1570                         else
1571                                 idx++;
1572                 }
1573
1574                 /*
1575                  * No need for an explicit memory barrier here: we must
1576                  * ensure the ordering of writing to the SPQ element and
1577                  * updating the SPQ producer (which involves a memory
1578                  * read), so a full memory barrier is placed inside
1579                  * ecore_sp_post().
1580                  */
1581
1582                 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1583                                    r->rdata_mapping, ETH_CONNECTION_TYPE);
1584                 if (rc)
1585                         goto error_exit;
1586         }
1587
1588         /* Now, when we are done with the ramrod - clean up the registry */
1589         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1590                 cmd = elem->cmd_data.vlan_mac.cmd;
1591                 if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) {
1592                         reg_elem = o->check_del(sc, o,
1593                                                 &elem->cmd_data.vlan_mac.u);
1594
1595                         ECORE_DBG_BREAK_IF(!reg_elem);
1596
1597                         o->put_cam_offset(o, reg_elem->cam_offset);
1598                         ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
1599                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1600                 }
1601         }
1602
1603         if (!drv_only)
1604                 return ECORE_PENDING;
1605         else
1606                 return ECORE_SUCCESS;
1607
1608 error_exit:
1609         r->clear_pending(r);
1610
1611         /* Cleanup a registry in case of a failure */
1612         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1613                 cmd = elem->cmd_data.vlan_mac.cmd;
1614
1615                 if (cmd == ECORE_VLAN_MAC_MOVE)
1616                         cam_obj = elem->cmd_data.vlan_mac.target_obj;
1617                 else
1618                         cam_obj = o;
1619
1620                 /* Delete all the entries that were newly added above */
1621                 if (!restore &&
1622                     ((cmd == ECORE_VLAN_MAC_ADD) ||
1623                      (cmd == ECORE_VLAN_MAC_MOVE))) {
1624                         reg_elem = o->check_del(sc, cam_obj,
1625                                                 &elem->cmd_data.vlan_mac.u);
1626                         if (reg_elem) {
1627                                 ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
1628                                                         &cam_obj->head);
1629                                 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1630                         }
1631                 }
1632         }
1633
1634         return rc;
1635 }
1636
1637 static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct
1638                                        ecore_vlan_mac_ramrod_params *p)
1639 {
1640         struct ecore_exeq_elem *elem;
1641         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1642         int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1643
1644         /* Allocate the execution queue element */
1645         elem = ecore_exe_queue_alloc_elem(sc);
1646         if (!elem)
1647                 return ECORE_NOMEM;
1648
1649         /* Set the command 'length' */
1650         switch (p->user_req.cmd) {
1651         case ECORE_VLAN_MAC_MOVE:
1652                 elem->cmd_len = 2;
1653                 break;
1654         default:
1655                 elem->cmd_len = 1;
1656         }
1657
1658         /* Fill the object specific info */
1659         ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req,
1660                      sizeof(p->user_req));
1661
1662         /* Try to add a new command to the pending list */
1663         return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1664 }
1665
1666 /**
1667  * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1668  *
1669  * @sc:   device handle
1670  * @p:    ramrod parameters: the object, the user request and the
1671  *        execution flags
1672  */
1673 int ecore_config_vlan_mac(struct bnx2x_softc *sc,
1674                           struct ecore_vlan_mac_ramrod_params *p)
1675 {
1676         int rc = ECORE_SUCCESS;
1677         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1678         unsigned long *ramrod_flags = &p->ramrod_flags;
1679         int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
1680         struct ecore_raw_obj *raw = &o->raw;
1681
1682         /*
1683          * Add new elements to the execution list for commands that require it.
1684          */
1685         if (!cont) {
1686                 rc = ecore_vlan_mac_push_new_cmd(sc, p);
1687                 if (rc)
1688                         return rc;
1689         }
1690
1691         /* If nothing will be executed further in this iteration we want to
1692          * return PENDING if there are pending commands
1693          */
1694         if (!ecore_exe_queue_empty(&o->exe_queue))
1695                 rc = ECORE_PENDING;
1696
1697         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1698                 ECORE_MSG
1699                     ("RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
1700                 raw->clear_pending(raw);
1701         }
1702
1703         /* Execute commands if required */
1704         if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
1705             ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
1706                 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
1707                                                    &p->ramrod_flags);
1708                 if (rc < 0)
1709                         return rc;
1710         }
1711
1712         /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1713          * then the user wants to wait until the last command is done.
1714          */
1715         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1716                 /* Wait maximum for the current exe_queue length iterations plus
1717                  * one (for the current pending command).
1718                  */
1719                 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
1720
1721                 while (!ecore_exe_queue_empty(&o->exe_queue) &&
1722                        max_iterations--) {
1723
1724                         /* Wait for the current command to complete */
1725                         rc = raw->wait_comp(sc, raw);
1726                         if (rc)
1727                                 return rc;
1728
1729                         /* Make a next step */
1730                         rc = __ecore_vlan_mac_execute_step(sc,
1731                                                            p->vlan_mac_obj,
1732                                                            &p->ramrod_flags);
1733                         if (rc < 0)
1734                                 return rc;
1735                 }
1736
1737                 return ECORE_SUCCESS;
1738         }
1739
1740         return rc;
1741 }
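
/*
 * Usage sketch (editorial illustration, never called by the driver): how a
 * caller would typically add a single MAC classification rule and block
 * until the ramrod completes.  The u.mac.mac field layout is assumed from
 * the ecore_sp.h definitions; treat this helper as hypothetical.
 */
static int __rte_unused ecore_example_add_mac(struct bnx2x_softc *sc,
                                              struct ecore_vlan_mac_obj *mac_obj,
                                              const uint8_t *mac)
{
        struct ecore_vlan_mac_ramrod_params p;

        /* Describe the request: which object, which command, which MAC */
        ECORE_MEMSET(&p, 0, sizeof(p));
        p.vlan_mac_obj = mac_obj;
        p.user_req.cmd = ECORE_VLAN_MAC_ADD;
        ECORE_MEMCPY(p.user_req.u.mac.mac, mac, ETH_ALEN);

        /* Execute immediately and wait for the ramrod completion */
        ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);

        return ecore_config_vlan_mac(sc, &p);
}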
1742
1743 /**
1744  * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1745  *
1746  * @sc:             device handle
1747  * @o:              VLAN-MAC object to purge
1748  * @vlan_mac_flags: flags the elements to be deleted must match
1749  * @ramrod_flags:   execution flags to be used for this deletion
1750  *
1751  * Returns zero if the last operation has completed successfully and there
1752  * are no more elements left, a positive value if the last operation has
1753  * completed successfully and there are more previously configured elements,
1754  * or a negative value if the current operation has failed.
1755  */
1756 static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
1757                                   struct ecore_vlan_mac_obj *o,
1758                                   unsigned long *vlan_mac_flags,
1759                                   unsigned long *ramrod_flags)
1760 {
1761         struct ecore_vlan_mac_registry_elem *pos = NULL;
1762         int rc = 0, read_lock;
1763         struct ecore_vlan_mac_ramrod_params p;
1764         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1765         struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
1766
1767         /* Clear pending commands first */
1768
1769         ECORE_SPIN_LOCK_BH(&exeq->lock);
1770
1771         ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
1772                                        &exeq->exe_queue, link,
1773                                        struct ecore_exeq_elem) {
1774                 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1775                     *vlan_mac_flags) {
1776                         rc = exeq->remove(sc, exeq->owner, exeq_pos);
1777                         if (rc) {
1778                                 PMD_DRV_LOG(ERR, "Failed to remove command");
1779                                 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1780                                 return rc;
1781                         }
1782                         ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
1783                                                 &exeq->exe_queue);
1784                         ecore_exe_queue_free_elem(sc, exeq_pos);
1785                 }
1786         }
1787
1788         ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1789
1790         /* Prepare a command request */
1791         ECORE_MEMSET(&p, 0, sizeof(p));
1792         p.vlan_mac_obj = o;
1793         p.ramrod_flags = *ramrod_flags;
1794         p.user_req.cmd = ECORE_VLAN_MAC_DEL;
1795
1796         /* Add all but the last VLAN-MAC to the execution queue without
1797          * actually executing anything.
1798          */
1799         ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
1800         ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
1801         ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1802
1803         ECORE_MSG("vlan_mac_del_all -- taking vlan_mac_lock (reader)");
1804         read_lock = ecore_vlan_mac_h_read_lock(sc, o);
1805         if (read_lock != ECORE_SUCCESS)
1806                 return read_lock;
1807
1808         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
1809                                   struct ecore_vlan_mac_registry_elem) {
1810                 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1811                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1812                         ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
1813                         rc = ecore_config_vlan_mac(sc, &p);
1814                         if (rc < 0) {
1815                                 PMD_DRV_LOG(ERR,
1816                                             "Failed to add a new DEL command");
1817                                 ecore_vlan_mac_h_read_unlock(sc, o);
1818                                 return rc;
1819                         }
1820                 }
1821         }
1822
1823         ECORE_MSG("vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
1824         ecore_vlan_mac_h_read_unlock(sc, o);
1825
1826         p.ramrod_flags = *ramrod_flags;
1827         ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1828
1829         return ecore_config_vlan_mac(sc, &p);
1830 }
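
/*
 * Editorial summary of the flow above: (1) purge matching commands that are
 * still only queued, (2) enqueue a DEL for every matching registry entry
 * without executing anything, then (3) re-issue the request with RAMROD_CONT
 * set so the whole batch is driven through the chunked execution path.
 */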
1831
1832 static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
1833                                uint32_t cid, uint8_t func_id,
1834                                void *rdata,
1835                                ecore_dma_addr_t rdata_mapping, int state,
1836                                unsigned long *pstate, ecore_obj_type type)
1837 {
1838         raw->func_id = func_id;
1839         raw->cid = cid;
1840         raw->cl_id = cl_id;
1841         raw->rdata = rdata;
1842         raw->rdata_mapping = rdata_mapping;
1843         raw->state = state;
1844         raw->pstate = pstate;
1845         raw->obj_type = type;
1846         raw->check_pending = ecore_raw_check_pending;
1847         raw->clear_pending = ecore_raw_clear_pending;
1848         raw->set_pending = ecore_raw_set_pending;
1849         raw->wait_comp = ecore_raw_wait;
1850 }
1851
1852 static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
1853                                        uint8_t cl_id, uint32_t cid,
1854                                        uint8_t func_id, void *rdata,
1855                                        ecore_dma_addr_t rdata_mapping,
1856                                        int state, unsigned long *pstate,
1857                                        ecore_obj_type type,
1858                                        struct ecore_credit_pool_obj
1859                                        *macs_pool, struct ecore_credit_pool_obj
1860                                        *vlans_pool)
1861 {
1862         ECORE_LIST_INIT(&o->head);
1863         o->head_reader = 0;
1864         o->head_exe_request = FALSE;
1865         o->saved_ramrod_flags = 0;
1866
1867         o->macs_pool = macs_pool;
1868         o->vlans_pool = vlans_pool;
1869
1870         o->delete_all = ecore_vlan_mac_del_all;
1871         o->restore = ecore_vlan_mac_restore;
1872         o->complete = ecore_complete_vlan_mac;
1873         o->wait = ecore_wait_vlan_mac;
1874
1875         ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1876                            state, pstate, type);
1877 }
1878
1879 void ecore_init_mac_obj(struct bnx2x_softc *sc,
1880                         struct ecore_vlan_mac_obj *mac_obj,
1881                         uint8_t cl_id, uint32_t cid, uint8_t func_id,
1882                         void *rdata, ecore_dma_addr_t rdata_mapping, int state,
1883                         unsigned long *pstate, ecore_obj_type type,
1884                         struct ecore_credit_pool_obj *macs_pool)
1885 {
1886         union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
1887
1888         ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1889                                    rdata_mapping, state, pstate, type,
1890                                    macs_pool, NULL);
1891
1892         /* CAM credit pool handling */
1893         mac_obj->get_credit = ecore_get_credit_mac;
1894         mac_obj->put_credit = ecore_put_credit_mac;
1895         mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
1896         mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
1897
1898         if (CHIP_IS_E1x(sc)) {
1899                 mac_obj->set_one_rule = ecore_set_one_mac_e1x;
1900                 mac_obj->check_del = ecore_check_mac_del;
1901                 mac_obj->check_add = ecore_check_mac_add;
1902                 mac_obj->check_move = ecore_check_move_always_err;
1903                 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1904
1905                 /* Exe Queue */
1906                 ecore_exe_queue_init(sc,
1907                                      &mac_obj->exe_queue, 1, qable_obj,
1908                                      ecore_validate_vlan_mac,
1909                                      ecore_remove_vlan_mac,
1910                                      ecore_optimize_vlan_mac,
1911                                      ecore_execute_vlan_mac,
1912                                      ecore_exeq_get_mac);
1913         } else {
1914                 mac_obj->set_one_rule = ecore_set_one_mac_e2;
1915                 mac_obj->check_del = ecore_check_mac_del;
1916                 mac_obj->check_add = ecore_check_mac_add;
1917                 mac_obj->check_move = ecore_check_move;
1918                 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1919                 mac_obj->get_n_elements = ecore_get_n_elements;
1920
1921                 /* Exe Queue */
1922                 ecore_exe_queue_init(sc,
1923                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1924                                      qable_obj, ecore_validate_vlan_mac,
1925                                      ecore_remove_vlan_mac,
1926                                      ecore_optimize_vlan_mac,
1927                                      ecore_execute_vlan_mac,
1928                                      ecore_exeq_get_mac);
1929         }
1930 }
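
/*
 * Editorial note: a typical caller (e.g. the L2 init path in bnx2x.c)
 * initializes the MAC object once per client, roughly as below.  The
 * ECORE_FILTER_MAC_PENDING state and the ECORE_OBJ_TYPE_RX_TX type are
 * assumptions based on common usage, not taken from this file:
 *
 *	ecore_init_mac_obj(sc, &mac_obj, cl_id, cid, func_id,
 *			   rdata, rdata_mapping, ECORE_FILTER_MAC_PENDING,
 *			   &sp_state, ECORE_OBJ_TYPE_RX_TX, &macs_pool);
 */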
1931
1932 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
1933 static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct
1934                                        tstorm_eth_mac_filter_config
1935                                        *mac_filters, uint16_t pf_id)
1936 {
1937         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1938
1939         uint32_t addr = BAR_TSTRORM_INTMEM +
1940             TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
1941
1942         ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters);
1943 }
1944
1945 static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
1946                                  struct ecore_rx_mode_ramrod_params *p)
1947 {
1948         /* update the sc MAC filter structure */
1949         uint32_t mask = (1 << p->cl_id);
1950
1951         struct tstorm_eth_mac_filter_config *mac_filters =
1952             (struct tstorm_eth_mac_filter_config *)p->rdata;
1953
1954         /* initial setting is drop-all */
1955         uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
1956         uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
1957         uint8_t unmatched_unicast = 0;
1958
1959         /* In e1x we only take the RX accept flags into account, since TX
1960          * switching isn't enabled. */
1961         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
1962                 /* accept matched ucast */
1963                 drop_all_ucast = 0;
1964
1965         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
1966                 /* accept matched mcast */
1967                 drop_all_mcast = 0;
1968
1969         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
1970                 /* accept all ucast */
1971                 drop_all_ucast = 0;
1972                 accp_all_ucast = 1;
1973         }
1974         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
1975                 /* accept all mcast */
1976                 drop_all_mcast = 0;
1977                 accp_all_mcast = 1;
1978         }
1979         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
1980                 /* accept (all) bcast */
1981                 accp_all_bcast = 1;
1982         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
1983                 /* accept unmatched unicasts */
1984                 unmatched_unicast = 1;
1985
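        /* For each filter bitmap below: set or clear this client's bit (mask) */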
1986         mac_filters->ucast_drop_all = drop_all_ucast ?
1987             mac_filters->ucast_drop_all | mask :
1988             mac_filters->ucast_drop_all & ~mask;
1989
1990         mac_filters->mcast_drop_all = drop_all_mcast ?
1991             mac_filters->mcast_drop_all | mask :
1992             mac_filters->mcast_drop_all & ~mask;
1993
1994         mac_filters->ucast_accept_all = accp_all_ucast ?
1995             mac_filters->ucast_accept_all | mask :
1996             mac_filters->ucast_accept_all & ~mask;
1997
1998         mac_filters->mcast_accept_all = accp_all_mcast ?
1999             mac_filters->mcast_accept_all | mask :
2000             mac_filters->mcast_accept_all & ~mask;
2001
2002         mac_filters->bcast_accept_all = accp_all_bcast ?
2003             mac_filters->bcast_accept_all | mask :
2004             mac_filters->bcast_accept_all & ~mask;
2005
2006         mac_filters->unmatched_unicast = unmatched_unicast ?
2007             mac_filters->unmatched_unicast | mask :
2008             mac_filters->unmatched_unicast & ~mask;
2009
2010         ECORE_MSG("drop_ucast 0x%x drop_mcast 0x%x accp_ucast 0x%x "
2011                   "accp_mcast 0x%x accp_bcast 0x%x",
2012                   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2013                   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2014                   mac_filters->bcast_accept_all);
2015
2016         /* write the MAC filter structure */
2017         __storm_memset_mac_filters(sc, mac_filters, p->func_id);
2018
2019         /* The operation is completed */
2020         ECORE_CLEAR_BIT(p->state, p->pstate);
2021         ECORE_SMP_MB_AFTER_CLEAR_BIT();
2022
2023         return ECORE_SUCCESS;
2024 }
2025
2026 /* Setup ramrod data */
2027 static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header
2028                                            *hdr, uint8_t rule_cnt)
2029 {
2030         hdr->echo = ECORE_CPU_TO_LE32(cid);
2031         hdr->rule_cnt = rule_cnt;
2032 }
2033
2034 static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags, struct eth_filter_rules_cmd
2035                                            *cmd, int clear_accept_all)
2036 {
2037         uint16_t state;
2038
2039         /* start with 'drop-all' */
2040         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2041             ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2042
2043         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2044                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2045
2046         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2047                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2048
2049         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2050                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2051                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2052         }
2053
2054         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2055                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2056                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2057         }
2058         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2059                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2060
2061         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2062                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2063                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2064         }
2065         if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2066                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2067
2068         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2069         if (clear_accept_all) {
2070                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2071                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2072                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2073                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2074         }
2075
2076         cmd->state = ECORE_CPU_TO_LE16(state);
2077 }
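
/*
 * Worked example (editorial): for a normal L2 client with accept_flags =
 * {ECORE_ACCEPT_UNICAST, ECORE_ACCEPT_MULTICAST, ECORE_ACCEPT_BROADCAST}
 * the function starts from UCAST_DROP_ALL | MCAST_DROP_ALL, clears both
 * DROP_ALL bits and sets BCAST_ACCEPT_ALL, so the resulting rule state
 * lets matched unicast/multicast frames and all broadcast frames through
 * while everything else is dropped.
 */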
2078
2079 static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
2080                                 struct ecore_rx_mode_ramrod_params *p)
2081 {
2082         struct eth_filter_rules_ramrod_data *data = p->rdata;
2083         int rc;
2084         uint8_t rule_idx = 0;
2085
2086         /* Reset the ramrod data buffer */
2087         ECORE_MEMSET(data, 0, sizeof(*data));
2088
2089         /* Setup ramrod data */
2090
2091         /* Tx (internal switching) */
2092         if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2093                 data->rules[rule_idx].client_id = p->cl_id;
2094                 data->rules[rule_idx].func_id = p->func_id;
2095
2096                 data->rules[rule_idx].cmd_general_data =
2097                     ETH_FILTER_RULES_CMD_TX_CMD;
2098
2099                 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2100                                                &(data->rules[rule_idx++]),
2101                                                FALSE);
2102         }
2103
2104         /* Rx */
2105         if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2106                 data->rules[rule_idx].client_id = p->cl_id;
2107                 data->rules[rule_idx].func_id = p->func_id;
2108
2109                 data->rules[rule_idx].cmd_general_data =
2110                     ETH_FILTER_RULES_CMD_RX_CMD;
2111
2112                 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2113                                                &(data->rules[rule_idx++]),
2114                                                FALSE);
2115         }
2116
2117         /* If FCoE Queue configuration has been requested, configure the Rx
2118          * and internal switching modes for this queue in separate rules.
2119          *
2120          * The FCoE queue shall never be set to ACCEPT_ALL packets of any
2121          * sort: MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2122          */
2123         if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2124                 /*  Tx (internal switching) */
2125                 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2126                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2127                         data->rules[rule_idx].func_id = p->func_id;
2128
2129                         data->rules[rule_idx].cmd_general_data =
2130                             ETH_FILTER_RULES_CMD_TX_CMD;
2131
2132                         ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2133                                                        &(data->rules
2134                                                          [rule_idx++]), TRUE);
2135                 }
2136
2137                 /* Rx */
2138                 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2139                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2140                         data->rules[rule_idx].func_id = p->func_id;
2141
2142                         data->rules[rule_idx].cmd_general_data =
2143                             ETH_FILTER_RULES_CMD_RX_CMD;
2144
2145                         ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2146                                                        &(data->rules
2147                                                          [rule_idx++]), TRUE);
2148                 }
2149         }
2150
2151         /* Set the ramrod header (most importantly - number of rules to
2152          * configure).
2153          */
2154         ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2155
2156         ECORE_MSG
2157             ("About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
2158              data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
2159
2160         /* No need for an explicit memory barrier here: we must
2161          * ensure the ordering of writing to the SPQ element and
2162          * updating the SPQ producer (which involves a memory read),
2163          * so a full memory barrier is placed there instead, inside
2164          * ecore_sp_post().
2165          */
2166
2167         /* Send a ramrod */
2168         rc = ecore_sp_post(sc,
2169                            RAMROD_CMD_ID_ETH_FILTER_RULES,
2170                            p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE);
2171         if (rc)
2172                 return rc;
2173
2174         /* Ramrod completion is pending */
2175         return ECORE_PENDING;
2176 }
2177
2178 static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc,
2179                                       struct ecore_rx_mode_ramrod_params *p)
2180 {
2181         return ecore_state_wait(sc, p->state, p->pstate);
2182 }
2183
2184 static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc,
2185                                     __rte_unused struct
2186                                     ecore_rx_mode_ramrod_params *p)
2187 {
2188         /* Do nothing */
2189         return ECORE_SUCCESS;
2190 }
2191
2192 int ecore_config_rx_mode(struct bnx2x_softc *sc,
2193                          struct ecore_rx_mode_ramrod_params *p)
2194 {
2195         int rc;
2196
2197         /* Configure the new classification in the chip */
2198         if (p->rx_mode_obj->config_rx_mode) {
2199                 rc = p->rx_mode_obj->config_rx_mode(sc, p);
2200                 if (rc < 0)
2201                         return rc;
2202
2203                 /* Wait for a ramrod completion if it was requested */
2204                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2205                         rc = p->rx_mode_obj->wait_comp(sc, p);
2206                         if (rc)
2207                                 return rc;
2208                 }
2209         } else {
2210                 PMD_DRV_LOG(ERR, "config_rx_mode is NULL");
2211                 return ECORE_INVAL;
2212         }
2213
2214         return rc;
2215 }
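
/*
 * Usage sketch (editorial illustration, never called by the driver): how a
 * caller might program a "normal" RX filter on both paths.  The particular
 * accept-flag combination below is an assumption, not taken from this file.
 */
static int __rte_unused ecore_example_set_rx_mode(struct bnx2x_softc *sc,
                                                  struct ecore_rx_mode_ramrod_params *p)
{
        /* Accept matched unicast/multicast plus all broadcast frames */
        ECORE_SET_BIT_NA(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags);
        ECORE_SET_BIT_NA(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags);
        ECORE_SET_BIT_NA(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags);
        ECORE_SET_BIT_NA(ECORE_ACCEPT_UNICAST, &p->tx_accept_flags);
        ECORE_SET_BIT_NA(ECORE_ACCEPT_MULTICAST, &p->tx_accept_flags);
        ECORE_SET_BIT_NA(ECORE_ACCEPT_BROADCAST, &p->tx_accept_flags);

        /* Configure both the RX and TX (internal switching) rules and wait */
        ECORE_SET_BIT_NA(RAMROD_RX, &p->ramrod_flags);
        ECORE_SET_BIT_NA(RAMROD_TX, &p->ramrod_flags);
        ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p->ramrod_flags);

        return ecore_config_rx_mode(sc, p);
}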
2216
2217 void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o)
2218 {
2219         if (CHIP_IS_E1x(sc)) {
2220                 o->wait_comp = ecore_empty_rx_mode_wait;
2221                 o->config_rx_mode = ecore_set_rx_mode_e1x;
2222         } else {
2223                 o->wait_comp = ecore_wait_rx_mode_comp_e2;
2224                 o->config_rx_mode = ecore_set_rx_mode_e2;
2225         }
2226 }
2227
2228 /********************* Multicast verbs: SET, CLEAR ****************************/
2229 static uint8_t ecore_mcast_bin_from_mac(uint8_t *mac)
2230 {
2231         return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2232 }
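
/*
 * Editorial note: the approximate-match registry is a 256-bin hash; the
 * bin index is simply the top byte of the little-endian CRC32 of the
 * 6-byte MAC address, so several MACs may share one bin and a bin stays
 * set until a DEL clears it.
 */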
2233
2234 struct ecore_mcast_mac_elem {
2235         ecore_list_entry_t link;
2236         uint8_t mac[ETH_ALEN];
2237         uint8_t pad[2];         /* For a natural alignment of the following buffer */
2238 };
2239
2240 struct ecore_pending_mcast_cmd {
2241         ecore_list_entry_t link;
2242         int type;               /* ECORE_MCAST_CMD_X */
2243         union {
2244                 ecore_list_t macs_head;
2245                 uint32_t macs_num;      /* Needed for DEL command */
2246                 int next_bin;   /* Needed for RESTORE flow with aprox match */
2247         } data;
2248         int alloc_len;          /* total allocated size, needed to free the command */
2249         int done;               /* set to TRUE, when the command has been handled,
2250                                  * practically used in 57712 handling only, where one pending
2251                                  * command may be handled in a few operations. As long as for
2252                                  * other chips every operation handling is completed in a
2253                                  * single ramrod, there is no need to utilize this field.
2254                                  */
2255 };
2256
2257 static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o)
2258 {
2259         if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2260             o->raw.wait_comp(sc, &o->raw))
2261                 return ECORE_TIMEOUT;
2262
2263         return ECORE_SUCCESS;
2264 }
2265
2266 static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
2267                                    struct ecore_mcast_obj *o,
2268                                    struct ecore_mcast_ramrod_params *p,
2269                                    enum ecore_mcast_cmd cmd)
2270 {
2271         int total_sz;
2272         struct ecore_pending_mcast_cmd *new_cmd;
2273         struct ecore_mcast_mac_elem *cur_mac = NULL;
2274         struct ecore_mcast_list_elem *pos;
2275         int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2276                              p->mcast_list_len : 0);
2277
2278         /* If the command is empty ("handle pending commands only"), break */
2279         if (!p->mcast_list_len)
2280                 return ECORE_SUCCESS;
2281
2282         total_sz = sizeof(*new_cmd) +
2283             macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2284
2285         /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2286         new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2287
2288         if (!new_cmd)
2289                 return ECORE_NOMEM;
2290
2291         ECORE_MSG("About to enqueue a new command of type %d. macs_list_len=%d",
2292                   cmd, macs_list_len);
2293
2294         ECORE_LIST_INIT(&new_cmd->data.macs_head);
2295         new_cmd->alloc_len = total_sz;
2296         new_cmd->type = cmd;
2297         new_cmd->done = FALSE;
2298
2299         switch (cmd) {
2300         case ECORE_MCAST_CMD_ADD:
2301                 cur_mac = (struct ecore_mcast_mac_elem *)
2302                     ((uint8_t *) new_cmd + sizeof(*new_cmd));
2303
2304                 /* Push the MACs of the current command into the pending command
2305                  * MACs list: FIFO
2306                  */
2307                 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2308                                           struct ecore_mcast_list_elem) {
2309                         ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2310                         ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2311                                              &new_cmd->data.macs_head);
2312                         cur_mac++;
2313                 }
2314
2315                 break;
2316
2317         case ECORE_MCAST_CMD_DEL:
2318                 new_cmd->data.macs_num = p->mcast_list_len;
2319                 break;
2320
2321         case ECORE_MCAST_CMD_RESTORE:
2322                 new_cmd->data.next_bin = 0;
2323                 break;
2324
2325         default:
2326                 ECORE_FREE(sc, new_cmd, total_sz);
2327                 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2328                 return ECORE_INVAL;
2329         }
2330
2331         /* Push the new pending command to the tail of the pending list: FIFO */
2332         ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2333
2334         o->set_sched(o);
2335
2336         return ECORE_PENDING;
2337 }
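
/*
 * Editorial note on the allocation above: the command header and its MAC
 * array live in a single ECORE_ZALLOC'd chunk,
 *
 *	[ struct ecore_pending_mcast_cmd | macs_list_len * mac elements ],
 *
 * cur_mac starts right past the header, and freeing the one chunk (see
 * alloc_len) releases the embedded list elements as well.
 */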
2338
2339 /**
2340  * ecore_mcast_get_next_bin - get the next set bin (index)
2341  *
2342  * @o:          multicast object containing the bin registry
2343  * @last:       index to start looking from (inclusive)
2344  *
2345  * Returns the next found (set) bin or a negative value if none is found.
2346  */
2347 static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2348 {
2349         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2350
2351         for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2352                 if (o->registry.aprox_match.vec[i])
2353                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2354                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2355                                 if (BIT_VEC64_TEST_BIT
2356                                     (o->registry.aprox_match.vec, cur_bit)) {
2357                                         return cur_bit;
2358                                 }
2359                         }
2360                 inner_start = 0;
2361         }
2362
2363         /* None found */
2364         return -1;
2365 }
2366
2367 /**
2368  * ecore_mcast_clear_first_bin - find the first set bin and clear it
2369  *
2370  * @o:          multicast object containing the bin registry
2371  *
2372  * Returns the index of the cleared bin or -1 if none was found.
2373  */
2374 static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2375 {
2376         int cur_bit = ecore_mcast_get_next_bin(o, 0);
2377
2378         if (cur_bit >= 0)
2379                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2380
2381         return cur_bit;
2382 }
2383
2384 static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2385 {
2386         struct ecore_raw_obj *raw = &o->raw;
2387         uint8_t rx_tx_flag = 0;
2388
2389         if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2390             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2391                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2392
2393         if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2394             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2395                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2396
2397         return rx_tx_flag;
2398 }
2399
2400 static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
2401                                         struct ecore_mcast_obj *o, int idx,
2402                                         union ecore_mcast_config_data *cfg_data,
2403                                         enum ecore_mcast_cmd cmd)
2404 {
2405         struct ecore_raw_obj *r = &o->raw;
2406         struct eth_multicast_rules_ramrod_data *data =
2407             (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2408         uint8_t func_id = r->func_id;
2409         uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2410         int bin;
2411
2412         if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2413                 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2414
2415         data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2416
2417         /* Get a bin and update the bins' vector */
2418         switch (cmd) {
2419         case ECORE_MCAST_CMD_ADD:
2420                 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2421                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2422                 break;
2423
2424         case ECORE_MCAST_CMD_DEL:
2425                 /* If there were no more bins to clear
2426                  * (ecore_mcast_clear_first_bin() returns -1) we "clear"
2427                  * bin 0xff (-1 truncated to uint8_t), which is harmless.
2428                  * See ecore_mcast_validate_e2() for an explanation of
2429                  * when this may happen.
2430                  */
2431                 bin = ecore_mcast_clear_first_bin(o);
2432                 break;
2433
2434         case ECORE_MCAST_CMD_RESTORE:
2435                 bin = cfg_data->bin;
2436                 break;
2437
2438         default:
2439                 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2440                 return;
2441         }
2442
2443         ECORE_MSG("%s bin %d",
2444                   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2445                    "Setting" : "Clearing"), bin);
2446
2447         data->rules[idx].bin_id = (uint8_t) bin;
2448         data->rules[idx].func_id = func_id;
2449         data->rules[idx].engine_id = o->engine_id;
2450 }
2451
2452 /**
2453  * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2454  *
2455  * @sc:         device handle
2456  * @o:          multicast object whose registry is restored
2457  * @start_bin:  index in the registry to start from (inclusive)
2458  * @rdata_idx:  index in the ramrod data to start from
2459  *
2460  * Returns the last handled bin index or -1 if all bins have been handled.
2461  */
2462 static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
2463                                              struct ecore_mcast_obj *o,
2464                                              int start_bin, int *rdata_idx)
2465 {
2466         int cur_bin, cnt = *rdata_idx;
2467         union ecore_mcast_config_data cfg_data = { NULL };
2468
2469         /* go through the registry and configure the bins from it */
2470         for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2471              cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2472
2473                 cfg_data.bin = (uint8_t) cur_bin;
2474                 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE);
2475
2476                 cnt++;
2477
2478                 ECORE_MSG("About to configure a bin %d", cur_bin);
2479
2480                 /* Break if we reached the maximum number
2481                  * of rules.
2482                  */
2483                 if (cnt >= o->max_cmd_len)
2484                         break;
2485         }
2486
2487         *rdata_idx = cnt;
2488
2489         return cur_bin;
2490 }
2491
2492 static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
2493                                            struct ecore_mcast_obj *o,
2494                                            struct ecore_pending_mcast_cmd
2495                                            *cmd_pos, int *line_idx)
2496 {
2497         struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2498         int cnt = *line_idx;
2499         union ecore_mcast_config_data cfg_data = { NULL };
2500
2501         ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2502                                        &cmd_pos->data.macs_head, link,
2503                                        struct ecore_mcast_mac_elem) {
2504
2505                 cfg_data.mac = &pmac_pos->mac[0];
2506                 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2507
2508                 cnt++;
2509
2510                 ECORE_MSG
2511                     ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2512                      pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
2513                      pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2514
2515                 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2516                                         &cmd_pos->data.macs_head);
2517
2518                 /* Break if we reached the maximum number
2519                  * of rules.
2520                  */
2521                 if (cnt >= o->max_cmd_len)
2522                         break;
2523         }
2524
2525         *line_idx = cnt;
2526
2527         /* if no more MACs to configure - we are done */
2528         if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2529                 cmd_pos->done = TRUE;
2530 }
2531
2532 static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
2533                                            struct ecore_mcast_obj *o,
2534                                            struct ecore_pending_mcast_cmd
2535                                            *cmd_pos, int *line_idx)
2536 {
2537         int cnt = *line_idx;
2538
2539         while (cmd_pos->data.macs_num) {
2540                 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2541
2542                 cnt++;
2543
2544                 cmd_pos->data.macs_num--;
2545
2546                 ECORE_MSG("Deleting MAC. %d left, cnt is %d",
2547                           cmd_pos->data.macs_num, cnt);
2548
2549                 /* Break if we reached the maximum
2550                  * number of rules.
2551                  */
2552                 if (cnt >= o->max_cmd_len)
2553                         break;
2554         }
2555
2556         *line_idx = cnt;
2557
2558         /* If we cleared all bins - we are done */
2559         /* If we processed all the requested deletions - we are done */
2560                 cmd_pos->done = TRUE;
2561 }
2562
2563 static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc,
2564                                                struct ecore_mcast_obj *o, struct
2565                                                ecore_pending_mcast_cmd
2566                                                *cmd_pos, int *line_idx)
2567 {
2568         cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2569                                                 line_idx);
2570
2571         if (cmd_pos->data.next_bin < 0)
2572                 /* If o->hdl_restore returned -1 we are done */
2573                 cmd_pos->done = TRUE;
2574         else
2575                 /* Start from the next bin next time */
2576                 cmd_pos->data.next_bin++;
2577 }
2578
2579 static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct
2580                                               ecore_mcast_ramrod_params
2581                                               *p)
2582 {
2583         struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2584         int cnt = 0;
2585         struct ecore_mcast_obj *o = p->mcast_obj;
2586
2587         ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
2588                                        &o->pending_cmds_head, link,
2589                                        struct ecore_pending_mcast_cmd) {
2590                 switch (cmd_pos->type) {
2591                 case ECORE_MCAST_CMD_ADD:
2592                         ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
2593                         break;
2594
2595                 case ECORE_MCAST_CMD_DEL:
2596                         ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
2597                         break;
2598
2599                 case ECORE_MCAST_CMD_RESTORE:
2600                         ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
2601                                                            &cnt);
2602                         break;
2603
2604                 default:
2605                         PMD_DRV_LOG(ERR, "Unknown command: %d", cmd_pos->type);
2606                         return ECORE_INVAL;
2607                 }
2608
2609                 /* If the command has been completed - remove it from the list
2610                  * and free the memory
2611                  */
2612                 if (cmd_pos->done) {
2613                         ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
2614                                                 &o->pending_cmds_head);
2615                         ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
2616                 }
2617
2618                 /* Break if we reached the maximum number of rules */
2619                 if (cnt >= o->max_cmd_len)
2620                         break;
2621         }
2622
2623         return cnt;
2624 }
2625
2626 static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
2627                                 struct ecore_mcast_obj *o,
2628                                 struct ecore_mcast_ramrod_params *p,
2629                                 int *line_idx)
2630 {
2631         struct ecore_mcast_list_elem *mlist_pos;
2632         union ecore_mcast_config_data cfg_data = { NULL };
2633         int cnt = *line_idx;
2634
2635         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2636                                   struct ecore_mcast_list_elem) {
2637                 cfg_data.mac = mlist_pos->mac;
2638                 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
2639
2640                 cnt++;
2641
2642                 ECORE_MSG
2643                     ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2644                      mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2645                      mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
2646         }
2647
2648         *line_idx = cnt;
2649 }
2650
2651 static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
2652                                 struct ecore_mcast_obj *o,
2653                                 struct ecore_mcast_ramrod_params *p,
2654                                 int *line_idx)
2655 {
2656         int cnt = *line_idx, i;
2657
2658         for (i = 0; i < p->mcast_list_len; i++) {
2659                 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
2660
2661                 cnt++;
2662
2663                 ECORE_MSG("Deleting MAC. %d left", p->mcast_list_len - i - 1);
2664         }
2665
2666         *line_idx = cnt;
2667 }
2668
2669 /**
2670  * ecore_mcast_handle_current_cmd - handle the current (non-pending) command
2671  *
2672  * @sc:         device handle
2673  * @p:          multicast ramrod parameters describing the command
2674  * @cmd:        command type (ADD/DEL/RESTORE)
2675  * @start_cnt:  first line in the ramrod data that may be used
2676  *
2677  * This function is called if there is enough place for the current command in
2678  * the ramrod data.
2679  * Returns the number of lines filled in the ramrod data in total.
2680  */
2681 static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
2682                                           ecore_mcast_ramrod_params *p,
2683                                           enum ecore_mcast_cmd cmd,
2684                                           int start_cnt)
2685 {
2686         struct ecore_mcast_obj *o = p->mcast_obj;
2687         int cnt = start_cnt;
2688
2689         ECORE_MSG("p->mcast_list_len=%d", p->mcast_list_len);
2690
2691         switch (cmd) {
2692         case ECORE_MCAST_CMD_ADD:
2693                 ecore_mcast_hdl_add(sc, o, p, &cnt);
2694                 break;
2695
2696         case ECORE_MCAST_CMD_DEL:
2697                 ecore_mcast_hdl_del(sc, o, p, &cnt);
2698                 break;
2699
2700         case ECORE_MCAST_CMD_RESTORE:
2701                 o->hdl_restore(sc, o, 0, &cnt);
2702                 break;
2703
2704         default:
2705                 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2706                 return ECORE_INVAL;
2707         }
2708
2709         /* The current command has been handled */
2710         p->mcast_list_len = 0;
2711
2712         return cnt;
2713 }
2714
2715 static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
2716                                    struct ecore_mcast_ramrod_params *p,
2717                                    enum ecore_mcast_cmd cmd)
2718 {
2719         struct ecore_mcast_obj *o = p->mcast_obj;
2720         int reg_sz = o->get_registry_size(o);
2721
2722         switch (cmd) {
2723                 /* DEL command deletes all currently configured MACs */
2724         case ECORE_MCAST_CMD_DEL:
2725                 o->set_registry_size(o, 0);
2726                 /* fall-through */
2727
2728                 /* RESTORE command will restore the entire multicast configuration */
2729         case ECORE_MCAST_CMD_RESTORE:
2730                 /* Here we set the approximate amount of work to do, which
2731                  * in fact may be less, as some MACs in postponed ADD
2732                  * command(s) scheduled before this command may fall into
2733                  * the same bin, so the actual number of bins set in the
2734                  * registry would be less than we estimate here. See
2735                  * ecore_mcast_set_one_rule_e2() for further details.
2736                  */
2737                 p->mcast_list_len = reg_sz;
2738                 break;
2739
2740         case ECORE_MCAST_CMD_ADD:
2741         case ECORE_MCAST_CMD_CONT:
2742                 /* Here we assume that all new MACs will fall into new bins.
2743                  * However we will correct the real registry size after we
2744                  * handle all pending commands.
2745                  */
2746                 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2747                 break;
2748
2749         default:
2750                 PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
2751                 return ECORE_INVAL;
2752         }
2753
2754         /* Increase the total number of MACs pending to be configured */
2755         o->total_pending_num += p->mcast_list_len;
2756
2757         return ECORE_SUCCESS;
2758 }
2759
2760 static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
2761                                   struct ecore_mcast_ramrod_params *p,
2762                                   int old_num_bins)
2763 {
2764         struct ecore_mcast_obj *o = p->mcast_obj;
2765
2766         o->set_registry_size(o, old_num_bins);
2767         o->total_pending_num -= p->mcast_list_len;
2768 }
2769
2770 /**
2771  * ecore_mcast_set_rdata_hdr_e2 - sets the ramrod data header values
2772  *
2773  * @sc:         device handle
2774  * @p:          multicast ramrod parameters
2775  * @len:        number of rules to handle
2776  */
2777 static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc
2778                                          *sc, struct ecore_mcast_ramrod_params
2779                                          *p, uint8_t len)
2780 {
2781         struct ecore_raw_obj *r = &p->mcast_obj->raw;
2782         struct eth_multicast_rules_ramrod_data *data =
2783             (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2784
2785         data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
2786                                               (ECORE_FILTER_MCAST_PENDING <<
2787                                                ECORE_SWCID_SHIFT));
2788         data->header.rule_cnt = len;
2789 }
2790
2791 /**
2792  * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2793  *
2794  * @o:          multicast object
2795  *
2796  * Recalculate the actual number of set bins in the registry using Brian
2797  * Kernighan's algorithm: its execution complexity is proportional to the
2798  * number of set bins.
2799  */
2800 static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o)
2801 {
2802         int i, cnt = 0;
2803         uint64_t elem;
2804
2805         for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
2806                 elem = o->registry.aprox_match.vec[i];
2807                 for (; elem; cnt++)
2808                         elem &= elem - 1;
2809         }
2810
2811         o->set_registry_size(o, cnt);
2812
2813         return ECORE_SUCCESS;
2814 }
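/* Illustrative sketch (not part of the driver): the bit-counting loop
 * above is Brian Kernighan's method. Each "v &= v - 1" clears the
 * lowest set bit, so the loop body runs once per set bit rather than
 * once per bit position.
 */
#if 0
static int example_popcount64(uint64_t v)
{
	int cnt = 0;

	while (v) {
		v &= v - 1;	/* clear the least-significant set bit */
		cnt++;
	}

	return cnt;		/* e.g. example_popcount64(0xf0) == 4 */
}
#endif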
2815
2816 static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
2817                                 struct ecore_mcast_ramrod_params *p,
2818                                 enum ecore_mcast_cmd cmd)
2819 {
2820         struct ecore_raw_obj *raw = &p->mcast_obj->raw;
2821         struct ecore_mcast_obj *o = p->mcast_obj;
2822         struct eth_multicast_rules_ramrod_data *data =
2823             (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2824         int cnt = 0, rc;
2825
2826         /* Reset the ramrod data buffer */
2827         ECORE_MEMSET(data, 0, sizeof(*data));
2828
2829         cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
2830
2831         /* If there are no more pending commands - clear SCHEDULED state */
2832         if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
2833                 o->clear_sched(o);
2834
2835         /* The condition below holds only if there was enough room in the
2836          * ramrod data for all pending commands and for the current
2837          * command. Otherwise the current command would have been added
2838          * to the pending commands and p->mcast_list_len would have been
2839          * zeroed.
2840          */
2841         if (p->mcast_list_len > 0)
2842                 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
2843
2844         /* We've pulled out some MACs - update the total number of
2845          * outstanding.
2846          */
2847         o->total_pending_num -= cnt;
2848
2849         /* send a ramrod */
2850         ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
2851         ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
2852
2853         ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt);
2854
2855         /* Update a registry size if there are no more pending operations.
2856          *
2857          * We don't want to change the value of the registry size if there are
2858          * pending operations because we want it to always be equal to the
2859          * exact or the approximate number (see ecore_mcast_validate_e2()) of
2860          * set bins after the last requested operation in order to properly
2861          * evaluate the size of the next DEL/RESTORE operation.
2862          *
2863          * Note that we update the registry itself during command(s) handling
2864          * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
2865          * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2866          * with a limited amount of update commands (per MAC/bin) and we don't
2867          * know in this scope what the actual state of bins configuration is
2868          * going to be after this ramrod.
2869          */
2870         if (!o->total_pending_num)
2871                 ecore_mcast_refresh_registry_e2(o);
2872
2873         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2874          * RAMROD_PENDING status immediately.
2875          */
2876         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2877                 raw->clear_pending(raw);
2878                 return ECORE_SUCCESS;
2879         } else {
2880                 /* No need for an explicit memory barrier here: the
2881                  * ordering of writing to the SPQ element and updating
2882                  * the SPQ producer (which involves a memory read) is
2883                  * guaranteed by the full memory barrier inside
2884                  * ecore_sp_post().
2885                  */
2886
2887                 /* Send a ramrod */
2888                 rc = ecore_sp_post(sc,
2889                                    RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2890                                    raw->cid,
2891                                    raw->rdata_mapping, ETH_CONNECTION_TYPE);
2892                 if (rc)
2893                         return rc;
2894
2895                 /* Ramrod completion is pending */
2896                 return ECORE_PENDING;
2897         }
2898 }
2899
2900 static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc,
2901                                     struct ecore_mcast_ramrod_params *p,
2902                                     enum ecore_mcast_cmd cmd)
2903 {
2904         /* Mark that there is work to do */
2905         if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
2906                 p->mcast_list_len = 1;
2907
2908         return ECORE_SUCCESS;
2909 }
2910
2911 static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc,
2912                                    __rte_unused struct ecore_mcast_ramrod_params
2913                                    *p, __rte_unused int old_num_bins)
2914 {
2915         /* Do nothing */
2916 }
2917
2918 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
2919 do { \
2920         (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2921 } while (0)
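/* Illustrative sketch: the macro above treats "filter" as a flat bit
 * vector built from 32-bit words - bit N lands in word N >> 5 at
 * position N & 0x1f. For example:
 */
#if 0
	uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };

	ECORE_57711_SET_MC_FILTER(mc_filter, 37);
	/* mc_filter[1] == (1 << 5), i.e. bit 37 == word 1, bit 5 */
#endif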
2922
2923 static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
2924                                     struct ecore_mcast_obj *o,
2925                                     struct ecore_mcast_ramrod_params *p,
2926                                     uint32_t *mc_filter)
2927 {
2928         struct ecore_mcast_list_elem *mlist_pos;
2929         int bit;
2930
2931         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2932                                   struct ecore_mcast_list_elem) {
2933                 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
2934                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2935
2936                 ECORE_MSG
2937                     ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
2938                      mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2939                      mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
2940                      bit);
2941
2942                 /* bookkeeping... */
2943                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit);
2944         }
2945 }
2946
2947 static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc
2948                                         __rte_unused,
2949                                         struct ecore_mcast_obj *o,
2950                                         uint32_t *mc_filter)
2951 {
2952         int bit;
2953
2954         for (bit = ecore_mcast_get_next_bin(o, 0);
2955              bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
2956                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2957                 ECORE_MSG("About to set bin %d", bit);
2958         }
2959 }
2960
2961 /* On 57711 we write the multicast MACs' approximate match
2962  * table directly into the TSTORM's internal RAM, so no
2963  * tricks are needed to make it work.
2964  */
2965 static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
2966                                  struct ecore_mcast_ramrod_params *p,
2967                                  enum ecore_mcast_cmd cmd)
2968 {
2969         int i;
2970         struct ecore_mcast_obj *o = p->mcast_obj;
2971         struct ecore_raw_obj *r = &o->raw;
2972
2973         /* Unless CLEAR_ONLY has been requested - build the multicast
2974          * filter and write it into the internal memory; otherwise just
2975          * clear the registry.
2976          */
2976         if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2977                 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
2978
2979                 /* Set the multicast filter bits before writing it into
2980                  * the internal memory.
2981                  */
2982                 switch (cmd) {
2983                 case ECORE_MCAST_CMD_ADD:
2984                         ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
2985                         break;
2986
2987                 case ECORE_MCAST_CMD_DEL:
2988                         ECORE_MSG("Invalidating multicast MACs configuration");
2989
2990                         /* clear the registry */
2991                         ECORE_MEMSET(o->registry.aprox_match.vec, 0,
2992                                      sizeof(o->registry.aprox_match.vec));
2993                         break;
2994
2995                 case ECORE_MCAST_CMD_RESTORE:
2996                         ecore_mcast_hdl_restore_e1h(sc, o, mc_filter);
2997                         break;
2998
2999                 default:
3000                         PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
3001                         return ECORE_INVAL;
3002                 }
3003
3004                 /* Set the mcast filter in the internal memory */
3005                 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3006                         REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3007         } else
3008                 /* clear the registry */
3009                 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3010                              sizeof(o->registry.aprox_match.vec));
3011
3012         /* We are done */
3013         r->clear_pending(r);
3014
3015         return ECORE_SUCCESS;
3016 }
3017
3018 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3019 {
3020         return o->registry.aprox_match.num_bins_set;
3021 }
3022
3023 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3024                                                 int n)
3025 {
3026         o->registry.aprox_match.num_bins_set = n;
3027 }
3028
3029 int ecore_config_mcast(struct bnx2x_softc *sc,
3030                        struct ecore_mcast_ramrod_params *p,
3031                        enum ecore_mcast_cmd cmd)
3032 {
3033         struct ecore_mcast_obj *o = p->mcast_obj;
3034         struct ecore_raw_obj *r = &o->raw;
3035         int rc = 0, old_reg_size;
3036
3037         /* This is needed to recover number of currently configured mcast macs
3038          * in case of failure.
3039          */
3040         old_reg_size = o->get_registry_size(o);
3041
3042         /* Do some calculations and checks */
3043         rc = o->validate(sc, p, cmd);
3044         if (rc)
3045                 return rc;
3046
3047         /* Return if there is no work to do */
3048         if ((!p->mcast_list_len) && (!o->check_sched(o)))
3049                 return ECORE_SUCCESS;
3050
3051         ECORE_MSG
3052             ("o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
3053              o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3054
3055         /* Enqueue the current command to the pending list if we can't complete
3056          * it in the current iteration
3057          */
3058         if (r->check_pending(r) ||
3059             ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3060                 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3061                 if (rc < 0)
3062                         goto error_exit1;
3063
3064                 /* As long as the current command is in a command list we
3065                  * don't need to handle it separately.
3066                  */
3067                 p->mcast_list_len = 0;
3068         }
3069
3070         if (!r->check_pending(r)) {
3071
3072                 /* Set 'pending' state */
3073                 r->set_pending(r);
3074
3075                 /* Configure the new classification in the chip */
3076                 rc = o->config_mcast(sc, p, cmd);
3077                 if (rc < 0)
3078                         goto error_exit2;
3079
3080                 /* Wait for a ramrod completion if was requested */
3081                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3082                         rc = o->wait_comp(sc, o);
3083         }
3084
3085         return rc;
3086
3087 error_exit2:
3088         r->clear_pending(r);
3089
3090 error_exit1:
3091         o->revert(sc, p, old_reg_size);
3092
3093         return rc;
3094 }
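/* Illustrative usage sketch for ecore_config_mcast(). Field and flag
 * names are the ones used in this file; "sc->mcast_obj" and the list
 * wiring are schematic assumptions, not verified driver code.
 */
#if 0
	struct ecore_mcast_ramrod_params rparams = { 0 };
	int rc;

	rparams.mcast_obj = &sc->mcast_obj;	/* assumed softc member */
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &rparams.ramrod_flags);
	/* link ecore_mcast_list_elem entries into rparams.mcast_list and
	 * set rparams.mcast_list_len accordingly ...
	 */
	rc = ecore_config_mcast(sc, &rparams, ECORE_MCAST_CMD_ADD);
	if (rc < 0)
		PMD_DRV_LOG(ERR, "mcast ADD failed: %d", rc);
#endif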
3095
3096 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3097 {
3098         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3099         ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3100         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3101 }
3102
3103 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3104 {
3105         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3106         ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3107         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3108 }
3109
3110 static int ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3111 {
3112         return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3113 }
3114
3115 static int ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3116 {
3117         return o->raw.check_pending(&o->raw) || o->check_sched(o);
3118 }
3119
3120 void ecore_init_mcast_obj(struct bnx2x_softc *sc,
3121                           struct ecore_mcast_obj *mcast_obj,
3122                           uint8_t mcast_cl_id, uint32_t mcast_cid,
3123                           uint8_t func_id, uint8_t engine_id, void *rdata,
3124                           ecore_dma_addr_t rdata_mapping, int state,
3125                           unsigned long *pstate, ecore_obj_type type)
3126 {
3127         ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3128
3129         ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3130                            rdata, rdata_mapping, state, pstate, type);
3131
3132         mcast_obj->engine_id = engine_id;
3133
3134         ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3135
3136         mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3137         mcast_obj->check_sched = ecore_mcast_check_sched;
3138         mcast_obj->set_sched = ecore_mcast_set_sched;
3139         mcast_obj->clear_sched = ecore_mcast_clear_sched;
3140
3141         if (CHIP_IS_E1H(sc)) {
3142                 mcast_obj->config_mcast = ecore_mcast_setup_e1h;
3143                 mcast_obj->enqueue_cmd = NULL;
3144                 mcast_obj->hdl_restore = NULL;
3145                 mcast_obj->check_pending = ecore_mcast_check_pending;
3146
3147                 /* 57711 doesn't send a ramrod, so it has unlimited credit
3148                  * for one command.
3149                  */
3150                 mcast_obj->max_cmd_len = -1;
3151                 mcast_obj->wait_comp = ecore_mcast_wait;
3152                 mcast_obj->set_one_rule = NULL;
3153                 mcast_obj->validate = ecore_mcast_validate_e1h;
3154                 mcast_obj->revert = ecore_mcast_revert_e1h;
3155                 mcast_obj->get_registry_size =
3156                     ecore_mcast_get_registry_size_aprox;
3157                 mcast_obj->set_registry_size =
3158                     ecore_mcast_set_registry_size_aprox;
3159         } else {
3160                 mcast_obj->config_mcast = ecore_mcast_setup_e2;
3161                 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
3162                 mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2;
3163                 mcast_obj->check_pending = ecore_mcast_check_pending;
3164                 mcast_obj->max_cmd_len = 16;
3165                 mcast_obj->wait_comp = ecore_mcast_wait;
3166                 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
3167                 mcast_obj->validate = ecore_mcast_validate_e2;
3168                 mcast_obj->revert = ecore_mcast_revert_e2;
3169                 mcast_obj->get_registry_size =
3170                     ecore_mcast_get_registry_size_aprox;
3171                 mcast_obj->set_registry_size =
3172                     ecore_mcast_set_registry_size_aprox;
3173         }
3174 }
3175
3176 /*************************** Credit handling **********************************/
3177
3178 /**
3179  * __atomic_add_ifless - add if the result is less than a given value.
3180  *
3181  * @v:  pointer of type ecore_atomic_t
3182  * @a:  the amount to add to v...
3183  * @u:  ...if (v + a) is less than u.
3184  *
3185  * returns TRUE if (v + a) was less than u, and FALSE otherwise.
3186  *
3187  */
3188 static int __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
3189 {
3190         int c, old;
3191
3192         c = ECORE_ATOMIC_READ(v);
3193         for (;;) {
3194                 if (ECORE_UNLIKELY(c + a >= u))
3195                         return FALSE;
3196
3197                 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
3198                 if (ECORE_LIKELY(old == c))
3199                         break;
3200                 c = old;
3201         }
3202
3203         return TRUE;
3204 }
3205
3206 /**
3207  * __atomic_dec_ifmoe - decrement if the result is greater or equal to a given value.
3208  *
3209  * @v:  pointer of type ecore_atomic_t
3210  * @a:  the amount to subtract from v...
3211  * @u:  ...if (v - a) is greater than or equal to u.
3212  *
3213  * returns TRUE if (v - a) was greater than or equal to u, and FALSE
3214  * otherwise.
3215  */
3216 static int __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
3217 {
3218         int c, old;
3219
3220         c = ECORE_ATOMIC_READ(v);
3221         for (;;) {
3222                 if (ECORE_UNLIKELY(c - a < u))
3223                         return FALSE;
3224
3225                 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
3226                 if (ECORE_LIKELY(old == c))
3227                         break;
3228                 c = old;
3229         }
3230
3231         return TRUE;
3232 }
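/* Illustrative sketch: both helpers above are classic compare-and-swap
 * retry loops - on a lost race the snapshot "c" is refreshed from
 * "old", so the bounds check is always applied to a current value.
 * A caller would use them like this ("pool" is a schematic name):
 */
#if 0
	/* consume 3 units of credit, but never drive the counter below 0 */
	if (__atomic_dec_ifmoe(&pool->credit, 3, 0))
		; /* credit granted */
	else
		; /* pool exhausted */
#endif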
3233
3234 static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
3235 {
3236         int rc;
3237
3238         ECORE_SMP_MB();
3239         rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3240         ECORE_SMP_MB();
3241
3242         return rc;
3243 }
3244
3245 static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
3246 {
3247         int rc;
3248
3249         ECORE_SMP_MB();
3250
3251         /* Don't allow a refill if credit + cnt > pool_sz */
3252         rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3253
3254         ECORE_SMP_MB();
3255
3256         return rc;
3257 }
3258
3259 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
3260 {
3261         int cur_credit;
3262
3263         ECORE_SMP_MB();
3264         cur_credit = ECORE_ATOMIC_READ(&o->credit);
3265
3266         return cur_credit;
3267 }
3268
3269 static int ecore_credit_pool_always_TRUE(__rte_unused struct
3270                                          ecore_credit_pool_obj *o,
3271                                          __rte_unused int cnt)
3272 {
3273         return TRUE;
3274 }
3275
3276 static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o,
3277                                        int *offset)
3278 {
3279         int idx, vec, i;
3280
3281         *offset = -1;
3282
3283         /* Find "internal cam-offset" then add to base for this object... */
3284         for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
3285
3286                 /* Skip the current vector if there are no free entries in it */
3287                 if (!o->pool_mirror[vec])
3288                         continue;
3289
3290                 /* If we've got here we are going to find a free entry */
3291                 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3292                      i < BIT_VEC64_ELEM_SZ; idx++, i++)
3293
3294                         if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3295                                 /* Got one!! */
3296                                 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3297                                 *offset = o->base_pool_offset + idx;
3298                                 return TRUE;
3299                         }
3300         }
3301
3302         return FALSE;
3303 }
3304
3305 static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o,
3306                                        int offset)
3307 {
3308         if (offset < o->base_pool_offset)
3309                 return FALSE;
3310
3311         offset -= o->base_pool_offset;
3312
3313         if (offset >= o->pool_sz)
3314                 return FALSE;
3315
3316         /* Return the entry to the pool */
3317         BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3318
3319         return TRUE;
3320 }
3321
3322 static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct
3323                                                    ecore_credit_pool_obj *o,
3324                                                    __rte_unused int offset)
3325 {
3326         return TRUE;
3327 }
3328
3329 static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct
3330                                                    ecore_credit_pool_obj *o,
3331                                                    __rte_unused int *offset)
3332 {
3333         *offset = -1;
3334         return TRUE;
3335 }
3336
3337 /**
3338  * ecore_init_credit_pool - initialize credit pool internals.
3339  *
3340  * @p:          credit pool object
3341  * @base:       base entry in the CAM to use.
3342  * @credit:     pool size.
3343  *
3344  * If base is negative, no CAM entry handling will be performed.
3345  * If credit is negative, pool operations will always succeed (unlimited pool).
3346  *
3347  */
3348 static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
3349                                    int base, int credit)
3350 {
3351         /* Zero the object first */
3352         ECORE_MEMSET(p, 0, sizeof(*p));
3353
3354         /* Set the table to all 1s */
3355         ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3356
3357         /* Init a pool as full */
3358         ECORE_ATOMIC_SET(&p->credit, credit);
3359
3360         /* The total pool size */
3361         p->pool_sz = credit;
3362
3363         p->base_pool_offset = base;
3364
3365         /* Commit the change */
3366         ECORE_SMP_MB();
3367
3368         p->check = ecore_credit_pool_check;
3369
3370         /* if pool credit is negative - disable the checks */
3371         if (credit >= 0) {
3372                 p->put = ecore_credit_pool_put;
3373                 p->get = ecore_credit_pool_get;
3374                 p->put_entry = ecore_credit_pool_put_entry;
3375                 p->get_entry = ecore_credit_pool_get_entry;
3376         } else {
3377                 p->put = ecore_credit_pool_always_TRUE;
3378                 p->get = ecore_credit_pool_always_TRUE;
3379                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3380                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3381         }
3382
3383         /* If base is negative - disable entries handling */
3384         if (base < 0) {
3385                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3386                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3387         }
3388 }
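/* Illustrative sketch of the three configurations described above
 * (arguments are example values only):
 */
#if 0
	ecore_init_credit_pool(p, 10, 32);  /* 32 credits, CAM entries 10..41   */
	ecore_init_credit_pool(p, -1, 32);  /* 32 credits, no CAM entry handling */
	ecore_init_credit_pool(p, 0, -1);   /* unlimited: get/put always succeed */
#endif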
3389
3390 void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
3391                                 struct ecore_credit_pool_obj *p,
3392                                 uint8_t func_id, uint8_t func_num)
3393 {
3394
3395 #define ECORE_CAM_SIZE_EMUL 5
3396
3397         int cam_sz;
3398
3399         if (CHIP_IS_E1H(sc)) {
3400                 /* CAM credit is equally divided between all active functions
3401                  * on the PORT.
3402                  */
3403                 if (func_num > 0) {
3404                         if (!CHIP_REV_IS_SLOW(sc))
3405                                 cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
3406                         else
3407                                 cam_sz = ECORE_CAM_SIZE_EMUL;
3408                         ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
3409                 } else {
3410                         /* this should never happen! Block MAC operations. */
3411                         ecore_init_credit_pool(p, 0, 0);
3412                 }
3413
3414         } else {
3415
3416                 /*
3417                  * CAM credit is equally divided between all active functions
3418                  * on the PATH.
3419                  */
3420                 if (func_num > 0) {
3421                         if (!CHIP_REV_IS_SLOW(sc))
3422                                 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3423                         else
3424                                 cam_sz = ECORE_CAM_SIZE_EMUL;
3425
3426                         /* No need for CAM entries handling for 57712 and
3427                          * newer.
3428                          */
3429                         ecore_init_credit_pool(p, -1, cam_sz);
3430                 } else {
3431                         /* this should never happen! Block MAC operations. */
3432                         ecore_init_credit_pool(p, 0, 0);
3433                 }
3434         }
3435 }
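/* Worked example (illustrative; MAX_MAC_CREDIT_E1H == 256 is an
 * assumed value): on a non-emulation E1H port with func_num == 4,
 * each function gets a contiguous CAM window of its own:
 */
#if 0
	cam_sz = 256 / (2 * 4);				/* 32 entries per function */
	ecore_init_credit_pool(p, 2 * cam_sz, cam_sz);	/* func_id 2 -> CAM 64..95 */
#endif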
3436
3437 void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
3438                                  struct ecore_credit_pool_obj *p,
3439                                  uint8_t func_id, uint8_t func_num)
3440 {
3441         if (CHIP_IS_E1x(sc)) {
3442                 /* There is no VLAN credit in HW on 57711; only
3443                  * MAC / MAC-VLAN pairs can be set.
3444                  */
3445                 ecore_init_credit_pool(p, 0, -1);
3446         } else {
3447                 /* CAM credit is equally divided between all active functions
3448                  * on the PATH.
3449                  */
3450                 if (func_num > 0) {
3451                         int credit = MAX_VLAN_CREDIT_E2 / func_num;
3452                         ecore_init_credit_pool(p, func_id * credit, credit);
3453                 } else
3454                         /* this should never happen! Block VLAN operations. */
3455                         ecore_init_credit_pool(p, 0, 0);
3456         }
3457 }
3458
3459 /****************** RSS Configuration ******************/
3460
3461 /**
3462  * ecore_setup_rss - configure RSS
3463  *
3464  * @sc:         device handle
3465  * @p:          rss configuration
3466  *
3467  * Sends an RSS_UPDATE ramrod to configure it.
3468  */
3469 static int ecore_setup_rss(struct bnx2x_softc *sc,
3470                            struct ecore_config_rss_params *p)
3471 {
3472         struct ecore_rss_config_obj *o = p->rss_obj;
3473         struct ecore_raw_obj *r = &o->raw;
3474         struct eth_rss_update_ramrod_data *data =
3475             (struct eth_rss_update_ramrod_data *)(r->rdata);
3476         uint8_t rss_mode = 0;
3477         int rc;
3478
3479         ECORE_MEMSET(data, 0, sizeof(*data));
3480
3481         ECORE_MSG("Configuring RSS");
3482
3483         /* Set an echo field */
3484         data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3485                                        (r->state << ECORE_SWCID_SHIFT));
3486
3487         /* RSS mode */
3488         if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
3489                 rss_mode = ETH_RSS_MODE_DISABLED;
3490         else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
3491                 rss_mode = ETH_RSS_MODE_REGULAR;
3492
3493         data->rss_mode = rss_mode;
3494
3495         ECORE_MSG("rss_mode=%d", rss_mode);
3496
3497         /* RSS capabilities */
3498         if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
3499                 data->capabilities |=
3500                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
3501
3502         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
3503                 data->capabilities |=
3504                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
3505
3506         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
3507                 data->capabilities |=
3508                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
3509
3510         if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
3511                 data->capabilities |=
3512                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
3513
3514         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
3515                 data->capabilities |=
3516                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
3517
3518         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
3519                 data->capabilities |=
3520                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
3521
3522         if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
3523                 data->udp_4tuple_dst_port_mask =
3524                     ECORE_CPU_TO_LE16(p->tunnel_mask);
3525                 data->udp_4tuple_dst_port_value =
3526                     ECORE_CPU_TO_LE16(p->tunnel_value);
3527         }
3528
3529         /* Hashing mask */
3530         data->rss_result_mask = p->rss_result_mask;
3531
3532         /* RSS engine ID */
3533         data->rss_engine_id = o->engine_id;
3534
3535         ECORE_MSG("rss_engine_id=%d", data->rss_engine_id);
3536
3537         /* Indirection table */
3538         ECORE_MEMCPY(data->indirection_table, p->ind_table,
3539                      T_ETH_INDIRECTION_TABLE_SIZE);
3540
3541         /* Remember the last configuration */
3542         ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
3543
3544         /* RSS keys */
3545         if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
3546                 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
3547                              sizeof(data->rss_key));
3548                 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
3549         }
3550
3551         /* No need for an explicit memory barrier here: the
3552          * ordering of writing to the SPQ element and updating
3553          * the SPQ producer (which involves a memory read) is
3554          * guaranteed by the full memory barrier inside
3555          * ecore_sp_post().
3556          */
3557
3558         /* Send a ramrod */
3559         rc = ecore_sp_post(sc,
3560                            RAMROD_CMD_ID_ETH_RSS_UPDATE,
3561                            r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE);
3562
3563         if (rc < 0)
3564                 return rc;
3565
3566         return ECORE_PENDING;
3567 }
3568
3569 int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p)
3570 {
3571         int rc;
3572         struct ecore_rss_config_obj *o = p->rss_obj;
3573         struct ecore_raw_obj *r = &o->raw;
3574
3575         /* Do nothing if only driver cleanup was requested */
3576         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
3577                 return ECORE_SUCCESS;
3578
3579         r->set_pending(r);
3580
3581         rc = o->config_rss(sc, p);
3582         if (rc < 0) {
3583                 r->clear_pending(r);
3584                 return rc;
3585         }
3586
3587         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3588                 rc = r->wait_comp(sc, r);
3589
3590         return rc;
3591 }
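/* Illustrative usage sketch for ecore_config_rss(). Flag and field
 * names are the ones defined in this file; "sc->rss_conf_obj" and the
 * mask value are schematic assumptions.
 */
#if 0
	struct ecore_config_rss_params rss = { 0 };
	int rc;

	rss.rss_obj = &sc->rss_conf_obj;	/* assumed softc member */
	ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &rss.rss_flags);
	ECORE_SET_BIT(ECORE_RSS_IPV4, &rss.rss_flags);
	ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &rss.rss_flags);
	rss.rss_result_mask = 0x7f;	/* assumes a 128-entry indirection table */
	/* fill rss.ind_table[] and, with ECORE_RSS_SET_SRCH, rss.rss_key[] */
	rc = ecore_config_rss(sc, &rss);
#endif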
3592
3593 void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
3594                                uint8_t cl_id, uint32_t cid, uint8_t func_id,
3595                                uint8_t engine_id, void *rdata,
3596                                ecore_dma_addr_t rdata_mapping, int state,
3597                                unsigned long *pstate, ecore_obj_type type)
3598 {
3599         ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
3600                            rdata_mapping, state, pstate, type);
3601
3602         rss_obj->engine_id = engine_id;
3603         rss_obj->config_rss = ecore_setup_rss;
3604 }
3605
3606 /********************** Queue state object ***********************************/
3607
3608 /**
3609  * ecore_queue_state_change - perform Queue state change transition
3610  *
3611  * @sc:         device handle
3612  * @params:     parameters to perform the transition
3613  *
3614  * returns 0 in case of a successfully completed transition, a negative error
3615  * code in case of failure, or a positive (EBUSY) value if a completion
3616  * is still pending (possible only if RAMROD_COMP_WAIT is
3617  * not set in params->ramrod_flags for asynchronous commands).
3618  *
3619  */
3620 int ecore_queue_state_change(struct bnx2x_softc *sc,
3621                              struct ecore_queue_state_params *params)
3622 {
3623         struct ecore_queue_sp_obj *o = params->q_obj;
3624         int rc, pending_bit;
3625         unsigned long *pending = &o->pending;
3626
3627         /* Check that the requested transition is legal */
3628         rc = o->check_transition(sc, o, params);
3629         if (rc) {
3630                 PMD_DRV_LOG(ERR, "check transition returned an error. rc %d",
3631                             rc);
3632                 return ECORE_INVAL;
3633         }
3634
3635         /* Set "pending" bit */
3636         ECORE_MSG("pending bit was=%lx", o->pending);
3637         pending_bit = o->set_pending(o, params);
3638         ECORE_MSG("pending bit now=%lx", o->pending);
3639
3640         /* Don't send a command if only driver cleanup was requested */
3641         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
3642                 o->complete_cmd(sc, o, pending_bit);
3643         else {
3644                 /* Send a ramrod */
3645                 rc = o->send_cmd(sc, params);
3646                 if (rc) {
3647                         o->next_state = ECORE_Q_STATE_MAX;
3648                         ECORE_CLEAR_BIT(pending_bit, pending);
3649                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3650                         return rc;
3651                 }
3652
3653                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
3654                         rc = o->wait_comp(sc, o, pending_bit);
3655                         if (rc)
3656                                 return rc;
3657
3658                         return ECORE_SUCCESS;
3659                 }
3660         }
3661
3662         return ECORE_RET_PENDING(pending_bit, pending);
3663 }
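/* Illustrative usage sketch: driving a queue through one transition
 * and waiting for the ramrod completion. "fp->q_obj" is a schematic
 * name for an initialized ecore_queue_sp_obj.
 */
#if 0
	struct ecore_queue_state_params qparams = { 0 };
	int rc;

	qparams.q_obj = &fp->q_obj;		/* assumed fastpath member */
	qparams.cmd = ECORE_Q_CMD_INIT;
	ECORE_SET_BIT(RAMROD_COMP_WAIT, &qparams.ramrod_flags);
	rc = ecore_queue_state_change(sc, &qparams);
	/* rc: 0 on success, < 0 on error; without RAMROD_COMP_WAIT a
	 * positive value means the completion is still pending.
	 */
#endif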
3664
3665 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
3666                                    struct ecore_queue_state_params *params)
3667 {
3668         enum ecore_queue_cmd cmd = params->cmd, bit;
3669
3670         /* ACTIVATE and DEACTIVATE commands are implemented on top of
3671          * UPDATE command.
3672          */
3673         if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE))
3674                 bit = ECORE_Q_CMD_UPDATE;
3675         else
3676                 bit = cmd;
3677
3678         ECORE_SET_BIT(bit, &obj->pending);
3679         return bit;
3680 }
3681
3682 static int ecore_queue_wait_comp(struct bnx2x_softc *sc,
3683                                  struct ecore_queue_sp_obj *o,
3684                                  enum ecore_queue_cmd cmd)
3685 {
3686         return ecore_state_wait(sc, cmd, &o->pending);
3687 }
3688
3689 /**
3690  * ecore_queue_comp_cmd - complete the state change command.
3691  *
3692  * @sc:         device handle
3693  * @o:          queue state object
3694  * @cmd:        completed command
3695  *
3696  * Checks that the arrived completion is expected.
3697  */
3698 static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
3699                                 struct ecore_queue_sp_obj *o,
3700                                 enum ecore_queue_cmd cmd)
3701 {
3702         unsigned long cur_pending = o->pending;
3703
3704         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
3705                 PMD_DRV_LOG(ERR,
3706                             "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
3707                             cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
3708                             cur_pending, o->next_state);
3709                 return ECORE_INVAL;
3710         }
3711
3712         if (o->next_tx_only >= o->max_cos)
3713                 /* >= because the tx-only count must always be smaller than
3714                  * max_cos, since the primary connection occupies COS 0
3715                  */
3716                 PMD_DRV_LOG(ERR,
3717                             "illegal value for next tx_only: %d. max cos was %d",
3718                             o->next_tx_only, o->max_cos);
3719
3720         ECORE_MSG("Completing command %d for queue %d, setting state to %d",
3721                   cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
3722
3723         if (o->next_tx_only)    /* print num tx-only if any exist */
3724                 ECORE_MSG("primary cid %d: num tx-only cons %d",
3725                           o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
3726
3727         o->state = o->next_state;
3728         o->num_tx_only = o->next_tx_only;
3729         o->next_state = ECORE_Q_STATE_MAX;
3730
3731         /* It's important that o->state and o->next_state are
3732          * updated before o->pending.
3733          */
3734         wmb();
3735
3736         ECORE_CLEAR_BIT(cmd, &o->pending);
3737         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3738
3739         return ECORE_SUCCESS;
3740 }
3741
3742 static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params
3743                                        *cmd_params,
3744                                        struct client_init_ramrod_data *data)
3745 {
3746         struct ecore_queue_setup_params *params = &cmd_params->params.setup;
3747
3748         /* Rx data */
3749
3750         /* IPv6 TPA supported for E2 and above only */
3751         data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
3752                                           &params->flags) *
3753             CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
3754 }
3755
3756 static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
3757                                            struct ecore_queue_sp_obj *o,
3758                                            struct ecore_general_setup_params
3759                                            *params, struct client_init_general_data
3760                                            *gen_data, unsigned long *flags)
3761 {
3762         gen_data->client_id = o->cl_id;
3763
3764         if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
3765                 gen_data->statistics_counter_id = params->stat_id;
3766                 gen_data->statistics_en_flg = 1;
3767                 gen_data->statistics_zero_flg =
3768                     ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
3769         } else
3770                 gen_data->statistics_counter_id =
3771                     DISABLE_STATISTIC_COUNTER_ID_VALUE;
3772
3773         gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags);
3774         gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags);
3775         gen_data->sp_client_id = params->spcl_id;
3776         gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
3777         gen_data->func_id = o->func_id;
3778
3779         gen_data->cos = params->cos;
3780
3781         gen_data->traffic_type =
3782             ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
3783             LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
3784
3785         ECORE_MSG("flags: active %d, cos %d, stats en %d",
3786                   gen_data->activate_flg, gen_data->cos,
3787                   gen_data->statistics_en_flg);
3788 }
3789
3790 static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params,
3791                                       struct client_init_tx_data *tx_data,
3792                                       unsigned long *flags)
3793 {
3794         tx_data->enforce_security_flg =
3795             ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
3796         tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan);
3797         tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
3798         tx_data->tx_switching_flg =
3799             ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
3800         tx_data->anti_spoofing_flg =
3801             ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
3802         tx_data->force_default_pri_flg =
3803             ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
3804         tx_data->refuse_outband_vlan_flg =
3805             ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
3806         tx_data->tunnel_non_lso_pcsum_location =
3807             ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
3808             CSUM_ON_BD;
3809
3810         tx_data->tx_status_block_id = params->fw_sb_id;
3811         tx_data->tx_sb_index_number = params->sb_cq_index;
3812         tx_data->tss_leading_client_id = params->tss_leading_cl_id;
3813
3814         tx_data->tx_bd_page_base.lo =
3815             ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3816         tx_data->tx_bd_page_base.hi =
3817             ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3818
3819         /* Don't configure any Tx switching mode during queue SETUP */
3820         tx_data->state = 0;
3821 }
3822
3823 static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params,
3824                                          struct client_init_rx_data *rx_data)
3825 {
3826         /* flow control data */
3827         rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
3828         rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
3829         rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
3830         rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
3831         rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
3832         rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
3833         rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
3834 }
3835
3836 static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params,
3837                                       struct client_init_rx_data *rx_data,
3838                                       unsigned long *flags)
3839 {
3840         rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
3841             CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
3842         rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
3843             CLIENT_INIT_RX_DATA_TPA_MODE;
3844         rx_data->vmqueue_mode_en_flg = 0;
3845
3846         rx_data->extra_data_over_sgl_en_flg =
3847             ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
3848         rx_data->cache_line_alignment_log_size = params->cache_line_log;
3849         rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
3850         rx_data->client_qzone_id = params->cl_qzone_id;
3851         rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
3852
3853         /* Always start in DROP_ALL mode */
3854         rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
3855                                            CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
3856
3857         /* We don't set drop flags */
3858         rx_data->drop_ip_cs_err_flg = 0;
3859         rx_data->drop_tcp_cs_err_flg = 0;
3860         rx_data->drop_ttl0_flg = 0;
3861         rx_data->drop_udp_cs_err_flg = 0;
3862         rx_data->inner_vlan_removal_enable_flg =
3863             ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
3864         rx_data->outer_vlan_removal_enable_flg =
3865             ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
3866         rx_data->status_block_id = params->fw_sb_id;
3867         rx_data->rx_sb_index_number = params->sb_cq_index;
3868         rx_data->max_tpa_queues = params->max_tpa_queues;
3869         rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
3870         rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3871         rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3872         rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
3873         rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
3874         rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
3875                                                  flags);
3876
3877         if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
3878                 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
3879                 rx_data->is_approx_mcast = 1;
3880         }
3881
3882         rx_data->rss_engine_id = params->rss_engine_id;
3883
3884         /* silent vlan removal */
3885         rx_data->silent_vlan_removal_flg =
3886             ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
3887         rx_data->silent_vlan_value =
3888             ECORE_CPU_TO_LE16(params->silent_removal_value);
3889         rx_data->silent_vlan_mask =
3890             ECORE_CPU_TO_LE16(params->silent_removal_mask);
3891 }
3892
3893 /* initialize the general, tx and rx parts of a queue object */
3894 static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
3895                                         *cmd_params,
3896                                         struct client_init_ramrod_data *data)
3897 {
3898         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3899                                        &cmd_params->params.setup.gen_params,
3900                                        &data->general,
3901                                        &cmd_params->params.setup.flags);
3902
3903         ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params,
3904                                   &data->tx, &cmd_params->params.setup.flags);
3905
3906         ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params,
3907                                   &data->rx, &cmd_params->params.setup.flags);
3908
3909         ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params,
3910                                      &data->rx);
3911 }
3912
3913 /* initialize the general and tx parts of a tx-only queue object */
3914 static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
3915                                        *cmd_params,
3916                                        struct tx_queue_init_ramrod_data *data)
3917 {
3918         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3919                                        &cmd_params->params.tx_only.gen_params,
3920                                        &data->general,
3921                                        &cmd_params->params.tx_only.flags);
3922
3923         ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
3924                                   &data->tx, &cmd_params->params.tx_only.flags);
3925
3926         ECORE_MSG("cid %d, tx bd page lo %x hi %x",
3927                   cmd_params->q_obj->cids[0],
3928                   data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
3929 }
3930
3931 /**
3932  * ecore_q_init - init HW/FW queue
3933  *
3934  * @sc:         device handle
3935  * @params:     queue state parameters
3936  *
3937  * HW/FW initial Queue configuration:
3938  *      - HC: Rx and Tx
3939  *      - CDU context validation
3940  *
3941  */
3942 static int ecore_q_init(struct bnx2x_softc *sc,
3943                         struct ecore_queue_state_params *params)
3944 {
3945         struct ecore_queue_sp_obj *o = params->q_obj;
3946         struct ecore_queue_init_params *init = &params->params.init;
3947         uint16_t hc_usec;
3948         uint8_t cos;
3949
3950         /* Tx HC configuration */
3951         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
3952             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
3953                 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
3954
3955                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
3956                                                init->tx.sb_cq_index,
3957                                                !ECORE_TEST_BIT
3958                                                (ECORE_Q_FLG_HC_EN,
3959                                                 &init->tx.flags), hc_usec);
3960         }
3961
3962         /* Rx HC configuration */
3963         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
3964             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
3965                 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
3966
3967                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
3968                                                init->rx.sb_cq_index,
3969                                                !ECORE_TEST_BIT
3970                                                (ECORE_Q_FLG_HC_EN,
3971                                                 &init->rx.flags), hc_usec);
3972         }
3973
3974         /* Set CDU context validation values */
3975         for (cos = 0; cos < o->max_cos; cos++) {
3976                 ECORE_MSG("setting context validation. cid %d, cos %d",
3977                           o->cids[cos], cos);
3978                 ECORE_MSG("context pointer %p", init->cxts[cos]);
3979                 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
3980         }
3981
3982         /* As no ramrod is sent, complete the command immediately  */
3983         o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
3984
3985         ECORE_MMIOWB();
3986         ECORE_SMP_MB();
3987
3988         return ECORE_SUCCESS;
3989 }
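/* Illustrative sketch: hc_rate is a requested interrupt rate in
 * interrupts per second, so the coalescing value handed to the chip
 * is its period in microseconds:
 */
#if 0
	init->rx.hc_rate = 25000;	/* 25000 int/s -> 1000000 / 25000 = 40 usec */
	init->tx.hc_rate = 0;		/* 0 disables the conversion (hc_usec = 0)  */
#endif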
3990
3991 static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params
3992                                   *params)
3993 {
3994         struct ecore_queue_sp_obj *o = params->q_obj;
3995         struct client_init_ramrod_data *rdata =
3996             (struct client_init_ramrod_data *)o->rdata;
3997         ecore_dma_addr_t data_mapping = o->rdata_mapping;
3998         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
3999
4000         /* Clear the ramrod data */
4001         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4002
4003         /* Fill the ramrod data */
4004         ecore_q_fill_setup_data_cmn(sc, params, rdata);
4005
4006         /* No need for an explicit memory barrier here: the
4007          * ordering of writing to the SPQ element and updating
4008          * the SPQ producer (which involves a memory read) is
4009          * guaranteed by the full memory barrier inside
4010          * ecore_sp_post().
4011          */
4012
4013         return ecore_sp_post(sc,
4014                              ramrod,
4015                              o->cids[ECORE_PRIMARY_CID_INDEX],
4016                              data_mapping, ETH_CONNECTION_TYPE);
4017 }
4018
4019 static int ecore_q_send_setup_e2(struct bnx2x_softc *sc,
4020                                  struct ecore_queue_state_params *params)
4021 {
4022         struct ecore_queue_sp_obj *o = params->q_obj;
4023         struct client_init_ramrod_data *rdata =
4024             (struct client_init_ramrod_data *)o->rdata;
4025         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4026         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4027
4028         /* Clear the ramrod data */
4029         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4030
4031         /* Fill the ramrod data */
4032         ecore_q_fill_setup_data_cmn(sc, params, rdata);
4033         ecore_q_fill_setup_data_e2(params, rdata);
4034
4035         /* No need for an explicit memory barrier here: the
4036          * ordering of writing to the SPQ element and updating
4037          * the SPQ producer (which involves a memory read) is
4038          * guaranteed by the full memory barrier inside
4039          * ecore_sp_post().
4040          */
4041
4042         return ecore_sp_post(sc,
4043                              ramrod,
4044                              o->cids[ECORE_PRIMARY_CID_INDEX],
4045                              data_mapping, ETH_CONNECTION_TYPE);
4046 }
4047
4048 static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
4049                                       *params)
4050 {
4051         struct ecore_queue_sp_obj *o = params->q_obj;
4052         struct tx_queue_init_ramrod_data *rdata =
4053             (struct tx_queue_init_ramrod_data *)o->rdata;
4054         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4055         int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4056         struct ecore_queue_setup_tx_only_params *tx_only_params =
4057             &params->params.tx_only;
4058         uint8_t cid_index = tx_only_params->cid_index;
4059
4060         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
4061                 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4062                 ECORE_MSG("sending forward tx-only ramrod");
        }
4063
4064         if (cid_index >= o->max_cos) {
4065                 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4066                             o->cl_id, cid_index);
4067                 return ECORE_INVAL;
4068         }
4069
4070         ECORE_MSG("parameters received: cos: %d sp-id: %d",
4071                   tx_only_params->gen_params.cos,
4072                   tx_only_params->gen_params.spcl_id);
4073
4074         /* Clear the ramrod data */
4075         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4076
4077         /* Fill the ramrod data */
4078         ecore_q_fill_setup_tx_only(sc, params, rdata);
4079
4080         ECORE_MSG
4081             ("sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
4082              o->cids[cid_index], rdata->general.client_id,
4083              rdata->general.sp_client_id, rdata->general.cos);
4084
4085         /* No need for an explicit memory barrier here: the
4086          * ordering of writing to the SPQ element and updating
4087          * the SPQ producer (which involves a memory read) is
4088          * guaranteed by the full memory barrier inside
4089          * ecore_sp_post().
4090          */
4091
4092         return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4093                              data_mapping, ETH_CONNECTION_TYPE);
4094 }
4095
4096 static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj,
4097                                      struct ecore_queue_update_params *params,
4098                                      struct client_update_ramrod_data *data)
4099 {
4100         /* Client ID of the client to update */
4101         data->client_id = obj->cl_id;
4102
4103         /* Function ID of the client to update */
4104         data->func_id = obj->func_id;
4105
4106         /* Default VLAN value */
4107         data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
4108
4109         /* Inner VLAN stripping */
4110         data->inner_vlan_removal_enable_flg =
4111             ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4112         data->inner_vlan_removal_change_flg =
4113             ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
4114                            &params->update_flags);
4115
4116         /* Outer VLAN stripping */
4117         data->outer_vlan_removal_enable_flg =
4118             ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4119         data->outer_vlan_removal_change_flg =
4120             ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
4121                            &params->update_flags);
4122
4123         /* Drop packets whose source MAC does not belong to this
4124          * Queue.
4125          */
4126         data->anti_spoofing_enable_flg =
4127             ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4128         data->anti_spoofing_change_flg =
4129             ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
4130                            &params->update_flags);
4131
4132         /* Activate/Deactivate */
4133         data->activate_flg =
4134             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
4135         data->activate_change_flg =
4136             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4137
4138         /* Enable default VLAN */
4139         data->default_vlan_enable_flg =
4140             ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4141         data->default_vlan_change_flg =
4142             ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
4143                            &params->update_flags);
4144
4145         /* silent vlan removal */
4146         data->silent_vlan_change_flg =
4147             ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4148                            &params->update_flags);
4149         data->silent_vlan_removal_flg =
4150             ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
4151                            &params->update_flags);
4152         data->silent_vlan_value =
4153             ECORE_CPU_TO_LE16(params->silent_removal_value);
4154         data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
4155
4156         /* tx switching */
4157         data->tx_switching_flg =
4158             ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, &params->update_flags);
4159         data->tx_switching_change_flg =
4160             ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
4161                            &params->update_flags);
4162 }
4163
4164 static int ecore_q_send_update(struct bnx2x_softc *sc,
4165                                struct ecore_queue_state_params *params)
4166 {
4167         struct ecore_queue_sp_obj *o = params->q_obj;
4168         struct client_update_ramrod_data *rdata =
4169             (struct client_update_ramrod_data *)o->rdata;
4170         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4171         struct ecore_queue_update_params *update_params =
4172             &params->params.update;
4173         uint8_t cid_index = update_params->cid_index;
4174
4175         if (cid_index >= o->max_cos) {
4176                 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4177                             o->cl_id, cid_index);
4178                 return ECORE_INVAL;
4179         }
4180
4181         /* Clear the ramrod data */
4182         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4183
4184         /* Fill the ramrod data */
4185         ecore_q_fill_update_data(o, update_params, rdata);
4186
4187         /* No need for an explicit memory barrier here: the ordering
4188          * between writing the SPQ element and updating the SPQ
4189          * producer (which involves a memory read) is already
4190          * guaranteed by the full memory barrier inside
4191          * ecore_sp_post().
4192          */
4193
4194         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4195                              o->cids[cid_index], data_mapping,
4196                              ETH_CONNECTION_TYPE);
4197 }
4198
4199 /**
4200  * ecore_q_send_deactivate - send DEACTIVATE command
4201  *
4202  * @sc:         device handle
4203  * @params:     queue state parameters
4204  *
4205  * Implemented using the UPDATE command.
4206  */
4207 static int ecore_q_send_deactivate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4208                                    *params)
4209 {
4210         struct ecore_queue_update_params *update = &params->params.update;
4211
4212         ECORE_MEMSET(update, 0, sizeof(*update));
4213
4214         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4215
4216         return ecore_q_send_update(sc, params);
4217 }
4218
4219 /**
4220  * ecore_q_send_activate - send ACTIVATE command
4221  *
4222  * @sc:         device handle
4223  * @params:     queue state parameters
4224  *
4225  * Implemented using the UPDATE command.
4226  */
4227 static int ecore_q_send_activate(struct bnx2x_softc *sc,
4228                                  struct ecore_queue_state_params *params)
4229 {
4230         struct ecore_queue_update_params *update = &params->params.update;
4231
4232         ECORE_MEMSET(update, 0, sizeof(*update));
4233
4234         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
4235         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4236
4237         return ecore_q_send_update(sc, params);
4238 }
4239
4240 static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc,
4241                                    __rte_unused struct
4242                                    ecore_queue_state_params *params)
4243 {
4244         /* Not implemented yet. */
4245         return -1;
4246 }
4247
4248 static int ecore_q_send_halt(struct bnx2x_softc *sc,
4249                              struct ecore_queue_state_params *params)
4250 {
4251         struct ecore_queue_sp_obj *o = params->q_obj;
4252
4253         /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
4254         ecore_dma_addr_t data_mapping = (ecore_dma_addr_t) o->cl_id;
4256
4257         return ecore_sp_post(sc,
4258                              RAMROD_CMD_ID_ETH_HALT,
4259                              o->cids[ECORE_PRIMARY_CID_INDEX],
4260                              data_mapping, ETH_CONNECTION_TYPE);
4261 }
4262
4263 static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
4264                                 struct ecore_queue_state_params *params)
4265 {
4266         struct ecore_queue_sp_obj *o = params->q_obj;
4267         uint8_t cid_idx = params->params.cfc_del.cid_index;
4268
4269         if (cid_idx >= o->max_cos) {
4270                 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4271                             o->cl_id, cid_idx);
4272                 return ECORE_INVAL;
4273         }
4274
4275         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
4276                              o->cids[cid_idx], 0, NONE_CONNECTION_TYPE);
4277 }
4278
4279 static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_state_params
4280                                   *params)
4281 {
4282         struct ecore_queue_sp_obj *o = params->q_obj;
4283         uint8_t cid_index = params->params.terminate.cid_index;
4284
4285         if (cid_index >= o->max_cos) {
4286                 PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
4287                             o->cl_id, cid_index);
4288                 return ECORE_INVAL;
4289         }
4290
4291         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
4292                              o->cids[cid_index], 0, ETH_CONNECTION_TYPE);
4293 }
4294
4295 static int ecore_q_send_empty(struct bnx2x_softc *sc,
4296                               struct ecore_queue_state_params *params)
4297 {
4298         struct ecore_queue_sp_obj *o = params->q_obj;
4299
4300         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
4301                              o->cids[ECORE_PRIMARY_CID_INDEX], 0,
4302                              ETH_CONNECTION_TYPE);
4303 }
4304
4305 static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
4306                                     *params)
4307 {
4308         switch (params->cmd) {
4309         case ECORE_Q_CMD_INIT:
4310                 return ecore_q_init(sc, params);
4311         case ECORE_Q_CMD_SETUP_TX_ONLY:
4312                 return ecore_q_send_setup_tx_only(sc, params);
4313         case ECORE_Q_CMD_DEACTIVATE:
4314                 return ecore_q_send_deactivate(sc, params);
4315         case ECORE_Q_CMD_ACTIVATE:
4316                 return ecore_q_send_activate(sc, params);
4317         case ECORE_Q_CMD_UPDATE:
4318                 return ecore_q_send_update(sc, params);
4319         case ECORE_Q_CMD_UPDATE_TPA:
4320                 return ecore_q_send_update_tpa(sc, params);
4321         case ECORE_Q_CMD_HALT:
4322                 return ecore_q_send_halt(sc, params);
4323         case ECORE_Q_CMD_CFC_DEL:
4324                 return ecore_q_send_cfc_del(sc, params);
4325         case ECORE_Q_CMD_TERMINATE:
4326                 return ecore_q_send_terminate(sc, params);
4327         case ECORE_Q_CMD_EMPTY:
4328                 return ecore_q_send_empty(sc, params);
4329         default:
4330                 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
4331                 return ECORE_INVAL;
4332         }
4333 }
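/* Note: ECORE_Q_CMD_SETUP is intentionally missing from the common
 * dispatcher above; the SETUP ramrod data is chip specific, so it is
 * handled by the e1x/e2 wrappers below, which fall back to this common
 * handler for every other command.
 */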
4334
4335 static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
4336                                     struct ecore_queue_state_params *params)
4337 {
4338         switch (params->cmd) {
4339         case ECORE_Q_CMD_SETUP:
4340                 return ecore_q_send_setup_e1x(sc, params);
4341         case ECORE_Q_CMD_INIT:
4342         case ECORE_Q_CMD_SETUP_TX_ONLY:
4343         case ECORE_Q_CMD_DEACTIVATE:
4344         case ECORE_Q_CMD_ACTIVATE:
4345         case ECORE_Q_CMD_UPDATE:
4346         case ECORE_Q_CMD_UPDATE_TPA:
4347         case ECORE_Q_CMD_HALT:
4348         case ECORE_Q_CMD_CFC_DEL:
4349         case ECORE_Q_CMD_TERMINATE:
4350         case ECORE_Q_CMD_EMPTY:
4351                 return ecore_queue_send_cmd_cmn(sc, params);
4352         default:
4353                 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
4354                 return ECORE_INVAL;
4355         }
4356 }
4357
4358 static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
4359                                    struct ecore_queue_state_params *params)
4360 {
4361         switch (params->cmd) {
4362         case ECORE_Q_CMD_SETUP:
4363                 return ecore_q_send_setup_e2(sc, params);
4364         case ECORE_Q_CMD_INIT:
4365         case ECORE_Q_CMD_SETUP_TX_ONLY:
4366         case ECORE_Q_CMD_DEACTIVATE:
4367         case ECORE_Q_CMD_ACTIVATE:
4368         case ECORE_Q_CMD_UPDATE:
4369         case ECORE_Q_CMD_UPDATE_TPA:
4370         case ECORE_Q_CMD_HALT:
4371         case ECORE_Q_CMD_CFC_DEL:
4372         case ECORE_Q_CMD_TERMINATE:
4373         case ECORE_Q_CMD_EMPTY:
4374                 return ecore_queue_send_cmd_cmn(sc, params);
4375         default:
4376                 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
4377                 return ECORE_INVAL;
4378         }
4379 }
4380
4381 /**
4382  * ecore_queue_chk_transition - check state machine of a regular
4383  * (not Forwarding) Queue
4384  *
4385  * @sc:         device handle
4386  * @o:          queue state object
4387  * @params:     queue state parameters
4388  *
4389  * It both checks if the requested command is legal in the current
4390  * state and, if it's legal, sets a `next_state' in the object
4391  * that will be used in the completion flow to set the `state'
4392  * of the object.
4393  *
4394  * returns 0 if a requested command is a legal transition,
4395  *         ECORE_INVAL otherwise.
4396  */
4397 static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
4398                                       struct ecore_queue_sp_obj *o,
4399                                       struct ecore_queue_state_params *params)
4400 {
4401         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4402         enum ecore_queue_cmd cmd = params->cmd;
4403         struct ecore_queue_update_params *update_params =
4404             &params->params.update;
4405         uint8_t next_tx_only = o->num_tx_only;
4406
4407         /* Forget all pending-for-completion commands if a driver-only
4408          * state transition has been requested.
4409          */
4410         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4411                 o->pending = 0;
4412                 o->next_state = ECORE_Q_STATE_MAX;
4413         }
4414
4415         /* Don't allow a next state transition if we are in the middle of
4416          * the previous one.
4417          */
4418         if (o->pending) {
4419                 PMD_DRV_LOG(ERR, "Blocking transition since pending was %lx",
4420                             o->pending);
4421                 return ECORE_BUSY;
4422         }
4423
4424         switch (state) {
4425         case ECORE_Q_STATE_RESET:
4426                 if (cmd == ECORE_Q_CMD_INIT)
4427                         next_state = ECORE_Q_STATE_INITIALIZED;
4428
4429                 break;
4430         case ECORE_Q_STATE_INITIALIZED:
4431                 if (cmd == ECORE_Q_CMD_SETUP) {
4432                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4433                                            &params->params.setup.flags))
4434                                 next_state = ECORE_Q_STATE_ACTIVE;
4435                         else
4436                                 next_state = ECORE_Q_STATE_INACTIVE;
4437                 }
4438
4439                 break;
4440         case ECORE_Q_STATE_ACTIVE:
4441                 if (cmd == ECORE_Q_CMD_DEACTIVATE)
4442                         next_state = ECORE_Q_STATE_INACTIVE;
4443
4444                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4445                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4446                         next_state = ECORE_Q_STATE_ACTIVE;
4447
4448                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4449                         next_state = ECORE_Q_STATE_MULTI_COS;
4450                         next_tx_only = 1;
4451                 }
4452
4453                 else if (cmd == ECORE_Q_CMD_HALT)
4454                         next_state = ECORE_Q_STATE_STOPPED;
4455
4456                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4457                         /* If "active" state change is requested, update the
4458                          *  state accordingly.
4459                          */
4460                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4461                                            &update_params->update_flags) &&
4462                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4463                                             &update_params->update_flags))
4464                                 next_state = ECORE_Q_STATE_INACTIVE;
4465                         else
4466                                 next_state = ECORE_Q_STATE_ACTIVE;
4467                 }
4468
4469                 break;
4470         case ECORE_Q_STATE_MULTI_COS:
4471                 if (cmd == ECORE_Q_CMD_TERMINATE)
4472                         next_state = ECORE_Q_STATE_MCOS_TERMINATED;
4473
4474                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4475                         next_state = ECORE_Q_STATE_MULTI_COS;
4476                         next_tx_only = o->num_tx_only + 1;
4477                 }
4478
4479                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4480                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4481                         next_state = ECORE_Q_STATE_MULTI_COS;
4482
4483                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4484                         /* If "active" state change is requested, update the
4485                          *  state accordingly.
4486                          */
4487                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4488                                            &update_params->update_flags) &&
4489                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4490                                             &update_params->update_flags))
4491                                 next_state = ECORE_Q_STATE_INACTIVE;
4492                         else
4493                                 next_state = ECORE_Q_STATE_MULTI_COS;
4494                 }
4495
4496                 break;
4497         case ECORE_Q_STATE_MCOS_TERMINATED:
4498                 if (cmd == ECORE_Q_CMD_CFC_DEL) {
4499                         next_tx_only = o->num_tx_only - 1;
4500                         if (next_tx_only == 0)
4501                                 next_state = ECORE_Q_STATE_ACTIVE;
4502                         else
4503                                 next_state = ECORE_Q_STATE_MULTI_COS;
4504                 }
4505
4506                 break;
4507         case ECORE_Q_STATE_INACTIVE:
4508                 if (cmd == ECORE_Q_CMD_ACTIVATE)
4509                         next_state = ECORE_Q_STATE_ACTIVE;
4510
4511                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4512                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4513                         next_state = ECORE_Q_STATE_INACTIVE;
4514
4515                 else if (cmd == ECORE_Q_CMD_HALT)
4516                         next_state = ECORE_Q_STATE_STOPPED;
4517
4518                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4519                         /* If "active" state change is requested, update the
4520                          * state accordingly.
4521                          */
4522                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4523                                            &update_params->update_flags) &&
4524                             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4525                                            &update_params->update_flags)) {
4526                                 if (o->num_tx_only == 0)
4527                                         next_state = ECORE_Q_STATE_ACTIVE;
4528                                 else    /* tx only queues exist for this queue */
4529                                         next_state = ECORE_Q_STATE_MULTI_COS;
4530                         } else
4531                                 next_state = ECORE_Q_STATE_INACTIVE;
4532                 }
4533
4534                 break;
4535         case ECORE_Q_STATE_STOPPED:
4536                 if (cmd == ECORE_Q_CMD_TERMINATE)
4537                         next_state = ECORE_Q_STATE_TERMINATED;
4538
4539                 break;
4540         case ECORE_Q_STATE_TERMINATED:
4541                 if (cmd == ECORE_Q_CMD_CFC_DEL)
4542                         next_state = ECORE_Q_STATE_RESET;
4543
4544                 break;
4545         default:
4546                 PMD_DRV_LOG(ERR, "Illegal state: %d", state);
4547         }
4548
4549         /* Transition is assured */
4550         if (next_state != ECORE_Q_STATE_MAX) {
4551                 ECORE_MSG("Good state transition: %d(%d)->%d",
4552                           state, cmd, next_state);
4553                 o->next_state = next_state;
4554                 o->next_tx_only = next_tx_only;
4555                 return ECORE_SUCCESS;
4556         }
4557
4558         ECORE_MSG("Bad state transition request: %d %d", state, cmd);
4559
4560         return ECORE_INVAL;
4561 }
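/* Summary of the regular-Queue state machine enforced above (UPDATE,
 * EMPTY and UPDATE_TPA self-transitions omitted; derived from the
 * switch statement):
 *
 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *   ACTIVE --DEACTIVATE--> INACTIVE,  INACTIVE --ACTIVATE--> ACTIVE
 *   ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *   MCOS_TERMINATED --CFC_DEL--> ACTIVE or MULTI_COS (per num_tx_only)
 *   ACTIVE or INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *   TERMINATED --CFC_DEL--> RESET
 */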
4562
4563 /**
4564  * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
4565  *
4566  * @sc:         device handle
4567  * @o:          queue state object
4568  * @params:     queue state parameters
4569  *
4570  * It both checks if the requested command is legal in the current
4571  * state and, if it's legal, sets a `next_state' in the object
4572  * that will be used in the completion flow to set the `state'
4573  * of the object.
4574  *
4575  * returns 0 if a requested command is a legal transition,
4576  *         ECORE_INVAL otherwise.
4577  */
4578 static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
4579                                           struct ecore_queue_sp_obj *o,
4580                                           struct ecore_queue_state_params
4581                                           *params)
4582 {
4583         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4584         enum ecore_queue_cmd cmd = params->cmd;
4585
4586         switch (state) {
4587         case ECORE_Q_STATE_RESET:
4588                 if (cmd == ECORE_Q_CMD_INIT)
4589                         next_state = ECORE_Q_STATE_INITIALIZED;
4590
4591                 break;
4592         case ECORE_Q_STATE_INITIALIZED:
4593                 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4594                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4595                                            &params->params.tx_only.flags))
4596                                 next_state = ECORE_Q_STATE_ACTIVE;
4597                         else
4598                                 next_state = ECORE_Q_STATE_INACTIVE;
4599                 }
4600
4601                 break;
4602         case ECORE_Q_STATE_ACTIVE:
4603         case ECORE_Q_STATE_INACTIVE:
4604                 if (cmd == ECORE_Q_CMD_CFC_DEL)
4605                         next_state = ECORE_Q_STATE_RESET;
4606
4607                 break;
4608         default:
4609                 PMD_DRV_LOG(ERR, "Illegal state: %d", state);
4610         }
4611
4612         /* Transition is assured */
4613         if (next_state != ECORE_Q_STATE_MAX) {
4614                 ECORE_MSG("Good state transition: %d(%d)->%d",
4615                           state, cmd, next_state);
4616                 o->next_state = next_state;
4617                 return ECORE_SUCCESS;
4618         }
4619
4620         ECORE_MSG("Bad state transition request: %d %d", state, cmd);
4621         return ECORE_INVAL;
4622 }
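/* The Forwarding Queue life cycle checked above is much simpler:
 *
 *   RESET --INIT--> INITIALIZED --SETUP_TX_ONLY--> ACTIVE or INACTIVE
 *   ACTIVE or INACTIVE --CFC_DEL--> RESET
 */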
4623
4624 void ecore_init_queue_obj(struct bnx2x_softc *sc,
4625                           struct ecore_queue_sp_obj *obj,
4626                           uint8_t cl_id, uint32_t * cids, uint8_t cid_cnt,
4627                           uint8_t func_id, void *rdata,
4628                           ecore_dma_addr_t rdata_mapping, unsigned long type)
4629 {
4630         ECORE_MEMSET(obj, 0, sizeof(*obj));
4631
4632         /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
4633         ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
4634
4635         rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
4636         obj->max_cos = cid_cnt;
4637         obj->cl_id = cl_id;
4638         obj->func_id = func_id;
4639         obj->rdata = rdata;
4640         obj->rdata_mapping = rdata_mapping;
4641         obj->type = type;
4642         obj->next_state = ECORE_Q_STATE_MAX;
4643
4644         if (CHIP_IS_E1x(sc))
4645                 obj->send_cmd = ecore_queue_send_cmd_e1x;
4646         else
4647                 obj->send_cmd = ecore_queue_send_cmd_e2;
4648
4649         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
4650                 obj->check_transition = ecore_queue_chk_fwd_transition;
4651         else
4652                 obj->check_transition = ecore_queue_chk_transition;
4653
4654         obj->complete_cmd = ecore_queue_comp_cmd;
4655         obj->wait_comp = ecore_queue_wait_comp;
4656         obj->set_pending = ecore_queue_set_pending;
4657 }
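/* Illustrative caller sketch (hypothetical, for documentation only):
 * once initialized here, the object is driven through its state machine
 * via ecore_queue_state_change(). Roughly:
 *
 *   struct ecore_queue_state_params qp = { 0 };
 *   qp.q_obj = &my_q_obj;        // object set up by ecore_init_queue_obj()
 *   qp.cmd = ECORE_Q_CMD_INIT;
 *   ECORE_SET_BIT(RAMROD_COMP_WAIT, &qp.ramrod_flags);
 *   rc = ecore_queue_state_change(sc, &qp);   // then SETUP, HALT, ...
 */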
4658
4659 /********************** Function state object *********************************/
4660 enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc,
4661                                            struct ecore_func_sp_obj *o)
4662 {
4663         /* in the middle of a transaction - return INVALID state */
4664         if (o->pending)
4665                 return ECORE_F_STATE_MAX;
4666
4667         /* Ensure the ordering of reading o->pending and o->state:
4668          * o->pending must be read first.
4669          */
4670         rmb();
4671
4672         return o->state;
4673 }
4674
4675 static int ecore_func_wait_comp(struct bnx2x_softc *sc,
4676                                 struct ecore_func_sp_obj *o,
4677                                 enum ecore_func_cmd cmd)
4678 {
4679         return ecore_state_wait(sc, cmd, &o->pending);
4680 }
4681
4682 /**
4683  * ecore_func_state_change_comp - complete the state machine transition
4684  *
4685  * @sc:         device handle
4686  * @o:          function state object
4687  * @cmd:        command being completed
4688  *
4689  * Called on state change transition. Completes the state
4690  * machine transition only - no HW interaction.
4691  */
4692 static int
4693 ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
4694                              struct ecore_func_sp_obj *o,
4695                              enum ecore_func_cmd cmd)
4696 {
4697         unsigned long cur_pending = o->pending;
4698
4699         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4700                 PMD_DRV_LOG(ERR,
4701                             "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
4702                             cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
4703                             o->next_state);
4704                 return ECORE_INVAL;
4705         }
4706
4707         ECORE_MSG("Completing command %d for func %d, setting state to %d",
4708                   cmd, ECORE_FUNC_ID(sc), o->next_state);
4709
4710         o->state = o->next_state;
4711         o->next_state = ECORE_F_STATE_MAX;
4712
4713         /* It's important that o->state and o->next_state are
4714          * updated before o->pending.
4715          */
4716         wmb();
4717
4718         ECORE_CLEAR_BIT(cmd, &o->pending);
4719         ECORE_SMP_MB_AFTER_CLEAR_BIT();
4720
4721         return ECORE_SUCCESS;
4722 }
4723
4724 /**
4725  * ecore_func_comp_cmd - complete the state change command
4726  *
4727  * @sc:         device handle
4728  * @o:          function state object
4729  * @cmd:        command that completed
4730  *
4731  * Checks that the arrived completion is expected.
4732  */
4733 static int ecore_func_comp_cmd(struct bnx2x_softc *sc,
4734                                struct ecore_func_sp_obj *o,
4735                                enum ecore_func_cmd cmd)
4736 {
4737         /* Complete the state machine part first, check if it's a
4738          * legal completion.
4739          */
4740         return ecore_func_state_change_comp(sc, o, cmd);
4742 }
4743
4744 /**
4745  * ecore_func_chk_transition - perform function state machine transition
4746  *
4747  * @sc:         device handle
4748  * @o:          function state object
4749  * @params:     function state parameters
4750  *
4751  * It both checks if the requested command is legal in the current
4752  * state and, if it's legal, sets a `next_state' in the object
4753  * that will be used in the completion flow to set the `state'
4754  * of the object.
4755  *
4756  * returns 0 if a requested command is a legal transition,
4757  *         ECORE_INVAL otherwise.
4758  */
4759 static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
4760                                      struct ecore_func_sp_obj *o,
4761                                      struct ecore_func_state_params *params)
4762 {
4763         enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
4764         enum ecore_func_cmd cmd = params->cmd;
4765
4766         /* Forget all pending-for-completion commands if a driver-only
4767          * state transition has been requested.
4768          */
4769         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4770                 o->pending = 0;
4771                 o->next_state = ECORE_F_STATE_MAX;
4772         }
4773
4774         /* Don't allow a next state transition if we are in the middle of
4775          * the previous one.
4776          */
4777         if (o->pending)
4778                 return ECORE_BUSY;
4779
4780         switch (state) {
4781         case ECORE_F_STATE_RESET:
4782                 if (cmd == ECORE_F_CMD_HW_INIT)
4783                         next_state = ECORE_F_STATE_INITIALIZED;
4784
4785                 break;
4786         case ECORE_F_STATE_INITIALIZED:
4787                 if (cmd == ECORE_F_CMD_START)
4788                         next_state = ECORE_F_STATE_STARTED;
4789
4790                 else if (cmd == ECORE_F_CMD_HW_RESET)
4791                         next_state = ECORE_F_STATE_RESET;
4792
4793                 break;
4794         case ECORE_F_STATE_STARTED:
4795                 if (cmd == ECORE_F_CMD_STOP)
4796                         next_state = ECORE_F_STATE_INITIALIZED;
4797                 /* AFEX ramrods can be sent only in STARTED mode, and
4798                  * only if no function_stop ramrod completion is pending;
4799                  * for these events the next state remains STARTED.
4800                  */
4801                 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
4802                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4803                         next_state = ECORE_F_STATE_STARTED;
4804
4805                 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
4806                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4807                         next_state = ECORE_F_STATE_STARTED;
4808
4809                 /* Switch_update ramrod can be sent in either started or
4810                  * tx_stopped state, and it doesn't change the state.
4811                  */
4812                 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4813                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4814                         next_state = ECORE_F_STATE_STARTED;
4815
4816                 else if (cmd == ECORE_F_CMD_TX_STOP)
4817                         next_state = ECORE_F_STATE_TX_STOPPED;
4818
4819                 break;
4820         case ECORE_F_STATE_TX_STOPPED:
4821                 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4822                     (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4823                         next_state = ECORE_F_STATE_TX_STOPPED;
4824
4825                 else if (cmd == ECORE_F_CMD_TX_START)
4826                         next_state = ECORE_F_STATE_STARTED;
4827
4828                 break;
4829         default:
4830                 PMD_DRV_LOG(ERR, "Unknown state: %d", state);
4831         }
4832
4833         /* Transition is assured */
4834         if (next_state != ECORE_F_STATE_MAX) {
4835                 ECORE_MSG("Good function state transition: %d(%d)->%d",
4836                           state, cmd, next_state);
4837                 o->next_state = next_state;
4838                 return ECORE_SUCCESS;
4839         }
4840
4841         ECORE_MSG("Bad function state transition request: %d %d", state, cmd);
4842
4843         return ECORE_INVAL;
4844 }
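/* Summary of the function state machine enforced above (AFEX_UPDATE,
 * AFEX_VIFLISTS and SWITCH_UPDATE keep the current state and are
 * omitted):
 *
 *   RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *   INITIALIZED --HW_RESET--> RESET
 *   STARTED --STOP--> INITIALIZED
 *   STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 */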
4845
4846 /**
4847  * ecore_func_init_func - performs HW init at function stage
4848  *
4849  * @sc:         device handle
4850  * @drv:        driver-specific HW operations
4851  *
4852  * Init HW when the current phase is
4853  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
4854  * HW blocks.
4855  */
4856 static int ecore_func_init_func(struct bnx2x_softc *sc,
4857                                 const struct ecore_func_sp_drv_ops *drv)
4858 {
4859         return drv->init_hw_func(sc);
4860 }
4861
4862 /**
4863  * ecore_func_init_port - performs HW init at port stage
4864  *
4865  * @sc:         device handle
4866  * @drv:        driver-specific HW operations
4867  *
4868  * Init HW when the current phase is
4869  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
4870  * FUNCTION-only HW blocks.
4871  *
4872  */
4873 static int ecore_func_init_port(struct bnx2x_softc *sc,
4874                                 const struct ecore_func_sp_drv_ops *drv)
4875 {
4876         int rc = drv->init_hw_port(sc);
4877         if (rc)
4878                 return rc;
4879
4880         return ecore_func_init_func(sc, drv);
4881 }
4882
4883 /**
4884  * ecore_func_init_cmn_chip - performs HW init at chip-common stage
4885  *
4886  * @sc:         device handle
4887  * @drv:        driver-specific HW operations
4888  *
4889  * Init HW when the current phase is
4890  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
4891  * PORT-only and FUNCTION-only HW blocks.
4892  */
4893 static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
4894                                     *drv)
4895 {
4896         int rc = drv->init_hw_cmn_chip(sc);
4897         if (rc)
4898                 return rc;
4899
4900         return ecore_func_init_port(sc, drv);
4901 }
4902
4903 /**
4904  * ecore_func_init_cmn - performs HW init at common stage
4905  *
4906  * @sc:         device handle
4907  * @drv:        driver-specific HW operations
4908  *
4909  * Init HW when the current phase is
4910  * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
4911  * PORT-only and FUNCTION-only HW blocks.
4912  */
4913 static int ecore_func_init_cmn(struct bnx2x_softc *sc,
4914                                const struct ecore_func_sp_drv_ops *drv)
4915 {
4916         int rc = drv->init_hw_cmn(sc);
4917         if (rc)
4918                 return rc;
4919
4920         return ecore_func_init_port(sc, drv);
4921 }
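/* The init helpers above cascade from the widest scope inward:
 * ecore_func_init_cmn_chip()/ecore_func_init_cmn() also run the PORT
 * stage, and ecore_func_init_port() in turn runs the FUNCTION stage, so
 * every load phase ends with the FUNCTION-only blocks initialized.
 */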
4922
4923 static int ecore_func_hw_init(struct bnx2x_softc *sc,
4924                               struct ecore_func_state_params *params)
4925 {
4926         uint32_t load_code = params->params.hw_init.load_phase;
4927         struct ecore_func_sp_obj *o = params->f_obj;
4928         const struct ecore_func_sp_drv_ops *drv = o->drv;
4929         int rc = 0;
4930
4931         ECORE_MSG("function %d  load_code %x",
4932                   ECORE_ABS_FUNC_ID(sc), load_code);
4933
4934         /* Prepare FW */
4935         rc = drv->init_fw(sc);
4936         if (rc) {
4937                 PMD_DRV_LOG(ERR, "Error loading firmware");
4938                 goto init_err;
4939         }
4940
4941         /* Handle the beginning of COMMON_XXX phases separately... */
4942         switch (load_code) {
4943         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4944                 rc = ecore_func_init_cmn_chip(sc, drv);
4945                 if (rc)
4946                         goto init_err;
4947
4948                 break;
4949         case FW_MSG_CODE_DRV_LOAD_COMMON:
4950                 rc = ecore_func_init_cmn(sc, drv);
4951                 if (rc)
4952                         goto init_err;
4953
4954                 break;
4955         case FW_MSG_CODE_DRV_LOAD_PORT:
4956                 rc = ecore_func_init_port(sc, drv);
4957                 if (rc)
4958                         goto init_err;
4959
4960                 break;
4961         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4962                 rc = ecore_func_init_func(sc, drv);
4963                 if (rc)
4964                         goto init_err;
4965
4966                 break;
4967         default:
4968                 PMD_DRV_LOG(ERR, "Unknown load_code (0x%x) from MCP",
4969                             load_code);
4970                 rc = ECORE_INVAL;
4971         }
4972
4973 init_err:
4974         /* In case of success, complete the command immediately: no ramrods
4975          * have been sent.
4976          */
4977         if (!rc)
4978                 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
4979
4980         return rc;
4981 }
4982
4983 /**
4984  * ecore_func_reset_func - reset HW at function stage
4985  *
4986  * @sc:         device handle
4987  * @drv:        driver-specific HW operations
4988  *
4989  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
4990  * FUNCTION-only HW blocks.
4991  */
4992 static void ecore_func_reset_func(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
4993                                   *drv)
4994 {
4995         drv->reset_hw_func(sc);
4996 }
4997
4998 /**
4999  * ecore_func_reset_port - reset HW at port stage
5000  *
5001  * @sc:         device handle
5002  * @drv:        driver-specific HW operations
5003  *
5004  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5005  * FUNCTION-only and PORT-only HW blocks.
5006  *
5007  *                 !!!IMPORTANT!!!
5008  *
5009  * It's important to call reset_port before reset_func(), as the last
5010  * thing reset_func() does is pf_disable(), disabling PGLUE_B and thus
5011  * making any further DMAE transactions impossible.
5012  */
5013 static void ecore_func_reset_port(struct bnx2x_softc *sc, const struct ecore_func_sp_drv_ops
5014                                   *drv)
5015 {
5016         drv->reset_hw_port(sc);
5017         ecore_func_reset_func(sc, drv);
5018 }
5019
5020 /**
5021  * ecore_func_reset_cmn - reset HW at common stage
5022  *
5023  * @sc:         device handle
5024  * @drv:        driver-specific HW operations
5025  *
5026  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5027  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5028  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5029  */
5030 static void ecore_func_reset_cmn(struct bnx2x_softc *sc,
5031                                  const struct ecore_func_sp_drv_ops *drv)
5032 {
5033         ecore_func_reset_port(sc, drv);
5034         drv->reset_hw_cmn(sc);
5035 }
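/* The reset helpers cascade like the init helpers: common reset runs the
 * PORT stage first, which itself resets the PORT blocks and then the
 * FUNCTION blocks (see the IMPORTANT note above), and only afterwards
 * are the COMMON blocks themselves reset.
 */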
5036
5037 static int ecore_func_hw_reset(struct bnx2x_softc *sc,
5038                                struct ecore_func_state_params *params)
5039 {
5040         uint32_t reset_phase = params->params.hw_reset.reset_phase;
5041         struct ecore_func_sp_obj *o = params->f_obj;
5042         const struct ecore_func_sp_drv_ops *drv = o->drv;
5043
5044         ECORE_MSG("function %d  reset_phase %x", ECORE_ABS_FUNC_ID(sc),
5045                   reset_phase);
5046
5047         switch (reset_phase) {
5048         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5049                 ecore_func_reset_cmn(sc, drv);
5050                 break;
5051         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5052                 ecore_func_reset_port(sc, drv);
5053                 break;
5054         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5055                 ecore_func_reset_func(sc, drv);
5056                 break;
5057         default:
5058                 PMD_DRV_LOG(ERR, "Unknown reset_phase (0x%x) from MCP",
5059                             reset_phase);
5060                 break;
5061         }
5062
5063         /* Complete the command immediately: no ramrods have been sent. */
5064         o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
5065
5066         return ECORE_SUCCESS;
5067 }
5068
5069 static int ecore_func_send_start(struct bnx2x_softc *sc,
5070                                  struct ecore_func_state_params *params)
5071 {
5072         struct ecore_func_sp_obj *o = params->f_obj;
5073         struct function_start_data *rdata =
5074             (struct function_start_data *)o->rdata;
5075         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5076         struct ecore_func_start_params *start_params = &params->params.start;
5077
5078         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5079
5080         /* Fill the ramrod data with provided parameters */
5081         rdata->function_mode = (uint8_t) start_params->mf_mode;
5082         rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
5083         rdata->path_id = ECORE_PATH_ID(sc);
5084         rdata->network_cos_mode = start_params->network_cos_mode;
5085         rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5086         rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5087
5088         /*
5089          * No need for an explicit memory barrier here: the ordering
5090          * between writing the SPQ element and updating the SPQ
5091          * producer (which involves a memory read) is already
5092          * guaranteed by the full memory barrier inside
5093          * ecore_sp_post().
5094          */
5095
5096         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5097                              data_mapping, NONE_CONNECTION_TYPE);
5098 }
5099
5100 static int ecore_func_send_switch_update(struct bnx2x_softc *sc, struct ecore_func_state_params
5101                                          *params)
5102 {
5103         struct ecore_func_sp_obj *o = params->f_obj;
5104         struct function_update_data *rdata =
5105             (struct function_update_data *)o->rdata;
5106         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5107         struct ecore_func_switch_update_params *switch_update_params =
5108             &params->params.switch_update;
5109
5110         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5111
5112         /* Fill the ramrod data with provided parameters */
5113         rdata->tx_switch_suspend_change_flg = 1;
5114         rdata->tx_switch_suspend = switch_update_params->suspend;
5115         rdata->echo = SWITCH_UPDATE;
5116
5117         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5118                              data_mapping, NONE_CONNECTION_TYPE);
5119 }
5120
5121 static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func_state_params
5122                                        *params)
5123 {
5124         struct ecore_func_sp_obj *o = params->f_obj;
5125         struct function_update_data *rdata =
5126             (struct function_update_data *)o->afex_rdata;
5127         ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
5128         struct ecore_func_afex_update_params *afex_update_params =
5129             &params->params.afex_update;
5130
5131         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5132
5133         /* Fill the ramrod data with provided parameters */
5134         rdata->vif_id_change_flg = 1;
5135         rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
5136         rdata->afex_default_vlan_change_flg = 1;
5137         rdata->afex_default_vlan =
5138             ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
5139         rdata->allowed_priorities_change_flg = 1;
5140         rdata->allowed_priorities = afex_update_params->allowed_priorities;
5141         rdata->echo = AFEX_UPDATE;
5142
5143         /* No need for an explicit memory barrier here: the ordering
5144          * between writing the SPQ element and updating the SPQ
5145          * producer (which involves a memory read) is already
5146          * guaranteed by the full memory barrier inside
5147          * ecore_sp_post().
5148          */
5149         ECORE_MSG("afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
5150                   rdata->vif_id,
5151                   rdata->afex_default_vlan, rdata->allowed_priorities);
5152
5153         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5154                              data_mapping, NONE_CONNECTION_TYPE);
5155 }
5156
5157 static inline int
5158 ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
5159                               struct ecore_func_state_params *params)
5160 {
5161         struct ecore_func_sp_obj *o = params->f_obj;
5162         struct afex_vif_list_ramrod_data *rdata =
5163             (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5164         struct ecore_func_afex_viflists_params *afex_vif_params =
5165             &params->params.afex_viflists;
5166         uint64_t *p_rdata = (uint64_t *) rdata;
5167
5168         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5169
5170         /* Fill the ramrod data with provided parameters */
5171         rdata->vif_list_index =
5172             ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
5173         rdata->func_bit_map = afex_vif_params->func_bit_map;
5174         rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5175         rdata->func_to_clear = afex_vif_params->func_to_clear;
5176
5177         /* send in echo type of sub command */
5178         rdata->echo = afex_vif_params->afex_vif_list_command;
5179
5180         /* No need for an explicit memory barrier here: the ordering
5181          * between writing the SPQ element and updating the SPQ
5182          * producer (which involves a memory read) is already
5183          * guaranteed by the full memory barrier inside
5184          * ecore_sp_post().
5185          */
5186
5187         ECORE_MSG
5188             ("afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
5189              rdata->afex_vif_list_command, rdata->vif_list_index,
5190              rdata->func_bit_map, rdata->func_to_clear);
5191
5192         /* this ramrod sends data directly and not through DMA mapping */
5193         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5194                              *p_rdata, NONE_CONNECTION_TYPE);
5195 }
5196
5197 static int ecore_func_send_stop(struct bnx2x_softc *sc, __rte_unused struct
5198                                 ecore_func_state_params *params)
5199 {
5200         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
5201                              NONE_CONNECTION_TYPE);
5202 }
5203
5204 static int ecore_func_send_tx_stop(struct bnx2x_softc *sc, __rte_unused struct
5205                                    ecore_func_state_params *params)
5206 {
5207         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
5208                              NONE_CONNECTION_TYPE);
5209 }
5210
5211 static int ecore_func_send_tx_start(struct bnx2x_softc *sc, struct ecore_func_state_params
5212                                     *params)
5213 {
5214         struct ecore_func_sp_obj *o = params->f_obj;
5215         struct flow_control_configuration *rdata =
5216             (struct flow_control_configuration *)o->rdata;
5217         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5218         struct ecore_func_tx_start_params *tx_start_params =
5219             &params->params.tx_start;
5220         uint32_t i;
5221
5222         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5223
5224         rdata->dcb_enabled = tx_start_params->dcb_enabled;
5225         rdata->dcb_version = tx_start_params->dcb_version;
5226         rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
5227
5228         for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5229                 rdata->traffic_type_to_priority_cos[i] =
5230                     tx_start_params->traffic_type_to_priority_cos[i];
5231
5232         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5233                              data_mapping, NONE_CONNECTION_TYPE);
5234 }
5235
5236 static int ecore_func_send_cmd(struct bnx2x_softc *sc,
5237                                struct ecore_func_state_params *params)
5238 {
5239         switch (params->cmd) {
5240         case ECORE_F_CMD_HW_INIT:
5241                 return ecore_func_hw_init(sc, params);
5242         case ECORE_F_CMD_START:
5243                 return ecore_func_send_start(sc, params);
5244         case ECORE_F_CMD_STOP:
5245                 return ecore_func_send_stop(sc, params);
5246         case ECORE_F_CMD_HW_RESET:
5247                 return ecore_func_hw_reset(sc, params);
5248         case ECORE_F_CMD_AFEX_UPDATE:
5249                 return ecore_func_send_afex_update(sc, params);
5250         case ECORE_F_CMD_AFEX_VIFLISTS:
5251                 return ecore_func_send_afex_viflists(sc, params);
5252         case ECORE_F_CMD_TX_STOP:
5253                 return ecore_func_send_tx_stop(sc, params);
5254         case ECORE_F_CMD_TX_START:
5255                 return ecore_func_send_tx_start(sc, params);
5256         case ECORE_F_CMD_SWITCH_UPDATE:
5257                 return ecore_func_send_switch_update(sc, params);
5258         default:
5259                 PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
5260                 return ECORE_INVAL;
5261         }
5262 }
5263
5264 void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc,
5265                          struct ecore_func_sp_obj *obj,
5266                          void *rdata, ecore_dma_addr_t rdata_mapping,
5267                          void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
5268                          struct ecore_func_sp_drv_ops *drv_iface)
5269 {
5270         ECORE_MEMSET(obj, 0, sizeof(*obj));
5271
5272         ECORE_MUTEX_INIT(&obj->one_pending_mutex);
5273
5274         obj->rdata = rdata;
5275         obj->rdata_mapping = rdata_mapping;
5276         obj->afex_rdata = afex_rdata;
5277         obj->afex_rdata_mapping = afex_rdata_mapping;
5278         obj->send_cmd = ecore_func_send_cmd;
5279         obj->check_transition = ecore_func_chk_transition;
5280         obj->complete_cmd = ecore_func_comp_cmd;
5281         obj->wait_comp = ecore_func_wait_comp;
5282         obj->drv = drv_iface;
5283 }
5284
5285 /**
5286  * ecore_func_state_change - perform Function state change transition
5287  *
5288  * @sc:         device handle
5289  * @params:     parameters to perform the transaction
5290  *
5291  * returns 0 in case of successfully completed transition,
5292  *         negative error code in case of failure, positive
5293  *         (EBUSY) value if there is a completion that is
5294  *         still pending (possible only if RAMROD_COMP_WAIT is
5295  *         not set in params->ramrod_flags for asynchronous
5296  *         commands).
5297  */
5298 int ecore_func_state_change(struct bnx2x_softc *sc,
5299                             struct ecore_func_state_params *params)
5300 {
5301         struct ecore_func_sp_obj *o = params->f_obj;
5302         int rc, cnt = 300;
5303         enum ecore_func_cmd cmd = params->cmd;
5304         unsigned long *pending = &o->pending;
5305
5306         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5307
5308         /* Check that the requested transition is legal */
5309         rc = o->check_transition(sc, o, params);
5310         if ((rc == ECORE_BUSY) &&
5311             (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
5312                 while ((rc == ECORE_BUSY) && (--cnt > 0)) {
5313                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5314                         ECORE_MSLEEP(10);
5315                         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5316                         rc = o->check_transition(sc, o, params);
5317                 }
5318                 if (rc == ECORE_BUSY) {
5319                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5320                         PMD_DRV_LOG(ERR,
5321                                     "timeout waiting for previous ramrod completion");
5322                         return rc;
5323                 }
5324         } else if (rc) {
5325                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5326                 return rc;
5327         }
5328
5329         /* Set "pending" bit */
5330         ECORE_SET_BIT(cmd, pending);
5331
5332         /* Don't send a command if only driver cleanup was requested */
5333         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5334                 ecore_func_state_change_comp(sc, o, cmd);
5335                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5336         } else {
5337                 /* Send a ramrod */
5338                 rc = o->send_cmd(sc, params);
5339
5340                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5341
5342                 if (rc) {
5343                         o->next_state = ECORE_F_STATE_MAX;
5344                         ECORE_CLEAR_BIT(cmd, pending);
5345                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
5346                         return rc;
5347                 }
5348
5349                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5350                         rc = o->wait_comp(sc, o, cmd);
5351                         if (rc)
5352                                 return rc;
5353
5354                         return ECORE_SUCCESS;
5355                 }
5356         }
5357
5358         return ECORE_RET_PENDING(cmd, pending);
5359 }
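/* Illustrative caller sketch (hypothetical): requesting HW_INIT through
 * the function state machine and waiting for the completion:
 *
 *   struct ecore_func_state_params fp = { 0 };
 *   fp.f_obj = &sc->func_obj;              // assumed softc member
 *   fp.cmd = ECORE_F_CMD_HW_INIT;
 *   fp.params.hw_init.load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
 *   ECORE_SET_BIT(RAMROD_COMP_WAIT, &fp.ramrod_flags);
 *   rc = ecore_func_state_change(sc, &fp);
 */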
5360
5361 /******************************************************************************
5362  * Description:
5363  *         Calculates crc 8 on a word value: polynomial 0-1-2-8
5364  *         Code was translated from Verilog.
5365  * Return: the updated 8-bit CRC value.
5366  *****************************************************************************/
5367 uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc)
5368 {
5369         uint8_t D[32];
5370         uint8_t NewCRC[8];
5371         uint8_t C[8];
5372         uint8_t crc_res;
5373         uint8_t i;
5374
5375         /* split the data into 32 bits */
5376         for (i = 0; i < 32; i++) {
5377                 D[i] = (uint8_t) (data & 1);
5378                 data = data >> 1;
5379         }
5380
5381         /* split the crc into 8 bits */
5382         for (i = 0; i < 8; i++) {
5383                 C[i] = crc & 1;
5384                 crc = crc >> 1;
5385         }
5386
5387         NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
5388             D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
5389             C[6] ^ C[7];
5390         NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
5391             D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
5392             D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
5393         NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
5394             D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
5395             C[0] ^ C[1] ^ C[4] ^ C[5];
5396         NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
5397             D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
5398             C[1] ^ C[2] ^ C[5] ^ C[6];
5399         NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
5400             D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
5401             C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
5402         NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
5403             D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
5404             C[3] ^ C[4] ^ C[7];
5405         NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
5406             D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5];
5407         NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
5408             D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6];
5409
5410         crc_res = 0;
5411         for (i = 0; i < 8; i++) {
5412                 crc_res |= (NewCRC[i] << i);
5413         }
5414
5415         return crc_res;
5416 }
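/* The XOR network above is the unrolled (Verilog-derived) form of a
 * CRC-8 step with polynomial x^8 + x^2 + x + 1 (the "0-1-2-8" noted
 * above): D[] holds the 32 data bits and C[] the incoming CRC bits,
 * both split out LSB-first, and each NewCRC[i] is the parity of the
 * taps feeding bit i.
 */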
5417
5418 uint32_t
5419 ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic)
5420 {
5421         int i;
5422         while (len--) {
5423                 crc ^= *p++;
5424                 for (i = 0; i < 8; i++)
5425                         crc = (crc >> 1) ^ ((crc & 1) ? magic : 0);
5426         }
5427         return crc;
5428 }
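/* Usage note (illustrative): this is the classic reflected, bit-serial
 * CRC loop with a caller-supplied polynomial. For instance, with the
 * reflected Ethernet polynomial 0xedb88320 and an all-ones seed,
 *
 *   uint32_t crc = ecore_calc_crc32(0xffffffff, buf, len, 0xedb88320);
 *
 * would yield a standard CRC-32 up to the usual final bit inversion
 * (buf/len here are hypothetical caller variables).
 */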