/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2007-2013 Broadcom Corporation.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015-2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bnx2x.h"
#include "ecore_init.h"

/**** Exe Queue interfaces ****/

/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @o:          pointer to the object
 * @exe_len:    length
 * @owner:      pointer to the owner
 * @validate:   validate function pointer
 * @remove:     remove function pointer
 * @optimize:   optimize function pointer
 * @exec:       execute function pointer
 * @get:        get function pointer
 */
static void
ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
		     struct ecore_exe_queue_obj *o,
		     int exe_len,
		     union ecore_qable_obj *owner,
		     exe_q_validate validate,
		     exe_q_remove remove,
		     exe_q_optimize optimize, exe_q_execute exec, exe_q_get get)
{
	ECORE_MEMSET(o, 0, sizeof(*o));

	ECORE_LIST_INIT(&o->exe_queue);
	ECORE_LIST_INIT(&o->pending_comp);

	ECORE_SPIN_LOCK_INIT(&o->lock, sc);

	o->exe_chunk_len = exe_len;
	o->owner = owner;

	/* Owner specific callbacks */
	o->validate = validate;
	o->remove = remove;
	o->optimize = optimize;
	o->execute = exec;
	o->get = get;

	ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d",
		  exe_len);
}

static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
				      struct ecore_exeq_elem *elem)
{
	ECORE_MSG(sc, "Deleting an exe_queue element");
	ECORE_FREE(sc, elem, sizeof(*elem));
}

static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;
	int cnt = 0;

	ECORE_SPIN_LOCK_BH(&o->lock);

	ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		cnt++;

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return cnt;
}

/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @sc:         driver handle
 * @o:          queue
 * @elem:       new command to add
 * @restore:    true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static int ecore_exe_queue_add(struct bnx2x_softc *sc,
			       struct ecore_exe_queue_obj *o,
			       struct ecore_exeq_elem *elem, int restore)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->lock);

	if (!restore) {
		/* Try to cancel this element queue */
		rc = o->optimize(sc, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(sc, o->owner, elem);
		if (rc) {
			ECORE_MSG(sc, "Preamble failed: %d", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return ECORE_SUCCESS;

free_and_exit:
	ecore_exe_queue_free_elem(sc, elem);

	ECORE_SPIN_UNLOCK_BH(&o->lock);

	return rc;
}

static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
					    struct ecore_exe_queue_obj *o)
{
	struct ecore_exeq_elem *elem;

	while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
					      struct ecore_exeq_elem, link);

		ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
		ecore_exe_queue_free_elem(sc, elem);
	}
}

static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
						 struct ecore_exe_queue_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->lock);

	__ecore_exe_queue_reset_pending(sc, o);

	ECORE_SPIN_UNLOCK_BH(&o->lock);
}

/**
 * ecore_exe_queue_step - execute one execution chunk atomically
 *
 * @sc:                 driver handle
 * @o:                  queue
 * @ramrod_flags:       flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static int ecore_exe_queue_step(struct bnx2x_softc *sc,
				struct ecore_exe_queue_obj *o,
				unsigned long *ramrod_flags)
{
	struct ecore_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	ECORE_MEMSET(&spacer, 0, sizeof(spacer));

	/* Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the FW
	 * which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
		if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			ECORE_MSG(sc,
				  "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
			__ecore_exe_queue_reset_pending(sc, o);
		} else {
			return ECORE_PENDING;
		}
	}

	/* Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
		elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
					      struct ecore_exeq_elem, link);
		ECORE_DBG_BREAK_IF(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/* Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * ecore_exe_queue_empty() without locking.
			 */
			ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
			mb();
			ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
			ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
			ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
		} else {
			break;
		}
	}

	/* Sanity check */
	if (!cur_len)
		return ECORE_SUCCESS;

	rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/* In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/* If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__ecore_exe_queue_reset_pending(sc, o);

	return rc;
}

static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
{
	int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
}
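
/* Illustrative note (not driver code): the spacer element pushed in
 * ecore_exe_queue_step() pairs with the mb() above. A minimal sketch of the
 * interleaving it guards against, assuming the ECORE_LIST_* macros behave
 * like ordinary linked-list operations:
 *
 *	step() (mover)                      exe_queue_empty() (observer)
 *	push spacer to pending_comp
 *	mb()
 *	remove elem from exe_queue          empty = IS_EMPTY(exe_queue)  (TRUE)
 *	                                    mb()
 *	                                    IS_EMPTY(pending_comp)       (FALSE,
 *	                                                                 spacer)
 *	push elem to pending_comp
 *	remove spacer from pending_comp
 *
 * Without the spacer, an unlocked observer could see both lists empty in the
 * middle of a move and wrongly conclude the queue has drained.
 */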

static struct ecore_exeq_elem *
ecore_exe_queue_alloc_elem(struct bnx2x_softc *sc __rte_unused)
{
	ECORE_MSG(sc, "Allocating a new exe_queue element");
	return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
}

/************************ raw_obj functions ***********************************/
static int ecore_raw_check_pending(struct ecore_raw_obj *o)
{
	/*
	 * !! converts the value returned by ECORE_TEST_BIT such that it
	 * is guaranteed not to be truncated regardless of int definition.
	 *
	 * Note we cannot simply define the function's return value type
	 * to match the type returned by ECORE_TEST_BIT, as it varies by
	 * platform/implementation.
	 */

	return !!ECORE_TEST_BIT(o->state, o->pstate);
}
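
/* A hedged example of the truncation the !! above avoids, assuming
 * ECORE_TEST_BIT() returns a type wider than int on some platforms:
 *
 *	unsigned long v = 1UL << 40;	// bit set above the low 32 bits
 *	int truncated  = (int)v;	// 0 on LP64: the set bit is lost
 *	int normalized = !!v;		// 1: logical negation happens first
 *
 * Normalizing to 0/1 before the implicit conversion keeps the result correct
 * no matter how wide the macro's return type is.
 */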

static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_CLEAR_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
	ECORE_SMP_MB_BEFORE_CLEAR_BIT();
	ECORE_SET_BIT(o->state, o->pstate);
	ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

/**
 * ecore_state_wait - wait until the given bit (state) is cleared
 *
 * @sc:         device handle
 * @state:      state which is to be cleared
 * @pstate:     state buffer
 */
static int ecore_state_wait(struct bnx2x_softc *sc, int state,
			    unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(sc))
		cnt *= 20;

	ECORE_MSG(sc, "waiting for state to become %d", state);

	ECORE_MIGHT_SLEEP();
	while (cnt--) {
		bnx2x_intr_legacy(sc, 1);
		if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
			ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
#endif
			return ECORE_SUCCESS;
		}

		ECORE_WAIT(sc, delay_us);

		if (sc->panic)
			return ECORE_IO;
	}

	/* timeout! */
	PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
#ifdef ECORE_STOP_ON_ERROR
	ecore_panic();
#endif

	return ECORE_TIMEOUT;
}

static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw)
{
	return ecore_state_wait(sc, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	ECORE_DBG_BREAK_IF(!mp);

	return mp->get_entry(mp, offset);
}

static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	ECORE_DBG_BREAK_IF(!mp);

	return mp->get(mp, 1);
}

static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
{
	struct ecore_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

/**
 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on vlan mac
 * head list.
 *
 * @sc:         device handle
 * @o:          vlan_mac object
 *
 * @details Non-blocking implementation; should be called under execution
 *          queue lock.
 */
static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
					    struct ecore_vlan_mac_obj *o)
{
	if (o->head_reader) {
		ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy");
		return ECORE_BUSY;
	}

	ECORE_MSG(sc, "vlan_mac_lock writer - Taken");
	return ECORE_SUCCESS;
}

/**
 * __ecore_vlan_mac_h_exec_pending - execute a step that was pended because
 * the lock on the vlan mac head list was taken.
 *
 * @sc:         device handle
 * @o:          vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	int rc;
	unsigned long ramrod_flags = o->saved_ramrod_flags;

	ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu",
		  ramrod_flags);
	o->head_exe_request = FALSE;
	o->saved_ramrod_flags = 0;
	rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
	if (rc != ECORE_SUCCESS) {
		PMD_DRV_LOG(ERR, sc,
			    "execution of pending commands failed with rc %d",
			    rc);
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	}
}

/**
 * __ecore_vlan_mac_h_pend - pend an execution step which couldn't run because
 * the vlan mac head list lock was taken.
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 * @ramrod_flags:       ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
				    struct ecore_vlan_mac_obj *o,
				    unsigned long ramrod_flags)
{
	o->head_exe_request = TRUE;
	o->saved_ramrod_flags = ramrod_flags;
	ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu",
		  ramrod_flags);
}

/**
 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice that if a
 *          pending execution exists, it will be performed - possibly releasing
 *          and reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
					    struct ecore_vlan_mac_obj *o)
{
	/* It's possible a new pending execution was added since this writer
	 * executed. If so, execute again. [Ad infinitum]
	 */
	while (o->head_exe_request) {
		ECORE_MSG(sc,
			  "vlan_mac_lock - writer release encountered a pending request");
		__ecore_vlan_mac_h_exec_pending(sc, o);
	}
}

/**
 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Notice that if a pending execution exists, it will be performed -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
				   struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_write_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
					struct ecore_vlan_mac_obj *o)
{
	/* If we got here, we're holding lock --> no WRITER exists */
	o->head_reader++;
	ECORE_MSG(sc,
		  "vlan_mac_lock - locked reader - number %d", o->head_reader);

	return ECORE_SUCCESS;
}

/**
 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
				      struct ecore_vlan_mac_obj *o)
{
	int rc;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	rc = __ecore_vlan_mac_h_read_lock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice that if a
 *          pending execution exists, it will be performed if this was the last
 *          reader - possibly releasing and reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
					   struct ecore_vlan_mac_obj *o)
{
	if (!o->head_reader) {
		PMD_DRV_LOG(ERR, sc,
			    "Need to release vlan mac reader lock, but lock isn't taken");
#ifdef ECORE_STOP_ON_ERROR
		ecore_panic();
#endif
	} else {
		o->head_reader--;
		ECORE_MSG(sc, "vlan_mac_lock - decreased readers to %d",
			  o->head_reader);
	}

	/* It's possible a new pending execution was added, and that this reader
	 * was last - if so we need to execute the command.
	 */
	if (!o->head_reader && o->head_exe_request) {
		ECORE_MSG(sc, "vlan_mac_lock - reader release encountered a pending request");

		/* Writer release will do the trick */
		__ecore_vlan_mac_h_write_unlock(sc, o);
	}
}

/**
 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Notice that if a pending execution exists, it will be performed if
 *          this was the last reader. Claims and releases the execution queue
 *          lock during its run.
 */
void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o)
{
	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
	__ecore_vlan_mac_h_read_unlock(sc, o);
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}
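
/* Usage sketch (illustrative only): a typical reader of the registry wraps
 * its traversal in the reader lock, while writers go through the execution
 * queue and take the lock via __ecore_vlan_mac_h_write_trylock():
 *
 *	if (ecore_vlan_mac_h_read_lock(sc, o) == ECORE_SUCCESS) {
 *		ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
 *					  struct ecore_vlan_mac_registry_elem)
 *			use(pos);	// hypothetical consumer
 *		ecore_vlan_mac_h_read_unlock(sc, o);
 *	}
 *
 * The last reader to unlock replays any execution step a writer had to pend.
 */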

/**
 * ecore_get_n_elements - get n elements from the vlan mac registry
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 * @n:                  number of elements to get
 * @base:               base address for element placement
 * @stride:             stride between elements (in bytes)
 * @size:               size of each element's payload to copy (in bytes)
 */
static int ecore_get_n_elements(struct bnx2x_softc *sc,
				struct ecore_vlan_mac_obj *o, int n,
				uint8_t *base, uint8_t stride, uint8_t size)
{
	struct ecore_vlan_mac_registry_elem *pos;
	uint8_t *next = base;
	int counter = 0, read_lock;

	ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)");
	read_lock = ecore_vlan_mac_h_read_lock(sc, o);
	if (read_lock != ECORE_SUCCESS)
		PMD_DRV_LOG(ERR, sc,
			    "get_n_elements failed to get vlan mac reader lock; Access without lock");

	/* traverse list */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem) {
		if (counter < n) {
			ECORE_MEMCPY(next, &pos->u, size);
			counter++;
			ECORE_MSG(sc,
				  "copied element number %d to address %p element was:",
				  counter, next);
			next += stride + size;
		}
	}

	if (read_lock == ECORE_SUCCESS) {
		ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)");
		ecore_vlan_mac_h_read_unlock(sc, o);
	}

	return counter * ETH_ALEN;
}
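
/* A hedged usage sketch: the caller's buffer must hold n elements laid out at
 * (size + stride)-byte steps. For example, to snapshot up to 8 MACs packed
 * back to back (stride 0 and ETH_ALEN-sized payloads are assumptions of this
 * example, not requirements of the function):
 *
 *	uint8_t macs[8 * ETH_ALEN];
 *	int bytes = ecore_get_n_elements(sc, mac_obj, 8, macs, 0, ETH_ALEN);
 *	// bytes == number_of_copied_elements * ETH_ALEN
 */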

/* check_add() callbacks */
static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
			       struct ecore_vlan_mac_obj *o,
			       union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
		return ECORE_INVAL;

	/* Check if a requested MAC already exists */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return ECORE_EXISTS;

	return ECORE_SUCCESS;
}

/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *
ecore_check_mac_del(struct bnx2x_softc *sc __rte_unused,
		    struct ecore_vlan_mac_obj *o,
		    union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;

	ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
		  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
		  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
				  struct ecore_vlan_mac_registry_elem)
		if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
		    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
			return pos;

	return NULL;
}

/* check_move() callback */
static int ecore_check_move(struct bnx2x_softc *sc,
			    struct ecore_vlan_mac_obj *src_o,
			    struct ecore_vlan_mac_obj *dst_o,
			    union ecore_classification_ramrod_data *data)
{
	struct ecore_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(sc, src_o, data);

	/* Check if the configuration can be added */
	rc = dst_o->check_add(sc, dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return FALSE;

	return TRUE;
}

static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc,
				       __rte_unused struct ecore_vlan_mac_obj *src_o,
				       __rte_unused struct ecore_vlan_mac_obj *dst_o,
				       __rte_unused union ecore_classification_ramrod_data *data)
{
	return FALSE;
}

static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
{
	struct ecore_raw_obj *raw = &o->raw;
	uint8_t rx_tx_flag = 0;

	if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
	    (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
				 int add, unsigned char *dev_addr, int index)
{
	uint32_t wb_data[2];
	uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
	    NIG_REG_LLH0_FUNC_MEM;

	if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
		return;

	if (index > ECORE_LLH_CAM_MAX_PF_LINE)
		return;

	ECORE_MSG(sc, "Going to %s LLH configuration at entry %d",
		  (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a uint64_t WB register */
		reg_offset += 8 * index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) | dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

		ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
	}

	REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
		    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add);
}
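
/* Worked example of the wb_data packing above: for MAC aa:bb:cc:dd:ee:ff
 * (dev_addr[0..5] = aa bb cc dd ee ff) the two 32-bit words written to the
 * wide-bus register are
 *
 *	wb_data[0] = 0xccddeeff;	// bytes 2..5, most significant first
 *	wb_data[1] = 0x0000aabb;	// bytes 0..1 in the low 16 bits
 *
 * i.e. the 48-bit address occupies the low 48 bits of the uint64_t entry.
 */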

/**
 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @o:          queue for which we want to configure this rule
 * @add:        if TRUE the command is an ADD command, DEL otherwise
 * @opcode:     CLASSIFY_RULE_OPCODE_XXX
 * @hdr:        pointer to a header to setup
 */
static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o,
					  int add, int opcode,
					  struct eth_classify_cmd_header *hdr)
{
	struct ecore_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
	    (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:        connection id
 * @type:       ECORE_FILTER_XXX_PENDING
 * @hdr:        pointer to header to setup
 * @rule_cnt:   number of rules in this ramrod
 *
 * Currently we always configure one rule, and set the echo field to contain
 * a CID and an opcode type.
 */
static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
					    struct eth_classify_header *hdr,
					    int rule_cnt)
{
	hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
				      (type << ECORE_SWCID_SHIFT));
	hdr->rule_cnt = (uint8_t)rule_cnt;
}
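
/* Illustrative decode of the echo encoding above (the split shown is the
 * idea, the exact mask/shift values live in the ecore headers; the LE-to-CPU
 * helper is written out as a hypothetical inverse of ECORE_CPU_TO_LE32):
 * the low bits carry the SW connection id and the bits above
 * ECORE_SWCID_SHIFT carry the pending-command type, so a completion handler
 * can recover both:
 *
 *	uint32_t echo = le32_to_cpu_equivalent(hdr->echo);
 *	uint32_t cid  = echo & ECORE_SWCID_MASK;
 *	int      type = echo >> ECORE_SWCID_SHIFT;
 */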

/* hw_config() callbacks */
static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
				 struct ecore_vlan_mac_obj *o,
				 struct ecore_exeq_elem *elem, int rule_idx,
				 __rte_unused int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
	    (struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/* Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There are also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != ECORE_VLAN_MAC_MOVE) {
		if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ISCSI_ETH_LINE);
		else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
			ecore_set_mac_in_nig(sc, add, mac,
					     ECORE_LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		ECORE_MEMSET(data, 0, sizeof(*data));

	/* Setup a command header */
	ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
		  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
		  mac[4], mac[5], raw->cl_id);

	/* Set a MAC itself */
	ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);
	rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac;

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == ECORE_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data.vlan_mac.target_obj,
					      TRUE, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
		rule_entry->mac.inner_mac =
		    elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
	}

	/* Set the ramrod data header */
	ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @o:          queue
 * @type:       ECORE_FILTER_XXX_PENDING
 * @cam_offset: offset in cam memory
 * @hdr:        pointer to a header to setup
 *
 * E1H
 */
static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj *o,
					     int type, int cam_offset,
					     struct mac_configuration_hdr *hdr)
{
	struct ecore_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (uint8_t)cam_offset;
	hdr->client_id = ECORE_CPU_TO_LE16(0xff);
	hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
				      (type << ECORE_SWCID_SHIFT));
}

static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj *o,
					     int add, int opcode, uint8_t *mac,
					     uint16_t vlan_id,
					     struct mac_configuration_entry *cfg_entry)
{
	struct ecore_raw_obj *r = &o->raw;
	uint32_t cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);

	if (add) {
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_SET);
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
			       opcode);

		/* Set a MAC in a ramrod data */
		ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else {
		ECORE_SET_FLAG(cfg_entry->flags,
			       MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			       T_ETH_MAC_COMMAND_INVALIDATE);
	}
}

static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc __rte_unused,
					 struct ecore_vlan_mac_obj *o,
					 int type, int cam_offset,
					 int add, uint8_t *mac,
					 uint16_t vlan_id, int opcode,
					 struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];

	ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr);
	ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
					 cfg_entry);

	ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
		  (add ? "setting" : "clearing"),
		  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
		  o->raw.cl_id, cam_offset);
}

/**
 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @sc:         device handle
 * @o:          ecore_vlan_mac_obj
 * @elem:       ecore_exeq_elem
 * @rule_idx:   rule_idx
 * @cam_offset: cam_offset
 */
static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_obj *o,
				  struct ecore_exeq_elem *elem,
				  __rte_unused int rule_idx, int cam_offset)
{
	struct ecore_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
	    (struct mac_configuration_cmd *)(raw->rdata);
	/* 57711 does not support the MOVE command,
	 * so it's either ADD or DEL
	 */
	int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
	    TRUE : FALSE;

	/* Reset the ramrod data buffer */
	ECORE_MEMSET(config, 0, sizeof(*config));

	ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

/**
 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @sc:         device handle
 * @p:          command parameters
 * @ppos:       pointer to the cookie
 *
 * Reconfigure the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
 * is taken into account.
 *
 * The cookie should be given back in the next call to make the function
 * handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element
 * has been handled.
 */
static int ecore_vlan_mac_restore(struct bnx2x_softc *sc,
				  struct ecore_vlan_mac_ramrod_params *p,
				  struct ecore_vlan_mac_registry_elem **ppos)
{
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (ECORE_LIST_IS_EMPTY(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
					       struct ecore_vlan_mac_registry_elem,
					       link);
	else
		*ppos = ECORE_LIST_NEXT(*ppos, link,
					struct ecore_vlan_mac_registry_elem);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = ECORE_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);

	return ecore_config_vlan_mac(sc, p);
}
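
/* Usage sketch for the cookie-style iterator above (illustrative; assumes
 * p.vlan_mac_obj already points at the object being restored and that error
 * handling is elided). Starting from a NULL cookie walks the whole registry:
 *
 *	struct ecore_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = ecore_vlan_mac_restore(sc, &p, &pos);
 *	} while (rc >= 0 && pos != NULL);
 */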

/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
 * pointer to an element matching specific criteria, or NULL if no such
 * element has been found.
 */
static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o,
						  struct ecore_exeq_elem *elem)
{
	struct ecore_exeq_elem *pos;
	struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
				  struct ecore_exeq_elem)
		if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
				  sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @sc:         device handle
 * @qo:         ecore_qable_obj
 * @elem:       ecore_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
				       union ecore_qable_obj *qo,
				       struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		ECORE_MSG(sc,
			  "ADD command is not allowed considering current registry state.");
		return rc;
	}

	/* Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(sc, "There is a pending ADD command already");
		return ECORE_EXISTS;
	}

	/* Consume the credit if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @sc:         device handle
 * @qo:         qable object to check
 * @elem:       element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
				       union ecore_qable_obj *qo,
				       struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
	struct ecore_vlan_mac_registry_elem *pos;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return ECORE_EXISTS.
	 */
	pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		ECORE_MSG(sc,
			  "DEL command is not allowed considering current registry state");
		return ECORE_EXISTS;
	}

	/* Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc, "There is a pending MOVE command already");
		return ECORE_INVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		ECORE_MSG(sc, "There is a pending DEL command already");
		return ECORE_EXISTS;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		PMD_DRV_LOG(ERR, sc, "Failed to return a credit");
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @sc:         device handle
 * @qo:         qable object to check (source)
 * @elem:       element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
					union ecore_qable_obj *qo,
					struct ecore_exeq_elem *elem)
{
	struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct ecore_exeq_elem query_elem;
	struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/* Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		ECORE_MSG(sc,
			  "MOVE command is not allowed considering current registry state");
		return ECORE_INVAL;
	}

	/* Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc,
			    "There is a pending DEL command on the source queue already");
		return ECORE_INVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		ECORE_MSG(sc, "There is a pending MOVE command already");
		return ECORE_EXISTS;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		PMD_DRV_LOG(ERR, sc,
			    "There is a pending ADD command on the destination queue already");
		return ECORE_INVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return ECORE_INVAL;

	if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			     &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return ECORE_INVAL;
	}

	return ECORE_SUCCESS;
}

static int ecore_validate_vlan_mac(struct bnx2x_softc *sc,
				   union ecore_qable_obj *qo,
				   struct ecore_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
		return ecore_validate_vlan_mac_add(sc, qo, elem);
	case ECORE_VLAN_MAC_DEL:
		return ecore_validate_vlan_mac_del(sc, qo, elem);
	case ECORE_VLAN_MAC_MOVE:
		return ecore_validate_vlan_mac_move(sc, qo, elem);
	default:
		return ECORE_INVAL;
	}
}

static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc,
				 union ecore_qable_obj *qo,
				 struct ecore_exeq_elem *elem)
{
	int rc = 0;

	/* If consumption wasn't required, nothing to do */
	if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
			   &elem->cmd_data.vlan_mac.vlan_mac_flags))
		return ECORE_SUCCESS;

	switch (elem->cmd_data.vlan_mac.cmd) {
	case ECORE_VLAN_MAC_ADD:
	case ECORE_VLAN_MAC_MOVE:
		rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
		break;
	case ECORE_VLAN_MAC_DEL:
		rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
		break;
	default:
		return ECORE_INVAL;
	}

	if (rc != TRUE)
		return ECORE_INVAL;

	return ECORE_SUCCESS;
}
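
/* Note (illustrative): ecore_remove_vlan_mac() undoes what 'validate' did to
 * the credit pool for a command being dropped from the queue:
 *
 *	ADD/MOVE validated -> a credit was consumed -> put_credit() returns it
 *	DEL validated      -> a credit was returned -> get_credit() re-takes it
 */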

/**
 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @sc:         device handle
 * @o:          ecore_vlan_mac_obj
 */
static int ecore_wait_vlan_mac(struct bnx2x_softc *sc,
			       struct ecore_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct ecore_exe_queue_obj *exeq = &o->exe_queue;
	struct ecore_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(sc, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!ecore_exe_queue_empty(exeq))
			ECORE_WAIT(sc, 1000);
		else
			return ECORE_SUCCESS;
	}

	return ECORE_TIMEOUT;
}

static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
					 struct ecore_vlan_mac_obj *o,
					 unsigned long *ramrod_flags)
{
	int rc = ECORE_SUCCESS;

	ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

	ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock");
	rc = __ecore_vlan_mac_h_write_trylock(sc, o);

	if (rc != ECORE_SUCCESS) {
		__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);

		/* Calling function should not differentiate between this case
		 * and the case in which there is already a pending ramrod
		 */
		rc = ECORE_PENDING;
	} else {
		rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
	}
	ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

	return rc;
}

/**
 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @sc:                 device handle
 * @o:                  ecore_vlan_mac_obj
 * @cqe:                completion element
 * @ramrod_flags:       if RAMROD_CONT is set, schedule the next execution chunk
 */
static int ecore_complete_vlan_mac(struct bnx2x_softc *sc,
				   struct ecore_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct ecore_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	ecore_exe_queue_reset_pending(sc, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return ECORE_INVAL;

	/* Run the next bulk of pending commands if requested */
	if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
		rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!ecore_exe_queue_empty(&o->exe_queue))
		return ECORE_PENDING;

	return ECORE_SUCCESS;
}
1389
1390 /**
1391  * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1392  *
1393  * @sc:         device handle
1394  * @o:          ecore_qable_obj
1395  * @elem:       ecore_exeq_elem
1396  */
1397 static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
1398                                    union ecore_qable_obj *qo,
1399                                    struct ecore_exeq_elem *elem)
1400 {
1401         struct ecore_exeq_elem query, *pos;
1402         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1403         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1404
1405         ECORE_MEMCPY(&query, elem, sizeof(query));
1406
1407         switch (elem->cmd_data.vlan_mac.cmd) {
1408         case ECORE_VLAN_MAC_ADD:
1409                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1410                 break;
1411         case ECORE_VLAN_MAC_DEL:
1412                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1413                 break;
1414         default:
1415                 /* Don't handle anything other than ADD or DEL */
1416                 return 0;
1417         }
1418
1419         /* If we found the appropriate element - delete it */
1420         pos = exeq->get(exeq, &query);
1421         if (pos) {
1422
1423                 /* Return the credit of the optimized command */
1424                 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1425                                     &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1426                         if ((query.cmd_data.vlan_mac.cmd ==
1427                              ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
1428                                 PMD_DRV_LOG(ERR, sc,
1429                                             "Failed to return the credit for the optimized ADD command");
1430                                 return ECORE_INVAL;
1431                         } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1432                                 PMD_DRV_LOG(ERR, sc,
1433                                             "Failed to recover the credit from the optimized DEL command");
1434                                 return ECORE_INVAL;
1435                         }
1436                 }
1437
1438                 ECORE_MSG(sc, "Optimizing %s command",
1439                           (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1440                           "ADD" : "DEL");
1441
1442                 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1443                 ecore_exe_queue_free_elem(sc, pos);
1444                 return 1;
1445         }
1446
1447         return 0;
1448 }
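
/* Illustrative note (not part of the driver): the optimization above means
 * that an ADD queued and then a DEL of the same address queued before any
 * ramrod is sent cancel each other out. With a hypothetical helper
 * queue_mac_cmd() that fills ecore_vlan_mac_ramrod_params and calls
 * ecore_config_vlan_mac() without RAMROD_EXEC/RAMROD_CONT set:
 *
 *	queue_mac_cmd(sc, o, addr, ECORE_VLAN_MAC_ADD); // pending in exe_queue
 *	queue_mac_cmd(sc, o, addr, ECORE_VLAN_MAC_DEL); // optimize() finds the
 *	                // pending ADD, removes it, returns its CAM credit, and
 *	                // the new DEL element is freed - no ramrod is posted
 */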
1449
1450 /**
1451  * ecore_vlan_mac_get_registry_elem - prepare a registry element
1452  *
1453  * @sc:   device handle
1454  * @o:          vlan_mac object the command is executed on
1455  * @elem:       execution queue element holding the command
1456  * @restore:    TRUE for the RESTORE flow (look up an existing registry entry)
1457  * @re:         output: the prepared registry element
1458  *
1459  * prepare a registry element according to the current command request.
1460  */
1461 static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
1462                                             struct ecore_vlan_mac_obj *o,
1463                                             struct ecore_exeq_elem *elem,
1464                                             int restore, struct
1465                                             ecore_vlan_mac_registry_elem
1466                                             **re)
1467 {
1468         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1469         struct ecore_vlan_mac_registry_elem *reg_elem;
1470
1471         /* Allocate a new registry element if needed. */
1472         if (!restore &&
1473             ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1474                 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1475                 if (!reg_elem)
1476                         return ECORE_NOMEM;
1477
1478                 /* Get a new CAM offset */
1479                 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1480                         /* This shall never happen, because we have checked the
1481                          * CAM availability in the 'validate'.
1482                          */
1483                         ECORE_DBG_BREAK_IF(1);
1484                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1485                         return ECORE_INVAL;
1486                 }
1487
1488                 ECORE_MSG(sc, "Got cam offset %d", reg_elem->cam_offset);
1489
1490                 /* Set a VLAN-MAC data */
1491                 ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1492                              sizeof(reg_elem->u));
1493
1494                 /* Copy the flags (needed for DEL and RESTORE flows) */
1495                 reg_elem->vlan_mac_flags =
1496                     elem->cmd_data.vlan_mac.vlan_mac_flags;
1497         } else                  /* DEL, RESTORE */
1498                 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1499
1500         *re = reg_elem;
1501         return ECORE_SUCCESS;
1502 }
1503
1504 /**
1505  * ecore_execute_vlan_mac - execute vlan mac command
1506  *
1507  * @sc:                 device handle
1508  * @qo:            queueable object (its embedded vlan_mac object is used)
1509  * @exe_chunk:     chunk of execution queue elements to configure
1510  * @ramrod_flags:  execution flags (RAMROD_RESTORE, RAMROD_DRV_CLR_ONLY, ...)
1511  *
1512  * go and send a ramrod!
1513  */
1514 static int ecore_execute_vlan_mac(struct bnx2x_softc *sc,
1515                                   union ecore_qable_obj *qo,
1516                                   ecore_list_t * exe_chunk,
1517                                   unsigned long *ramrod_flags)
1518 {
1519         struct ecore_exeq_elem *elem;
1520         struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1521         struct ecore_raw_obj *r = &o->raw;
1522         int rc, idx = 0;
1523         int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1524         int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1525         struct ecore_vlan_mac_registry_elem *reg_elem;
1526         enum ecore_vlan_mac_cmd cmd;
1527
1528         /* If DRIVER_ONLY execution is requested, cleanup a registry
1529          * and exit. Otherwise send a ramrod to FW.
1530          */
1531         if (!drv_only) {
1532
1533                 /* Set pending */
1534                 r->set_pending(r);
1535
1536                 /* Fill the ramrod data */
1537                 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1538                                           struct ecore_exeq_elem) {
1539                         cmd = elem->cmd_data.vlan_mac.cmd;
1540                         /* We will add to the target object in MOVE command, so
1541                          * change the object for a CAM search.
1542                          */
1543                         if (cmd == ECORE_VLAN_MAC_MOVE)
1544                                 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1545                         else
1546                                 cam_obj = o;
1547
1548                         rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1549                                                               elem, restore,
1550                                                               &reg_elem);
1551                         if (rc)
1552                                 goto error_exit;
1553
1554                         ECORE_DBG_BREAK_IF(!reg_elem);
1555
1556                         /* Push a new entry into the registry */
1557                         if (!restore &&
1558                             ((cmd == ECORE_VLAN_MAC_ADD) ||
1559                              (cmd == ECORE_VLAN_MAC_MOVE)))
1560                                 ECORE_LIST_PUSH_HEAD(&reg_elem->link,
1561                                                      &cam_obj->head);
1562
1563                         /* Configure a single command in a ramrod data buffer */
1564                         o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset);
1565
1566                         /* MOVE command consumes 2 entries in the ramrod data */
1567                         if (cmd == ECORE_VLAN_MAC_MOVE)
1568                                 idx += 2;
1569                         else
1570                                 idx++;
1571                 }
1572
1573                 /*
1574                  * No need for an explicit memory barrier here: we would
1575                  * anyway have to ensure the ordering of writing to the SPQ
1576                  * element with respect to updating the SPQ producer (which
1577                  * involves a memory read), so a full memory barrier is
1578                  * placed there instead (inside ecore_sp_post()).
1579                  */
1580
1581                 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1582                                    r->rdata_mapping, ETH_CONNECTION_TYPE);
1583                 if (rc)
1584                         goto error_exit;
1585         }
1586
1587         /* Now, when we are done with the ramrod - clean up the registry */
1588         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1589                 cmd = elem->cmd_data.vlan_mac.cmd;
1590                 if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) {
1591                         reg_elem = o->check_del(sc, o,
1592                                                 &elem->cmd_data.vlan_mac.u);
1593
1594                         ECORE_DBG_BREAK_IF(!reg_elem);
1595
1596                         o->put_cam_offset(o, reg_elem->cam_offset);
1597                         ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
1598                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1599                 }
1600         }
1601
1602         if (!drv_only)
1603                 return ECORE_PENDING;
1604         else
1605                 return ECORE_SUCCESS;
1606
1607 error_exit:
1608         r->clear_pending(r);
1609
1610         /* Cleanup a registry in case of a failure */
1611         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1612                 cmd = elem->cmd_data.vlan_mac.cmd;
1613
1614                 if (cmd == ECORE_VLAN_MAC_MOVE)
1615                         cam_obj = elem->cmd_data.vlan_mac.target_obj;
1616                 else
1617                         cam_obj = o;
1618
1619                 /* Delete all newly added above entries */
1620                 if (!restore &&
1621                     ((cmd == ECORE_VLAN_MAC_ADD) ||
1622                      (cmd == ECORE_VLAN_MAC_MOVE))) {
1623                         reg_elem = o->check_del(sc, cam_obj,
1624                                                 &elem->cmd_data.vlan_mac.u);
1625                         if (reg_elem) {
1626                                 ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
1627                                                         &cam_obj->head);
1628                                 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1629                         }
1630                 }
1631         }
1632
1633         return rc;
1634 }
1635
1636 static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct
1637                                        ecore_vlan_mac_ramrod_params *p)
1638 {
1639         struct ecore_exeq_elem *elem;
1640         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1641         int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1642
1643         /* Allocate the execution queue element */
1644         elem = ecore_exe_queue_alloc_elem(sc);
1645         if (!elem)
1646                 return ECORE_NOMEM;
1647
1648         /* Set the command 'length' */
1649         switch (p->user_req.cmd) {
1650         case ECORE_VLAN_MAC_MOVE:
1651                 elem->cmd_len = 2;
1652                 break;
1653         default:
1654                 elem->cmd_len = 1;
1655         }
1656
1657         /* Fill the object specific info */
1658         ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req,
1659                      sizeof(p->user_req));
1660
1661         /* Try to add a new command to the pending list */
1662         return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1663 }
1664
1665 /**
1666  * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1667  *
1668  * @sc:   device handle
1669  * @p:    ramrod parameters describing the user request
1670  *
1671  */
1672 int ecore_config_vlan_mac(struct bnx2x_softc *sc,
1673                           struct ecore_vlan_mac_ramrod_params *p)
1674 {
1675         int rc = ECORE_SUCCESS;
1676         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1677         unsigned long *ramrod_flags = &p->ramrod_flags;
1678         int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
1679         struct ecore_raw_obj *raw = &o->raw;
1680
1681         /*
1682          * Add new elements to the execution list for commands that require it.
1683          */
1684         if (!cont) {
1685                 rc = ecore_vlan_mac_push_new_cmd(sc, p);
1686                 if (rc)
1687                         return rc;
1688         }
1689
1690         /* If nothing will be executed further in this iteration we want to
1691          * return PENDING if there are pending commands
1692          */
1693         if (!ecore_exe_queue_empty(&o->exe_queue))
1694                 rc = ECORE_PENDING;
1695
1696         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1697                 ECORE_MSG(sc,
1698                           "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
1699                 raw->clear_pending(raw);
1700         }
1701
1702         /* Execute commands if required */
1703         if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
1704             ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
1705                 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
1706                                                    &p->ramrod_flags);
1707                 if (rc < 0)
1708                         return rc;
1709         }
1710
1711         /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1712          * then the user wants to wait until the last command is done.
1713          */
1714         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1715                 /* Wait maximum for the current exe_queue length iterations plus
1716                  * one (for the current pending command).
1717                  */
1718                 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
1719
1720                 while (!ecore_exe_queue_empty(&o->exe_queue) &&
1721                        max_iterations--) {
1722
1723                         /* Wait for the current command to complete */
1724                         rc = raw->wait_comp(sc, raw);
1725                         if (rc)
1726                                 return rc;
1727
1728                         /* Make a next step */
1729                         rc = __ecore_vlan_mac_execute_step(sc,
1730                                                            p->vlan_mac_obj,
1731                                                            &p->ramrod_flags);
1732                         if (rc < 0)
1733                                 return rc;
1734                 }
1735
1736                 return ECORE_SUCCESS;
1737         }
1738
1739         return rc;
1740 }
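
/* Usage sketch (illustrative only; the union layout user_req.u.mac.mac is
 * assumed from the ecore_sp.h definitions): a caller that synchronously
 * adds one MAC could fill the ramrod parameters roughly as follows:
 *
 *	struct ecore_vlan_mac_ramrod_params p = { 0 };
 *	int rc;
 *
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *	ECORE_MEMCPY(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = ecore_config_vlan_mac(sc, &p);
 *	// rc == ECORE_SUCCESS only after the last ramrod has completed
 */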
1741
1742 /**
1743  * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1744  *
1745  * @sc:                 device handle
1746  * @o:                  vlan_mac object to delete the elements from
1747  * @vlan_mac_flags:     only elements with exactly these flags are deleted
1748  * @ramrod_flags:       execution flags to be used for this deletion
1749  *
1750  * Returns 0 if the last operation has completed successfully and there are
1751  * no more elements left, positive value if the last operation has completed
1752  * successfully and there are more previously configured elements, negative
1753  * value if the current operation has failed.
1754  */
1755 static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
1756                                   struct ecore_vlan_mac_obj *o,
1757                                   unsigned long *vlan_mac_flags,
1758                                   unsigned long *ramrod_flags)
1759 {
1760         struct ecore_vlan_mac_registry_elem *pos = NULL;
1761         int rc = 0, read_lock;
1762         struct ecore_vlan_mac_ramrod_params p;
1763         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1764         struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
1765
1766         /* Clear pending commands first */
1767
1768         ECORE_SPIN_LOCK_BH(&exeq->lock);
1769
1770         ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
1771                                        &exeq->exe_queue, link,
1772                                        struct ecore_exeq_elem) {
1773                 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1774                     *vlan_mac_flags) {
1775                         rc = exeq->remove(sc, exeq->owner, exeq_pos);
1776                         if (rc) {
1777                                 PMD_DRV_LOG(ERR, sc, "Failed to remove command");
1778                                 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1779                                 return rc;
1780                         }
1781                         ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
1782                                                 &exeq->exe_queue);
1783                         ecore_exe_queue_free_elem(sc, exeq_pos);
1784                 }
1785         }
1786
1787         ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1788
1789         /* Prepare a command request */
1790         ECORE_MEMSET(&p, 0, sizeof(p));
1791         p.vlan_mac_obj = o;
1792         p.ramrod_flags = *ramrod_flags;
1793         p.user_req.cmd = ECORE_VLAN_MAC_DEL;
1794
1795         /* Add all but the last VLAN-MAC to the execution queue without
1796          * actually executing anything.
1797          */
1798         ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
1799         ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
1800         ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1801
1802         ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)");
1803         read_lock = ecore_vlan_mac_h_read_lock(sc, o);
1804         if (read_lock != ECORE_SUCCESS)
1805                 return read_lock;
1806
1807         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
1808                                   struct ecore_vlan_mac_registry_elem) {
1809                 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1810                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1811                         ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
1812                         rc = ecore_config_vlan_mac(sc, &p);
1813                         if (rc < 0) {
1814                                 PMD_DRV_LOG(ERR, sc,
1815                                             "Failed to add a new DEL command");
1816                                 ecore_vlan_mac_h_read_unlock(sc, o);
1817                                 return rc;
1818                         }
1819                 }
1820         }
1821
1822         ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
1823         ecore_vlan_mac_h_read_unlock(sc, o);
1824
1825         p.ramrod_flags = *ramrod_flags;
1826         ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1827
1828         return ecore_config_vlan_mac(sc, &p);
1829 }
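
/* Design note: the two-phase pattern above - queue every DEL with
 * RAMROD_EXEC/RAMROD_CONT/RAMROD_COMP_WAIT cleared, then re-issue
 * ecore_config_vlan_mac() once with RAMROD_CONT set and the caller's
 * original flags restored - lets the whole registry be torn down in
 * exe_chunk_len-sized ramrods instead of one ramrod per address.
 */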
1830
1831 static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
1832                                uint32_t cid, uint8_t func_id,
1833                                void *rdata,
1834                                ecore_dma_addr_t rdata_mapping, int state,
1835                                unsigned long *pstate, ecore_obj_type type)
1836 {
1837         raw->func_id = func_id;
1838         raw->cid = cid;
1839         raw->cl_id = cl_id;
1840         raw->rdata = rdata;
1841         raw->rdata_mapping = rdata_mapping;
1842         raw->state = state;
1843         raw->pstate = pstate;
1844         raw->obj_type = type;
1845         raw->check_pending = ecore_raw_check_pending;
1846         raw->clear_pending = ecore_raw_clear_pending;
1847         raw->set_pending = ecore_raw_set_pending;
1848         raw->wait_comp = ecore_raw_wait;
1849 }
1850
1851 static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
1852                                        uint8_t cl_id, uint32_t cid,
1853                                        uint8_t func_id, void *rdata,
1854                                        ecore_dma_addr_t rdata_mapping,
1855                                        int state, unsigned long *pstate,
1856                                        ecore_obj_type type,
1857                                        struct ecore_credit_pool_obj
1858                                        *macs_pool, struct ecore_credit_pool_obj
1859                                        *vlans_pool)
1860 {
1861         ECORE_LIST_INIT(&o->head);
1862         o->head_reader = 0;
1863         o->head_exe_request = FALSE;
1864         o->saved_ramrod_flags = 0;
1865
1866         o->macs_pool = macs_pool;
1867         o->vlans_pool = vlans_pool;
1868
1869         o->delete_all = ecore_vlan_mac_del_all;
1870         o->restore = ecore_vlan_mac_restore;
1871         o->complete = ecore_complete_vlan_mac;
1872         o->wait = ecore_wait_vlan_mac;
1873
1874         ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1875                            state, pstate, type);
1876 }
1877
1878 void ecore_init_mac_obj(struct bnx2x_softc *sc,
1879                         struct ecore_vlan_mac_obj *mac_obj,
1880                         uint8_t cl_id, uint32_t cid, uint8_t func_id,
1881                         void *rdata, ecore_dma_addr_t rdata_mapping, int state,
1882                         unsigned long *pstate, ecore_obj_type type,
1883                         struct ecore_credit_pool_obj *macs_pool)
1884 {
1885         union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
1886
1887         ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1888                                    rdata_mapping, state, pstate, type,
1889                                    macs_pool, NULL);
1890
1891         /* CAM credit pool handling */
1892         mac_obj->get_credit = ecore_get_credit_mac;
1893         mac_obj->put_credit = ecore_put_credit_mac;
1894         mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
1895         mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
1896
1897         if (CHIP_IS_E1x(sc)) {
1898                 mac_obj->set_one_rule = ecore_set_one_mac_e1x;
1899                 mac_obj->check_del = ecore_check_mac_del;
1900                 mac_obj->check_add = ecore_check_mac_add;
1901                 mac_obj->check_move = ecore_check_move_always_err;
1902                 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1903
1904                 /* Exe Queue */
1905                 ecore_exe_queue_init(sc,
1906                                      &mac_obj->exe_queue, 1, qable_obj,
1907                                      ecore_validate_vlan_mac,
1908                                      ecore_remove_vlan_mac,
1909                                      ecore_optimize_vlan_mac,
1910                                      ecore_execute_vlan_mac,
1911                                      ecore_exeq_get_mac);
1912         } else {
1913                 mac_obj->set_one_rule = ecore_set_one_mac_e2;
1914                 mac_obj->check_del = ecore_check_mac_del;
1915                 mac_obj->check_add = ecore_check_mac_add;
1916                 mac_obj->check_move = ecore_check_move;
1917                 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1918                 mac_obj->get_n_elements = ecore_get_n_elements;
1919
1920                 /* Exe Queue */
1921                 ecore_exe_queue_init(sc,
1922                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1923                                      qable_obj, ecore_validate_vlan_mac,
1924                                      ecore_remove_vlan_mac,
1925                                      ecore_optimize_vlan_mac,
1926                                      ecore_execute_vlan_mac,
1927                                      ecore_exeq_get_mac);
1928         }
1929 }
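
/* Note: on E1x the object programs one MAC per RAMROD_CMD_ID_ETH_SET_MAC
 * ramrod (execution chunk length 1), while on E2 and newer it batches up
 * to CLASSIFY_RULES_COUNT classification rules per ramrod. Both paths
 * share the same validate/remove/optimize/execute/get callbacks, so
 * callers drive the object identically on all chips.
 */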
1930
1931 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
1932 static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct
1933                                        tstorm_eth_mac_filter_config
1934                                        *mac_filters, uint16_t pf_id)
1935 {
1936         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1937
1938         uint32_t addr = BAR_TSTRORM_INTMEM +
1939             TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
1940
1941         ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters);
1942 }
1943
1944 static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
1945                                  struct ecore_rx_mode_ramrod_params *p)
1946 {
1947         /* update the sc MAC filter structure */
1948         uint32_t mask = (1 << p->cl_id);
1949
1950         struct tstorm_eth_mac_filter_config *mac_filters =
1951             (struct tstorm_eth_mac_filter_config *)p->rdata;
1952
1953         /* initial setting is drop-all */
1954         uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
1955         uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
1956         uint8_t unmatched_unicast = 0;
1957
1958         /* In E1x we only take the RX accept flags into account, since TX
1959          * switching isn't enabled. */
1960         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
1961                 /* accept matched ucast */
1962                 drop_all_ucast = 0;
1963
1964         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
1965                 /* accept matched mcast */
1966                 drop_all_mcast = 0;
1967
1968         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
1969                 /* accept all ucast */
1970                 drop_all_ucast = 0;
1971                 accp_all_ucast = 1;
1972         }
1973         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
1974                 /* accept all mcast */
1975                 drop_all_mcast = 0;
1976                 accp_all_mcast = 1;
1977         }
1978         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
1979                 /* accept (all) bcast */
1980                 accp_all_bcast = 1;
1981         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
1982                 /* accept unmatched unicasts */
1983                 unmatched_unicast = 1;
1984
1985         mac_filters->ucast_drop_all = drop_all_ucast ?
1986             mac_filters->ucast_drop_all | mask :
1987             mac_filters->ucast_drop_all & ~mask;
1988
1989         mac_filters->mcast_drop_all = drop_all_mcast ?
1990             mac_filters->mcast_drop_all | mask :
1991             mac_filters->mcast_drop_all & ~mask;
1992
1993         mac_filters->ucast_accept_all = accp_all_ucast ?
1994             mac_filters->ucast_accept_all | mask :
1995             mac_filters->ucast_accept_all & ~mask;
1996
1997         mac_filters->mcast_accept_all = accp_all_mcast ?
1998             mac_filters->mcast_accept_all | mask :
1999             mac_filters->mcast_accept_all & ~mask;
2000
2001         mac_filters->bcast_accept_all = accp_all_bcast ?
2002             mac_filters->bcast_accept_all | mask :
2003             mac_filters->bcast_accept_all & ~mask;
2004
2005         mac_filters->unmatched_unicast = unmatched_unicast ?
2006             mac_filters->unmatched_unicast | mask :
2007             mac_filters->unmatched_unicast & ~mask;
2008
2009         ECORE_MSG(sc, "drop_ucast 0x%x drop_mcast 0x%x accp_ucast 0x%x"
2010                   " accp_mcast 0x%x accp_bcast 0x%x",
2011                   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2012                   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2013                   mac_filters->bcast_accept_all);
2014
2015         /* write the MAC filter structure */
2016         __storm_memset_mac_filters(sc, mac_filters, p->func_id);
2017
2018         /* The operation is completed */
2019         ECORE_CLEAR_BIT(p->state, p->pstate);
2020         ECORE_SMP_MB_AFTER_CLEAR_BIT();
2021
2022         return ECORE_SUCCESS;
2023 }
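
/* The ternary updates above maintain one bit per client (cl_id) in each
 * filter word: the bit is set when the condition holds for this client
 * and cleared otherwise. A minimal equivalent helper (hypothetical, not
 * used by the driver) would be:
 *
 *	static inline uint32_t set_mask_bit(uint32_t word, uint32_t mask,
 *					    int on)
 *	{
 *		return on ? (word | mask) : (word & ~mask);
 *	}
 *
 *	mac_filters->ucast_drop_all =
 *	    set_mask_bit(mac_filters->ucast_drop_all, mask, drop_all_ucast);
 */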
2024
2025 /* Setup ramrod data */
2026 static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header
2027                                            *hdr, uint8_t rule_cnt)
2028 {
2029         hdr->echo = ECORE_CPU_TO_LE32(cid);
2030         hdr->rule_cnt = rule_cnt;
2031 }
2032
2033 static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags, struct eth_filter_rules_cmd
2034                                            *cmd, int clear_accept_all)
2035 {
2036         uint16_t state;
2037
2038         /* start with 'drop-all' */
2039         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2040             ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2041
2042         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2043                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2044
2045         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2046                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2047
2048         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2049                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2050                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2051         }
2052
2053         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2054                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2055                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2056         }
2057         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2058                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2059
2060         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2061                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2062                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2063         }
2064         if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2065                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2066
2067         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2068         if (clear_accept_all) {
2069                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2070                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2071                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2072                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2073         }
2074
2075         cmd->state = ECORE_CPU_TO_LE16(state);
2076 }
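
/* Worked example (illustrative, with clear_accept_all FALSE): when
 * ECORE_ACCEPT_UNICAST, ECORE_ACCEPT_ALL_MULTICAST and
 * ECORE_ACCEPT_BROADCAST are set in accept_flags, the function starts
 * from UCAST_DROP_ALL | MCAST_DROP_ALL, clears both DROP_ALL bits and
 * ends up with
 *
 *	cmd->state == ECORE_CPU_TO_LE16(
 *	    ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL |
 *	    ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL);
 *
 * i.e. matched unicast is accepted implicitly (neither DROP_ALL nor
 * ACCEPT_ALL set), while all multicast and broadcast are accepted.
 */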
2077
2078 static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
2079                                 struct ecore_rx_mode_ramrod_params *p)
2080 {
2081         struct eth_filter_rules_ramrod_data *data = p->rdata;
2082         int rc;
2083         uint8_t rule_idx = 0;
2084
2085         /* Reset the ramrod data buffer */
2086         ECORE_MEMSET(data, 0, sizeof(*data));
2087
2088         /* Setup ramrod data */
2089
2090         /* Tx (internal switching) */
2091         if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2092                 data->rules[rule_idx].client_id = p->cl_id;
2093                 data->rules[rule_idx].func_id = p->func_id;
2094
2095                 data->rules[rule_idx].cmd_general_data =
2096                     ETH_FILTER_RULES_CMD_TX_CMD;
2097
2098                 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2099                                                &(data->rules[rule_idx++]),
2100                                                FALSE);
2101         }
2102
2103         /* Rx */
2104         if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2105                 data->rules[rule_idx].client_id = p->cl_id;
2106                 data->rules[rule_idx].func_id = p->func_id;
2107
2108                 data->rules[rule_idx].cmd_general_data =
2109                     ETH_FILTER_RULES_CMD_RX_CMD;
2110
2111                 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2112                                                &(data->rules[rule_idx++]),
2113                                                FALSE);
2114         }
2115
2116         /* If FCoE Queue configuration has been requested, configure the Rx
2117          * and internal switching modes for this queue in separate rules.
2118          *
2119          * The FCoE queue shall never be set to ACCEPT_ALL packets of any
2120          * sort: MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2121          */
2122         if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2123                 /*  Tx (internal switching) */
2124                 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2125                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2126                         data->rules[rule_idx].func_id = p->func_id;
2127
2128                         data->rules[rule_idx].cmd_general_data =
2129                             ETH_FILTER_RULES_CMD_TX_CMD;
2130
2131                         ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2132                                                        &(data->rules
2133                                                          [rule_idx++]), TRUE);
2134                 }
2135
2136                 /* Rx */
2137                 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2138                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2139                         data->rules[rule_idx].func_id = p->func_id;
2140
2141                         data->rules[rule_idx].cmd_general_data =
2142                             ETH_FILTER_RULES_CMD_RX_CMD;
2143
2144                         ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2145                                                        &(data->rules
2146                                                          [rule_idx++]), TRUE);
2147                 }
2148         }
2149
2150         /* Set the ramrod header (most importantly - number of rules to
2151          * configure).
2152          */
2153         ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2154
2155         ECORE_MSG(sc,
2156                   "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
2157                   data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
2158
2159         /* No need for an explicit memory barrier here: we would anyway
2160          * have to ensure the ordering of writing to the SPQ element with
2161          * respect to updating the SPQ producer (which involves a memory
2162          * read), so a full memory barrier is placed there instead
2163          * (inside ecore_sp_post()).
2164          */
2165
2166         /* Send a ramrod */
2167         rc = ecore_sp_post(sc,
2168                            RAMROD_CMD_ID_ETH_FILTER_RULES,
2169                            p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE);
2170         if (rc)
2171                 return rc;
2172
2173         /* Ramrod completion is pending */
2174         return ECORE_PENDING;
2175 }
2176
2177 static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc,
2178                                       struct ecore_rx_mode_ramrod_params *p)
2179 {
2180         return ecore_state_wait(sc, p->state, p->pstate);
2181 }
2182
2183 static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc,
2184                                     __rte_unused struct
2185                                     ecore_rx_mode_ramrod_params *p)
2186 {
2187         /* Do nothing */
2188         return ECORE_SUCCESS;
2189 }
2190
2191 int ecore_config_rx_mode(struct bnx2x_softc *sc,
2192                          struct ecore_rx_mode_ramrod_params *p)
2193 {
2194         int rc;
2195
2196         /* Configure the new classification in the chip */
2197         if (p->rx_mode_obj->config_rx_mode) {
2198                 rc = p->rx_mode_obj->config_rx_mode(sc, p);
2199                 if (rc < 0)
2200                         return rc;
2201
2202                 /* Wait for a ramrod completion if was requested */
2203                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2204                         rc = p->rx_mode_obj->wait_comp(sc, p);
2205                         if (rc)
2206                                 return rc;
2207                 }
2208         } else {
2209                 ECORE_MSG(sc, "ERROR: config_rx_mode is NULL");
2210                 return -1;
2211         }
2212
2213         return rc;
2214 }
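
/* Usage sketch (illustrative; the rx_mode_obj location in the softc is
 * assumed): a hypothetical caller enabling accept-all-multicast on both
 * RX and the internal TX switching path might do:
 *
 *	struct ecore_rx_mode_ramrod_params p = { 0 };
 *
 *	p.rx_mode_obj = &sc->rx_mode_obj;
 *	// cl_id, cid, func_id, rdata, rdata_mapping, state setup elided
 *	ECORE_SET_BIT_NA(RAMROD_RX, &p.ramrod_flags);
 *	ECORE_SET_BIT_NA(RAMROD_TX, &p.ramrod_flags);
 *	ECORE_SET_BIT(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	ECORE_SET_BIT_NA(ECORE_ACCEPT_ALL_MULTICAST, &p.rx_accept_flags);
 *	rc = ecore_config_rx_mode(sc, &p);
 */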
2215
2216 void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o)
2217 {
2218         if (CHIP_IS_E1x(sc)) {
2219                 o->wait_comp = ecore_empty_rx_mode_wait;
2220                 o->config_rx_mode = ecore_set_rx_mode_e1x;
2221         } else {
2222                 o->wait_comp = ecore_wait_rx_mode_comp_e2;
2223                 o->config_rx_mode = ecore_set_rx_mode_e2;
2224         }
2225 }
2226
2227 /********************* Multicast verbs: SET, CLEAR ****************************/
2228 static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac)
2229 {
2230         return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2231 }
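
/* The bin is the top byte of the little-endian CRC32 of the address, so
 * every multicast MAC hashes into one of 256 bins of the approximate-match
 * vector. Sketch (the resulting bin value is illustrative, not computed):
 *
 *	uint8_t mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *	uint8_t bin = ecore_mcast_bin_from_mac(mac);	// some value in 0..255
 *	BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
 */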
2232
2233 struct ecore_mcast_mac_elem {
2234         ecore_list_entry_t link;
2235         uint8_t mac[ETH_ALEN];
2236         uint8_t pad[2];         /* For a natural alignment of the following buffer */
2237 };
2238
2239 struct ecore_pending_mcast_cmd {
2240         ecore_list_entry_t link;
2241         int type;               /* ECORE_MCAST_CMD_X */
2242         union {
2243                 ecore_list_t macs_head;
2244                 uint32_t macs_num;      /* Needed for DEL command */
2245                 int next_bin;   /* Needed for RESTORE flow with approximate match */
2246         } data;
2247
2248         int done;               /* set to TRUE when the command has been handled. In
2249                                  * practice it is only used for 57712 handling, where one
2250                                  * pending command may be handled in a few operations. Since
2251                                  * for other chips every operation is completed in a single
2252                                  * ramrod, there is no need to utilize this field there.
2253                                  */
2254 };
2255
2256 static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o)
2257 {
2258         if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2259             o->raw.wait_comp(sc, &o->raw))
2260                 return ECORE_TIMEOUT;
2261
2262         return ECORE_SUCCESS;
2263 }
2264
2265 static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
2266                                    struct ecore_mcast_obj *o,
2267                                    struct ecore_mcast_ramrod_params *p,
2268                                    enum ecore_mcast_cmd cmd)
2269 {
2270         int total_sz;
2271         struct ecore_pending_mcast_cmd *new_cmd;
2272         struct ecore_mcast_mac_elem *cur_mac = NULL;
2273         struct ecore_mcast_list_elem *pos;
2274         int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2275                              p->mcast_list_len : 0);
2276
2277         /* If the command is empty ("handle pending commands only"), break */
2278         if (!p->mcast_list_len)
2279                 return ECORE_SUCCESS;
2280
2281         total_sz = sizeof(*new_cmd) +
2282             macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2283
2284         /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2285         new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2286
2287         if (!new_cmd)
2288                 return ECORE_NOMEM;
2289
2290         ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d",
2291                   cmd, macs_list_len);
2292
2293         ECORE_LIST_INIT(&new_cmd->data.macs_head);
2294
2295         new_cmd->type = cmd;
2296         new_cmd->done = FALSE;
2297
2298         switch (cmd) {
2299         case ECORE_MCAST_CMD_ADD:
2300                 cur_mac = (struct ecore_mcast_mac_elem *)
2301                     ((uint8_t *) new_cmd + sizeof(*new_cmd));
2302
2303                 /* Push the MACs of the current command into the pending command
2304                  * MACs list: FIFO
2305                  */
2306                 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2307                                           struct ecore_mcast_list_elem) {
2308                         ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2309                         ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2310                                              &new_cmd->data.macs_head);
2311                         cur_mac++;
2312                 }
2313
2314                 break;
2315
2316         case ECORE_MCAST_CMD_DEL:
2317                 new_cmd->data.macs_num = p->mcast_list_len;
2318                 break;
2319
2320         case ECORE_MCAST_CMD_RESTORE:
2321                 new_cmd->data.next_bin = 0;
2322                 break;
2323
2324         default:
2325                 ECORE_FREE(sc, new_cmd, total_sz);
2326                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2327                 return ECORE_INVAL;
2328         }
2329
2330         /* Push the new pending command to the tail of the pending list: FIFO */
2331         ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2332
2333         o->set_sched(o);
2334
2335         return ECORE_PENDING;
2336 }
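
/* Memory layout note: for an ADD command the header and its MAC array come
 * from the single total_sz allocation above; cur_mac starts just past the
 * header:
 *
 *	new_cmd -> +--------------------------------------+
 *	           | struct ecore_pending_mcast_cmd       |
 *	cur_mac -> +--------------------------------------+
 *	           | macs_list_len *                      |
 *	           |     struct ecore_mcast_mac_elem      |
 *	           +--------------------------------------+
 */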
2337
2338 /**
2339  * ecore_mcast_get_next_bin - get the next set bin (index)
2340  *
2341  * @o:          multicast object holding the approximate-match bin vector
2342  * @last:       index to start looking from (inclusive)
2343  *
2344  * returns the next found (set) bin or a negative value if none is found.
2345  */
2346 static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2347 {
2348         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2349
2350         for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2351                 if (o->registry.aprox_match.vec[i])
2352                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2353                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2354                                 if (BIT_VEC64_TEST_BIT
2355                                     (o->registry.aprox_match.vec, cur_bit)) {
2356                                         return cur_bit;
2357                                 }
2358                         }
2359                 inner_start = 0;
2360         }
2361
2362         /* None found */
2363         return -1;
2364 }
2365
2366 /**
2367  * ecore_mcast_clear_first_bin - find the first set bin and clear it
2368  *
2369  * @o:          multicast object holding the approximate-match bin vector
2370  *
2371  * returns the index of the found bin or -1 if none is found
2372  */
2373 static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2374 {
2375         int cur_bit = ecore_mcast_get_next_bin(o, 0);
2376
2377         if (cur_bit >= 0)
2378                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2379
2380         return cur_bit;
2381 }
2382
2383 static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2384 {
2385         struct ecore_raw_obj *raw = &o->raw;
2386         uint8_t rx_tx_flag = 0;
2387
2388         if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2389             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2390                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2391
2392         if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2393             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2394                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2395
2396         return rx_tx_flag;
2397 }
2398
2399 static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
2400                                         struct ecore_mcast_obj *o, int idx,
2401                                         union ecore_mcast_config_data *cfg_data,
2402                                         enum ecore_mcast_cmd cmd)
2403 {
2404         struct ecore_raw_obj *r = &o->raw;
2405         struct eth_multicast_rules_ramrod_data *data =
2406             (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2407         uint8_t func_id = r->func_id;
2408         uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2409         int bin;
2410
2411         if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2412                 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2413
2414         data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2415
2416         /* Get a bin and update a bins' vector */
2417         switch (cmd) {
2418         case ECORE_MCAST_CMD_ADD:
2419                 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2420                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2421                 break;
2422
2423         case ECORE_MCAST_CMD_DEL:
2424                 /* If there were no more bins to clear
2425                  * (ecore_mcast_clear_first_bin() returns -1) then we would
2426                  * clear any (0xff) bin.
2427                  * See ecore_mcast_validate_e2() for explanation when it may
2428                  * happen.
2429                  */
2430                 bin = ecore_mcast_clear_first_bin(o);
2431                 break;
2432
2433         case ECORE_MCAST_CMD_RESTORE:
2434                 bin = cfg_data->bin;
2435                 break;
2436
2437         default:
2438                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2439                 return;
2440         }
2441
2442         ECORE_MSG(sc, "%s bin %d",
2443                   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2444                    "Setting" : "Clearing"), bin);
2445
2446         data->rules[idx].bin_id = (uint8_t) bin;
2447         data->rules[idx].func_id = func_id;
2448         data->rules[idx].engine_id = o->engine_id;
2449 }
2450
2451 /**
2452  * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2453  *
2454  * @sc:         device handle
2455  * @o:
2456  * @start_bin:  index in the registry to start from (including)
2457  * @rdata_idx:  index in the ramrod data to start from
2458  *
2459  * returns last handled bin index or -1 if all bins have been handled
2460  */
2461 static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
2462                                              struct ecore_mcast_obj *o,
2463                                              int start_bin, int *rdata_idx)
2464 {
2465         int cur_bin, cnt = *rdata_idx;
2466         union ecore_mcast_config_data cfg_data = { NULL };
2467
2468         /* go through the registry and configure the bins from it */
2469         for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2470              cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2471
2472                 cfg_data.bin = (uint8_t) cur_bin;
2473                 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE);
2474
2475                 cnt++;
2476
2477                 ECORE_MSG(sc, "About to configure a bin %d", cur_bin);
2478
2479                 /* Break if we reached the maximum number
2480                  * of rules.
2481                  */
2482                 if (cnt >= o->max_cmd_len)
2483                         break;
2484         }
2485
2486         *rdata_idx = cnt;
2487
2488         return cur_bin;
2489 }
2490
2491 static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
2492                                            struct ecore_mcast_obj *o,
2493                                            struct ecore_pending_mcast_cmd
2494                                            *cmd_pos, int *line_idx)
2495 {
2496         struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2497         int cnt = *line_idx;
2498         union ecore_mcast_config_data cfg_data = { NULL };
2499
2500         ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2501                                        &cmd_pos->data.macs_head, link,
2502                                        struct ecore_mcast_mac_elem) {
2503
2504                 cfg_data.mac = &pmac_pos->mac[0];
2505                 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2506
2507                 cnt++;
2508
2509                 ECORE_MSG(sc,
2510                           "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2511                           pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
2512                           pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2513
2514                 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2515                                         &cmd_pos->data.macs_head);
2516
2517                 /* Break if we reached the maximum number
2518                  * of rules.
2519                  */
2520                 if (cnt >= o->max_cmd_len)
2521                         break;
2522         }
2523
2524         *line_idx = cnt;
2525
2526         /* if no more MACs to configure - we are done */
2527         if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2528                 cmd_pos->done = TRUE;
2529 }
2530
2531 static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
2532                                            struct ecore_mcast_obj *o,
2533                                            struct ecore_pending_mcast_cmd
2534                                            *cmd_pos, int *line_idx)
2535 {
2536         int cnt = *line_idx;
2537
2538         while (cmd_pos->data.macs_num) {
2539                 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2540
2541                 cnt++;
2542
2543                 cmd_pos->data.macs_num--;
2544
2545                 ECORE_MSG(sc, "Deleting MAC. %d left, cnt is %d",
2546                           cmd_pos->data.macs_num, cnt);
2547
2548                 /* Break if we reached the maximum
2549                  * number of rules.
2550                  */
2551                 if (cnt >= o->max_cmd_len)
2552                         break;
2553         }
2554
2555         *line_idx = cnt;
2556
2557         /* If we cleared all bins - we are done */
2558         if (!cmd_pos->data.macs_num)
2559                 cmd_pos->done = TRUE;
2560 }
2561
2562 static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc,
2563                                                struct ecore_mcast_obj *o, struct
2564                                                ecore_pending_mcast_cmd
2565                                                *cmd_pos, int *line_idx)
2566 {
2567         cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2568                                                 line_idx);
2569
2570         if (cmd_pos->data.next_bin < 0)
2571                 /* If o->set_restore returned -1 we are done */
2572                 cmd_pos->done = TRUE;
2573         else
2574                 /* Start from the next bin next time */
2575                 cmd_pos->data.next_bin++;
2576 }
2577
2578 static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct
2579                                               ecore_mcast_ramrod_params
2580                                               *p)
2581 {
2582         struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2583         int cnt = 0;
2584         struct ecore_mcast_obj *o = p->mcast_obj;
2585
2586         ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
2587                                        &o->pending_cmds_head, link,
2588                                        struct ecore_pending_mcast_cmd) {
2589                 switch (cmd_pos->type) {
2590                 case ECORE_MCAST_CMD_ADD:
2591                         ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
2592                         break;
2593
2594                 case ECORE_MCAST_CMD_DEL:
2595                         ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
2596                         break;
2597
2598                 case ECORE_MCAST_CMD_RESTORE:
2599                         ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
2600                                                            &cnt);
2601                         break;
2602
2603                 default:
2604                         PMD_DRV_LOG(ERR, sc,
2605                                     "Unknown command: %d", cmd_pos->type);
2606                         return ECORE_INVAL;
2607                 }
2608
2609                 /* If the command has been completed - remove it from the list
2610                  * and free the memory
2611                  */
2612                 if (cmd_pos->done) {
2613                         ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
2614                                                 &o->pending_cmds_head);
2615                         ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
2616                 }
2617
2618                 /* Break if we reached the maximum number of rules */
2619                 if (cnt >= o->max_cmd_len)
2620                         break;
2621         }
2622
2623         return cnt;
2624 }
2625
2626 static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
2627                                 struct ecore_mcast_obj *o,
2628                                 struct ecore_mcast_ramrod_params *p,
2629                                 int *line_idx)
2630 {
2631         struct ecore_mcast_list_elem *mlist_pos;
2632         union ecore_mcast_config_data cfg_data = { NULL };
2633         int cnt = *line_idx;
2634
2635         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2636                                   struct ecore_mcast_list_elem) {
2637                 cfg_data.mac = mlist_pos->mac;
2638                 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
2639
2640                 cnt++;
2641
2642                 ECORE_MSG(sc,
2643                           "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2644                           mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2645                           mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
2646         }
2647
2648         *line_idx = cnt;
2649 }
2650
2651 static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
2652                                 struct ecore_mcast_obj *o,
2653                                 struct ecore_mcast_ramrod_params *p,
2654                                 int *line_idx)
2655 {
2656         int cnt = *line_idx, i;
2657
2658         for (i = 0; i < p->mcast_list_len; i++) {
2659                 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
2660
2661                 cnt++;
2662
2663                 ECORE_MSG(sc,
2664                           "Deleting MAC. %d left", p->mcast_list_len - i - 1);
2665         }
2666
2667         *line_idx = cnt;
2668 }
2669
2670 /**
2671  * ecore_mcast_handle_current_cmd - handle the current (new) multicast command
2672  *
2673  * @sc:         device handle
2674  * @p:          multicast ramrod parameters
2675  * @cmd:        command type: ADD, DEL or RESTORE
2676  * @start_cnt:  first line in the ramrod data that may be used
2677  *
2678  * This function is called if there is enough place for the current command in
2679  * the ramrod data.
2680  * Returns number of lines filled in the ramrod data in total.
2681  */
2682 static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
2683                                           ecore_mcast_ramrod_params *p,
2684                                           enum ecore_mcast_cmd cmd,
2685                                           int start_cnt)
2686 {
2687         struct ecore_mcast_obj *o = p->mcast_obj;
2688         int cnt = start_cnt;
2689
2690         ECORE_MSG(sc, "p->mcast_list_len=%d", p->mcast_list_len);
2691
2692         switch (cmd) {
2693         case ECORE_MCAST_CMD_ADD:
2694                 ecore_mcast_hdl_add(sc, o, p, &cnt);
2695                 break;
2696
2697         case ECORE_MCAST_CMD_DEL:
2698                 ecore_mcast_hdl_del(sc, o, p, &cnt);
2699                 break;
2700
2701         case ECORE_MCAST_CMD_RESTORE:
2702                 o->hdl_restore(sc, o, 0, &cnt);
2703                 break;
2704
2705         default:
2706                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2707                 return ECORE_INVAL;
2708         }
2709
2710         /* The current command has been handled */
2711         p->mcast_list_len = 0;
2712
2713         return cnt;
2714 }
2715
2716 static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
2717                                    struct ecore_mcast_ramrod_params *p,
2718                                    enum ecore_mcast_cmd cmd)
2719 {
2720         struct ecore_mcast_obj *o = p->mcast_obj;
2721         int reg_sz = o->get_registry_size(o);
2722
2723         switch (cmd) {
2724                 /* DEL command deletes all currently configured MACs */
2725         case ECORE_MCAST_CMD_DEL:
2726                 o->set_registry_size(o, 0);
2727                 /* fall-through */
2728
2729                 /* RESTORE command will restore the entire multicast configuration */
2730         case ECORE_MCAST_CMD_RESTORE:
2731                 /* Here we set the approximate amount of work to do, which in
2732                  * fact may be only less as some MACs in postponed ADD
2733                  * command(s) scheduled before this command may fall into
2734                  * the same bin and the actual number of bins set in the
2735                  * registry would be less than we estimated here. See
2736                  * ecore_mcast_set_one_rule_e2() for further details.
2737                  */
2738                 p->mcast_list_len = reg_sz;
2739                 break;
2740
2741         case ECORE_MCAST_CMD_ADD:
2742         case ECORE_MCAST_CMD_CONT:
2743                 /* Here we assume that all new MACs will fall into new bins.
2744                  * However we will correct the real registry size after we
2745                  * handle all pending commands.
2746                  */
2747                 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2748                 break;
2749
2750         default:
2751                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2752                 return ECORE_INVAL;
2753         }
2754
2755         /* Increase the total number of MACs pending to be configured */
2756         o->total_pending_num += p->mcast_list_len;
2757
2758         return ECORE_SUCCESS;
2759 }
2760
2761 static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
2762                                   struct ecore_mcast_ramrod_params *p,
2763                                   int old_num_bins)
2764 {
2765         struct ecore_mcast_obj *o = p->mcast_obj;
2766
2767         o->set_registry_size(o, old_num_bins);
2768         o->total_pending_num -= p->mcast_list_len;
2769 }
2770
2771 /**
2772  * ecore_mcast_set_rdata_hdr_e2 - sets the header values
2773  *
2774  * @sc:         device handle
2775  * @p:          multicast ramrod parameters
2776  * @len:        number of rules to handle
2777  */
2778 static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc *sc,
2779                                          struct ecore_mcast_ramrod_params *p,
2780                                          uint8_t len)
2781 {
2782         struct ecore_raw_obj *r = &p->mcast_obj->raw;
2783         struct eth_multicast_rules_ramrod_data *data =
2784             (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2785
2786         data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
2787                                               (ECORE_FILTER_MCAST_PENDING <<
2788                                                ECORE_SWCID_SHIFT));
2789         data->header.rule_cnt = len;
2790 }
2791
2792 /**
2793  * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2794  *
2795  * @o:          multicast object
2796  *
2797  * Recalculates the actual number of set bins in the registry using Brian
2798  * Kernighan's algorithm: its execution time is proportional to the
2799  * number of set bins.
2800  */
2801 static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o)
2802 {
2803         int i, cnt = 0;
2804         uint64_t elem;
2805
2806         for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
2807                 elem = o->registry.aprox_match.vec[i];
2808                 for (; elem; cnt++)
2809                         elem &= elem - 1;
2810         }
2811
2812         o->set_registry_size(o, cnt);
2813
2814         return ECORE_SUCCESS;
2815 }
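
/* Illustrative sketch (not part of the driver): the loop above is Brian
 * Kernighan's population count. Each "elem &= elem - 1" clears the lowest
 * set bit, so the body runs once per set bin rather than once per bit:
 *
 *     static int popcount64(uint64_t v)
 *     {
 *             int n = 0;
 *
 *             for (; v; n++)
 *                     v &= v - 1;     // clear the lowest set bit
 *             return n;
 *     }
 *
 *     // popcount64(0xF0) == 4, popcount64(0) == 0
 */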
2816
2817 static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
2818                                 struct ecore_mcast_ramrod_params *p,
2819                                 enum ecore_mcast_cmd cmd)
2820 {
2821         struct ecore_raw_obj *raw = &p->mcast_obj->raw;
2822         struct ecore_mcast_obj *o = p->mcast_obj;
2823         struct eth_multicast_rules_ramrod_data *data =
2824             (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2825         int cnt = 0, rc;
2826
2827         /* Reset the ramrod data buffer */
2828         ECORE_MEMSET(data, 0, sizeof(*data));
2829
2830         cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
2831
2832         /* If there are no more pending commands - clear SCHEDULED state */
2833         if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
2834                 o->clear_sched(o);
2835
2836         /* The below may be TRUE if there was enough room in ramrod
2837          * data for all pending commands and for the current
2838          * command. Otherwise the current command would have been added
2839          * to the pending commands and p->mcast_list_len would have been
2840          * zeroed.
2841          */
2842         if (p->mcast_list_len > 0)
2843                 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
2844
2845         /* We've pulled out some MACs - update the total number of
2846          * outstanding.
2847          */
2848         o->total_pending_num -= cnt;
2849
2850         /* send a ramrod */
2851         ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
2852         ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
2853
2854         ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt);
2855
 2856         /* Update the registry size if there are no more pending operations.
2857          *
2858          * We don't want to change the value of the registry size if there are
2859          * pending operations because we want it to always be equal to the
2860          * exact or the approximate number (see ecore_mcast_validate_e2()) of
2861          * set bins after the last requested operation in order to properly
2862          * evaluate the size of the next DEL/RESTORE operation.
2863          *
2864          * Note that we update the registry itself during command(s) handling
2865          * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
2866          * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2867          * with a limited amount of update commands (per MAC/bin) and we don't
2868          * know in this scope what the actual state of bins configuration is
2869          * going to be after this ramrod.
2870          */
2871         if (!o->total_pending_num)
2872                 ecore_mcast_refresh_registry_e2(o);
2873
2874         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2875          * RAMROD_PENDING status immediately.
2876          */
2877         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2878                 raw->clear_pending(raw);
2879                 return ECORE_SUCCESS;
2880         } else {
2881                 /* No need for an explicit memory barrier here as long as we
2882                  * ensure the ordering of writing to the SPQ element
2883                  * and updating of the SPQ producer, which involves a memory
2884                  * read; otherwise we would have to put a full memory barrier
2885                  * there (inside ecore_sp_post()).
2886                  */
2887
2888                 /* Send a ramrod */
2889                 rc = ecore_sp_post(sc,
2890                                    RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2891                                    raw->cid,
2892                                    raw->rdata_mapping, ETH_CONNECTION_TYPE);
2893                 if (rc)
2894                         return rc;
2895
2896                 /* Ramrod completion is pending */
2897                 return ECORE_PENDING;
2898         }
2899 }
2900
2901 static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc,
2902                                     struct ecore_mcast_ramrod_params *p,
2903                                     enum ecore_mcast_cmd cmd)
2904 {
 2905         /* Mark that there is work to do */
2906         if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
2907                 p->mcast_list_len = 1;
2908
2909         return ECORE_SUCCESS;
2910 }
2911
2912 static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc,
2913                                    __rte_unused struct ecore_mcast_ramrod_params
2914                                    *p, __rte_unused int old_num_bins)
2915 {
2916         /* Do nothing */
2917 }
2918
2919 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
2920 do { \
2921         (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2922 } while (0)
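
/* A minimal sketch of the indexing above (illustrative): the filter is an
 * array of 32-bit words, so a bin number is split into a word index
 * (bit >> 5) and a bit offset within that word (bit & 0x1f):
 *
 *     uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
 *
 *     ECORE_57711_SET_MC_FILTER(mc_filter, 37);
 *     // 37 >> 5 == 1, 37 & 0x1f == 5, so this expands to:
 *     // mc_filter[1] |= (1 << 5);
 */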
2923
2924 static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
2925                                     struct ecore_mcast_obj *o,
2926                                     struct ecore_mcast_ramrod_params *p,
 2927                                     uint32_t *mc_filter)
2928 {
2929         struct ecore_mcast_list_elem *mlist_pos;
2930         int bit;
2931
2932         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2933                                   struct ecore_mcast_list_elem) {
2934                 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
2935                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2936
2937                 ECORE_MSG(sc,
2938                           "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
2939                           mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2940                           mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
2941                           bit);
2942
2943                 /* bookkeeping... */
2944                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit);
2945         }
2946 }
2947
2948 static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc __rte_unused,
2949                                         struct ecore_mcast_obj *o,
2950                                         uint32_t *mc_filter)
2952 {
2953         int bit;
2954
2955         for (bit = ecore_mcast_get_next_bin(o, 0);
2956              bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
2957                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2958                 ECORE_MSG(sc, "About to set bin %d", bit);
2959         }
2960 }
2961
2962 /* On 57711 we write the multicast MACs' approximate match
2963  * table directly into the TSTORM's internal RAM, so we don't
2964  * need any special tricks to make it work.
2965  */
2966 static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
2967                                  struct ecore_mcast_ramrod_params *p,
2968                                  enum ecore_mcast_cmd cmd)
2969 {
2970         int i;
2971         struct ecore_mcast_obj *o = p->mcast_obj;
2972         struct ecore_raw_obj *r = &o->raw;
2973
2974         /* Unless CLEAR_ONLY has been requested, configure the filter;
2975          * otherwise only clear the registry and the pending bit.
2976          */
2977         if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2978                 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
2979
2980                 /* Set the multicast filter bits before writing it into
2981                  * the internal memory.
2982                  */
2983                 switch (cmd) {
2984                 case ECORE_MCAST_CMD_ADD:
2985                         ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
2986                         break;
2987
2988                 case ECORE_MCAST_CMD_DEL:
2989                         ECORE_MSG(sc, "Invalidating multicast MACs configuration");
2990
2991                         /* clear the registry */
2992                         ECORE_MEMSET(o->registry.aprox_match.vec, 0,
2993                                      sizeof(o->registry.aprox_match.vec));
2994                         break;
2995
2996                 case ECORE_MCAST_CMD_RESTORE:
2997                         ecore_mcast_hdl_restore_e1h(sc, o, mc_filter);
2998                         break;
2999
3000                 default:
3001                         PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
3002                         return ECORE_INVAL;
3003                 }
3004
3005                 /* Set the mcast filter in the internal memory */
3006                 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3007                         REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3008         } else
3009                 /* clear the registry */
3010                 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3011                              sizeof(o->registry.aprox_match.vec));
3012
3013         /* We are done */
3014         r->clear_pending(r);
3015
3016         return ECORE_SUCCESS;
3017 }
3018
3019 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3020 {
3021         return o->registry.aprox_match.num_bins_set;
3022 }
3023
3024 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3025                                                 int n)
3026 {
3027         o->registry.aprox_match.num_bins_set = n;
3028 }
3029
3030 int ecore_config_mcast(struct bnx2x_softc *sc,
3031                        struct ecore_mcast_ramrod_params *p,
3032                        enum ecore_mcast_cmd cmd)
3033 {
3034         struct ecore_mcast_obj *o = p->mcast_obj;
3035         struct ecore_raw_obj *r = &o->raw;
3036         int rc = 0, old_reg_size;
3037
3038         /* This is needed to recover the number of currently configured
3039          * mcast MACs in case of failure.
3040          */
3041         old_reg_size = o->get_registry_size(o);
3042
3043         /* Do some calculations and checks */
3044         rc = o->validate(sc, p, cmd);
3045         if (rc)
3046                 return rc;
3047
3048         /* Return if there is no work to do */
3049         if ((!p->mcast_list_len) && (!o->check_sched(o)))
3050                 return ECORE_SUCCESS;
3051
3052         ECORE_MSG(sc,
3053                   "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
3054                   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3055
3056         /* Enqueue the current command to the pending list if we can't complete
3057          * it in the current iteration
3058          */
3059         if (r->check_pending(r) ||
3060             ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3061                 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3062                 if (rc < 0)
3063                         goto error_exit1;
3064
3065                 /* As long as the current command is in a command list we
3066                  * don't need to handle it separately.
3067                  */
3068                 p->mcast_list_len = 0;
3069         }
3070
3071         if (!r->check_pending(r)) {
3072
3073                 /* Set 'pending' state */
3074                 r->set_pending(r);
3075
3076                 /* Configure the new classification in the chip */
3077                 rc = o->config_mcast(sc, p, cmd);
3078                 if (rc < 0)
3079                         goto error_exit2;
3080
3081                 /* Wait for a ramrod completion if was requested */
3082                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3083                         rc = o->wait_comp(sc, o);
3084         }
3085
3086         return rc;
3087
3088 error_exit2:
3089         r->clear_pending(r);
3090
3091 error_exit1:
3092         o->revert(sc, p, old_reg_size);
3093
3094         return rc;
3095 }
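
/* Hedged usage sketch (illustrative only; the mcast_obj location in the
 * softc is an assumption): a caller fills the ramrod parameters, links the
 * MACs to configure and invokes ecore_config_mcast():
 *
 *     struct ecore_mcast_ramrod_params rparams = { 0 };
 *     int rc;
 *
 *     rparams.mcast_obj = &sc->mcast_obj;          // assumed field
 *     ECORE_SET_BIT(RAMROD_COMP_WAIT, &rparams.ramrod_flags);
 *     // link struct ecore_mcast_list_elem entries into rparams.mcast_list
 *     // and set rparams.mcast_list_len accordingly, then:
 *     rc = ecore_config_mcast(sc, &rparams, ECORE_MCAST_CMD_ADD);
 *
 * On a negative rc the command was neither executed nor enqueued and the
 * registry size has been reverted via o->revert().
 */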
3096
3097 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3098 {
3099         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3100         ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3101         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3102 }
3103
3104 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3105 {
3106         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3107         ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3108         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3109 }
3110
3111 static int ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3112 {
 3113         return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3114 }
3115
3116 static int ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3117 {
3118         return o->raw.check_pending(&o->raw) || o->check_sched(o);
3119 }
3120
3121 void ecore_init_mcast_obj(struct bnx2x_softc *sc,
3122                           struct ecore_mcast_obj *mcast_obj,
3123                           uint8_t mcast_cl_id, uint32_t mcast_cid,
3124                           uint8_t func_id, uint8_t engine_id, void *rdata,
3125                           ecore_dma_addr_t rdata_mapping, int state,
3126                           unsigned long *pstate, ecore_obj_type type)
3127 {
3128         ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3129
3130         ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3131                            rdata, rdata_mapping, state, pstate, type);
3132
3133         mcast_obj->engine_id = engine_id;
3134
3135         ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3136
3137         mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3138         mcast_obj->check_sched = ecore_mcast_check_sched;
3139         mcast_obj->set_sched = ecore_mcast_set_sched;
3140         mcast_obj->clear_sched = ecore_mcast_clear_sched;
3141
3142         if (CHIP_IS_E1H(sc)) {
3143                 mcast_obj->config_mcast = ecore_mcast_setup_e1h;
3144                 mcast_obj->enqueue_cmd = NULL;
3145                 mcast_obj->hdl_restore = NULL;
3146                 mcast_obj->check_pending = ecore_mcast_check_pending;
3147
3148                 /* 57711 doesn't send a ramrod, so it has unlimited credit
3149                  * for one command.
3150                  */
3151                 mcast_obj->max_cmd_len = -1;
3152                 mcast_obj->wait_comp = ecore_mcast_wait;
3153                 mcast_obj->set_one_rule = NULL;
3154                 mcast_obj->validate = ecore_mcast_validate_e1h;
3155                 mcast_obj->revert = ecore_mcast_revert_e1h;
3156                 mcast_obj->get_registry_size =
3157                     ecore_mcast_get_registry_size_aprox;
3158                 mcast_obj->set_registry_size =
3159                     ecore_mcast_set_registry_size_aprox;
3160         } else {
3161                 mcast_obj->config_mcast = ecore_mcast_setup_e2;
3162                 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
3163                 mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2;
3164                 mcast_obj->check_pending = ecore_mcast_check_pending;
3165                 mcast_obj->max_cmd_len = 16;
3166                 mcast_obj->wait_comp = ecore_mcast_wait;
3167                 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
3168                 mcast_obj->validate = ecore_mcast_validate_e2;
3169                 mcast_obj->revert = ecore_mcast_revert_e2;
3170                 mcast_obj->get_registry_size =
3171                     ecore_mcast_get_registry_size_aprox;
3172                 mcast_obj->set_registry_size =
3173                     ecore_mcast_set_registry_size_aprox;
3174         }
3175 }
3176
3177 /*************************** Credit handling **********************************/
3178
3179 /**
3180  * atomic_add_ifless - add if the result is less than a given value.
3181  *
 3182  * __atomic_add_ifless - add if the result is less than a given value.
3183  * @a:  the amount to add to v...
3184  * @u:  ...if (v + a) is less than u.
3185  *
3186  * returns TRUE if (v + a) was less than u, and FALSE otherwise.
3187  *
3188  */
 3189 static int __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
3190 {
3191         int c, old;
3192
3193         c = ECORE_ATOMIC_READ(v);
3194         for (;;) {
3195                 if (ECORE_UNLIKELY(c + a >= u))
3196                         return FALSE;
3197
3198                 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
3199                 if (ECORE_LIKELY(old == c))
3200                         break;
3201                 c = old;
3202         }
3203
3204         return TRUE;
3205 }
3206
3207 /**
3208  * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
3209  *
3210  * @v:  pointer of type ecore_atomic_t
3211  * @a:  the amount to dec from v...
3212  * @u:  ...if (v - a) is greater than or equal to u.
3213  *
3214  * returns TRUE if (v - a) was greater than or equal to u, and FALSE
3215  * otherwise.
3216  */
 3217 static int __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
3218 {
3219         int c, old;
3220
3221         c = ECORE_ATOMIC_READ(v);
3222         for (;;) {
3223                 if (ECORE_UNLIKELY(c - a < u))
3224                         return FALSE;
3225
3226                 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
3227                 if (ECORE_LIKELY(old == c))
3228                         break;
3229                 c = old;
3230         }
3231
3232         return TRUE;
3233 }
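
/* Illustrative sketch of the pattern shared by the two helpers above: a
 * classic compare-and-swap retry loop. Read the current value, compute a
 * candidate, and retry whenever another CPU raced in between:
 *
 *     int c = ECORE_ATOMIC_READ(v);
 *     for (;;) {
 *             if (!still_allowed(c))          // bound check against u
 *                     return FALSE;
 *             int old = ECORE_ATOMIC_CMPXCHG(v, c, new_value(c));
 *             if (old == c)                   // no race - we won
 *                     return TRUE;
 *             c = old;                        // lost the race - retry
 *     }
 *
 * still_allowed() and new_value() are placeholders: (c + a < u) and (c + a)
 * for __atomic_add_ifless; (c - a >= u) and (c - a) for __atomic_dec_ifmoe.
 */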
3234
3235 static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
3236 {
3237         int rc;
3238
3239         ECORE_SMP_MB();
3240         rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3241         ECORE_SMP_MB();
3242
3243         return rc;
3244 }
3245
3246 static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
3247 {
3248         int rc;
3249
3250         ECORE_SMP_MB();
3251
 3252         /* Don't allow a refill if credit + cnt > pool_sz */
3253         rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3254
3255         ECORE_SMP_MB();
3256
3257         return rc;
3258 }
3259
3260 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
3261 {
3262         int cur_credit;
3263
3264         ECORE_SMP_MB();
3265         cur_credit = ECORE_ATOMIC_READ(&o->credit);
3266
3267         return cur_credit;
3268 }
3269
3270 static int ecore_credit_pool_always_TRUE(__rte_unused struct
3271                                          ecore_credit_pool_obj *o,
3272                                          __rte_unused int cnt)
3273 {
3274         return TRUE;
3275 }
3276
3277 static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o,
3278                                        int *offset)
3279 {
3280         int idx, vec, i;
3281
3282         *offset = -1;
3283
3284         /* Find "internal cam-offset" then add to base for this object... */
3285         for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
3286
3287                 /* Skip the current vector if there are no free entries in it */
3288                 if (!o->pool_mirror[vec])
3289                         continue;
3290
3291                 /* If we've got here we are going to find a free entry */
3292                 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3293                      i < BIT_VEC64_ELEM_SZ; idx++, i++)
3294
3295                         if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3296                                 /* Got one!! */
3297                                 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3298                                 *offset = o->base_pool_offset + idx;
3299                                 return TRUE;
3300                         }
3301         }
3302
3303         return FALSE;
3304 }
3305
3306 static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o,
3307                                        int offset)
3308 {
3309         if (offset < o->base_pool_offset)
3310                 return FALSE;
3311
3312         offset -= o->base_pool_offset;
3313
3314         if (offset >= o->pool_sz)
3315                 return FALSE;
3316
3317         /* Return the entry to the pool */
3318         BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3319
3320         return TRUE;
3321 }
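
/* A short sketch of the get/put round trip (illustrative): pool_mirror is
 * a bit vector in which a set bit marks a free CAM entry. get_entry()
 * clears a bit and returns base_pool_offset + idx; put_entry() translates
 * the absolute offset back and re-sets the bit:
 *
 *     int offset;
 *
 *     if (o->get_entry(o, &offset)) {
 *             // offset is an absolute CAM index, exclusively ours
 *             // ... program the CAM entry ...
 *             o->put_entry(o, offset);        // hand the entry back
 *     }
 */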
3322
3323 static int ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct
3324                                                    ecore_credit_pool_obj *o,
3325                                                    __rte_unused int offset)
3326 {
3327         return TRUE;
3328 }
3329
3330 static int ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct
3331                                                    ecore_credit_pool_obj *o,
3332                                                    __rte_unused int *offset)
3333 {
3334         *offset = -1;
3335         return TRUE;
3336 }
3337
3338 /**
3339  * ecore_init_credit_pool - initialize credit pool internals.
3340  *
 3341  * @p:          credit pool object
3342  * @base:       Base entry in the CAM to use.
3343  * @credit:     pool size.
3344  *
3345  * If base is negative no CAM entries handling will be performed.
3346  * If credit is negative pool operations will always succeed (unlimited pool).
3347  *
3348  */
3349 static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
3350                                    int base, int credit)
3351 {
3352         /* Zero the object first */
3353         ECORE_MEMSET(p, 0, sizeof(*p));
3354
3355         /* Set the table to all 1s */
3356         ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3357
3358         /* Init a pool as full */
3359         ECORE_ATOMIC_SET(&p->credit, credit);
3360
 3361         /* The total pool size */
3362         p->pool_sz = credit;
3363
3364         p->base_pool_offset = base;
3365
3366         /* Commit the change */
3367         ECORE_SMP_MB();
3368
3369         p->check = ecore_credit_pool_check;
3370
3371         /* if pool credit is negative - disable the checks */
3372         if (credit >= 0) {
3373                 p->put = ecore_credit_pool_put;
3374                 p->get = ecore_credit_pool_get;
3375                 p->put_entry = ecore_credit_pool_put_entry;
3376                 p->get_entry = ecore_credit_pool_get_entry;
3377         } else {
3378                 p->put = ecore_credit_pool_always_TRUE;
3379                 p->get = ecore_credit_pool_always_TRUE;
3380                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3381                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3382         }
3383
3384         /* If base is negative - disable entries handling */
3385         if (base < 0) {
3386                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3387                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3388         }
3389 }
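
/* The three initialization modes, as a hedged usage sketch (values are
 * illustrative):
 *
 *     ecore_init_credit_pool(p, 8, 16);    // base 8, 16 credits: both
 *                                          // credit and CAM-entry handling
 *     ecore_init_credit_pool(p, -1, 16);   // credits enforced, but no
 *                                          // CAM-entry bookkeeping
 *     ecore_init_credit_pool(p, 0, -1);    // unlimited pool: every get/put
 *                                          // always succeeds
 */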
3390
3391 void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
3392                                 struct ecore_credit_pool_obj *p,
3393                                 uint8_t func_id, uint8_t func_num)
3394 {
3395
3396 #define ECORE_CAM_SIZE_EMUL 5
3397
3398         int cam_sz;
3399
3400         if (CHIP_IS_E1H(sc)) {
3401                 /* CAM credit is equally divided between all active functions
 3402                  * on the PORT.
3403                  */
3404                 if (func_num > 0) {
3405                         if (!CHIP_REV_IS_SLOW(sc))
3406                                 cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
3407                         else
3408                                 cam_sz = ECORE_CAM_SIZE_EMUL;
3409                         ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
3410                 } else {
3411                         /* this should never happen! Block MAC operations. */
3412                         ecore_init_credit_pool(p, 0, 0);
3413                 }
3414
3415         } else {
3416
3417                 /*
 3418                  * CAM credit is equally divided between all active functions
3419                  * on the PATH.
3420                  */
3421                 if (func_num > 0) {
3422                         if (!CHIP_REV_IS_SLOW(sc))
3423                                 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3424                         else
3425                                 cam_sz = ECORE_CAM_SIZE_EMUL;
3426
3427                         /* No need for CAM entries handling for 57712 and
3428                          * newer.
3429                          */
3430                         ecore_init_credit_pool(p, -1, cam_sz);
3431                 } else {
3432                         /* this should never happen! Block MAC operations. */
3433                         ecore_init_credit_pool(p, 0, 0);
3434                 }
3435         }
3436 }
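
/* Worked example (symbolic, since the credit constants live elsewhere): on
 * a fast E1H chip with func_num == 2, each function gets
 * MAX_MAC_CREDIT_E1H / (2 * 2) CAM entries, and function 1 owns the range
 * starting at 1 * cam_sz. On 57712 and newer the base is passed as -1, so
 * only the credit count is enforced and no per-entry offsets are tracked.
 */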
3437
3438 void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
3439                                  struct ecore_credit_pool_obj *p,
3440                                  uint8_t func_id, uint8_t func_num)
3441 {
3442         if (CHIP_IS_E1x(sc)) {
3443                 /* There is no VLAN credit in HW on 57711; only
3444                  * MAC / MAC-VLAN can be set
3445                  */
3446                 ecore_init_credit_pool(p, 0, -1);
3447         } else {
3448                 /* CAM credit is equally divided between all active functions
3449                  * on the PATH.
3450                  */
3451                 if (func_num > 0) {
3452                         int credit = MAX_VLAN_CREDIT_E2 / func_num;
3453                         ecore_init_credit_pool(p, func_id * credit, credit);
3454                 } else
3455                         /* this should never happen! Block VLAN operations. */
3456                         ecore_init_credit_pool(p, 0, 0);
3457         }
3458 }
3459
3460 /****************** RSS Configuration ******************/
3461
3462 /**
3463  * ecore_setup_rss - configure RSS
3464  *
3465  * @sc:         device handle
3466  * @p:          rss configuration
3467  *
 3468  * Sends an RSS UPDATE ramrod to apply the configuration.
3469  */
3470 static int ecore_setup_rss(struct bnx2x_softc *sc,
3471                            struct ecore_config_rss_params *p)
3472 {
3473         struct ecore_rss_config_obj *o = p->rss_obj;
3474         struct ecore_raw_obj *r = &o->raw;
3475         struct eth_rss_update_ramrod_data *data =
3476             (struct eth_rss_update_ramrod_data *)(r->rdata);
3477         uint8_t rss_mode = 0;
3478         int rc;
3479
3480         ECORE_MEMSET(data, 0, sizeof(*data));
3481
3482         ECORE_MSG(sc, "Configuring RSS");
3483
3484         /* Set an echo field */
3485         data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3486                                        (r->state << ECORE_SWCID_SHIFT));
3487
3488         /* RSS mode */
3489         if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
3490                 rss_mode = ETH_RSS_MODE_DISABLED;
3491         else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
3492                 rss_mode = ETH_RSS_MODE_REGULAR;
3493
3494         data->rss_mode = rss_mode;
3495
3496         ECORE_MSG(sc, "rss_mode=%d", rss_mode);
3497
3498         /* RSS capabilities */
3499         if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
3500                 data->capabilities |=
3501                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
3502
3503         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
3504                 data->capabilities |=
3505                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
3506
3507         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
3508                 data->capabilities |=
3509                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
3510
3511         if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
3512                 data->capabilities |=
3513                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
3514
3515         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
3516                 data->capabilities |=
3517                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
3518
3519         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
3520                 data->capabilities |=
3521                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
3522
3523         if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
3524                 data->udp_4tuple_dst_port_mask =
3525                     ECORE_CPU_TO_LE16(p->tunnel_mask);
3526                 data->udp_4tuple_dst_port_value =
3527                     ECORE_CPU_TO_LE16(p->tunnel_value);
3528         }
3529
3530         /* Hashing mask */
3531         data->rss_result_mask = p->rss_result_mask;
3532
3533         /* RSS engine ID */
3534         data->rss_engine_id = o->engine_id;
3535
3536         ECORE_MSG(sc, "rss_engine_id=%d", data->rss_engine_id);
3537
3538         /* Indirection table */
3539         ECORE_MEMCPY(data->indirection_table, p->ind_table,
3540                      T_ETH_INDIRECTION_TABLE_SIZE);
3541
3542         /* Remember the last configuration */
3543         ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
3544
3545         /* RSS keys */
3546         if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
3547                 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
3548                              sizeof(data->rss_key));
3549                 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
3550         }
3551
3552         /* No need for an explicit memory barrier here as long as we
3553          * ensure the ordering of writing to the SPQ element
3554          * and updating of the SPQ producer, which involves a memory
3555          * read; otherwise we would have to put a full memory barrier
3556          * there (inside ecore_sp_post()).
3557          */
3558
3559         /* Send a ramrod */
3560         rc = ecore_sp_post(sc,
3561                            RAMROD_CMD_ID_ETH_RSS_UPDATE,
3562                            r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE);
3563
3564         if (rc < 0)
3565                 return rc;
3566
3567         return ECORE_PENDING;
3568 }
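
/* Hedged configuration sketch (illustrative; the rss_conf_obj location is
 * an assumption): a caller prepares ecore_config_rss_params and calls
 * ecore_config_rss(), which delegates to ecore_setup_rss():
 *
 *     struct ecore_config_rss_params params = { 0 };
 *     int rc;
 *
 *     params.rss_obj = &sc->rss_conf_obj;          // assumed field
 *     ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
 *     ECORE_SET_BIT(ECORE_RSS_IPV4, &params.rss_flags);
 *     ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &params.rss_flags);
 *     params.rss_result_mask = 0x7f;               // illustrative mask
 *     // fill params.ind_table[] (T_ETH_INDIRECTION_TABLE_SIZE entries) and,
 *     // if ECORE_RSS_SET_SRCH is set, params.rss_key[], then:
 *     rc = ecore_config_rss(sc, &params);
 */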
3569
3570 int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p)
3571 {
3572         int rc;
3573         struct ecore_rss_config_obj *o = p->rss_obj;
3574         struct ecore_raw_obj *r = &o->raw;
3575
3576         /* Do nothing if only driver cleanup was requested */
3577         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
3578                 return ECORE_SUCCESS;
3579
3580         r->set_pending(r);
3581
3582         rc = o->config_rss(sc, p);
3583         if (rc < 0) {
3584                 r->clear_pending(r);
3585                 return rc;
3586         }
3587
3588         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3589                 rc = r->wait_comp(sc, r);
3590
3591         return rc;
3592 }
3593
3594 void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
3595                                uint8_t cl_id, uint32_t cid, uint8_t func_id,
3596                                uint8_t engine_id, void *rdata,
3597                                ecore_dma_addr_t rdata_mapping, int state,
3598                                unsigned long *pstate, ecore_obj_type type)
3599 {
3600         ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
3601                            rdata_mapping, state, pstate, type);
3602
3603         rss_obj->engine_id = engine_id;
3604         rss_obj->config_rss = ecore_setup_rss;
3605 }
3606
3607 /********************** Queue state object ***********************************/
3608
3609 /**
3610  * ecore_queue_state_change - perform Queue state change transition
3611  *
3612  * @sc:         device handle
3613  * @params:     parameters to perform the transition
3614  *
3615  * returns 0 in case of a successfully completed transition, a negative error
3616  * code in case of failure, or a positive (EBUSY) value if there is a
3617  * completion that is still pending (possible only if RAMROD_COMP_WAIT is
3618  * not set in params->ramrod_flags for asynchronous commands).
3619  *
3620  */
3621 int ecore_queue_state_change(struct bnx2x_softc *sc,
3622                              struct ecore_queue_state_params *params)
3623 {
3624         struct ecore_queue_sp_obj *o = params->q_obj;
3625         int rc, pending_bit;
3626         unsigned long *pending = &o->pending;
3627
3628         /* Check that the requested transition is legal */
3629         rc = o->check_transition(sc, o, params);
3630         if (rc) {
3631                 PMD_DRV_LOG(ERR, sc, "check transition returned an error. rc %d",
3632                             rc);
3633                 return ECORE_INVAL;
3634         }
3635
3636         /* Set "pending" bit */
3637         ECORE_MSG(sc, "pending bit was=%lx", o->pending);
3638         pending_bit = o->set_pending(o, params);
3639         ECORE_MSG(sc, "pending bit now=%lx", o->pending);
3640
3641         /* Don't send a command if only driver cleanup was requested */
3642         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
3643                 o->complete_cmd(sc, o, pending_bit);
3644         else {
3645                 /* Send a ramrod */
3646                 rc = o->send_cmd(sc, params);
3647                 if (rc) {
3648                         o->next_state = ECORE_Q_STATE_MAX;
3649                         ECORE_CLEAR_BIT(pending_bit, pending);
3650                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3651                         return rc;
3652                 }
3653
3654                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
3655                         rc = o->wait_comp(sc, o, pending_bit);
3656                         if (rc)
3657                                 return rc;
3658
3659                         return ECORE_SUCCESS;
3660                 }
3661         }
3662
3663         return ECORE_RET_PENDING(pending_bit, pending);
3664 }
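
/* Minimal usage sketch (illustrative; the queue object location is an
 * assumption): drive a queue through a transition and wait synchronously
 * for the ramrod completion:
 *
 *     struct ecore_queue_state_params q_params = { 0 };
 *     int rc;
 *
 *     q_params.q_obj = &sc->sp_objs[idx].q_obj;    // assumed location
 *     q_params.cmd = ECORE_Q_CMD_HALT;
 *     ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *     rc = ecore_queue_state_change(sc, &q_params);
 *     // rc == 0: transition completed; rc < 0: rejected or failed;
 *     // rc > 0 is only possible without RAMROD_COMP_WAIT and means the
 *     // completion is still pending.
 */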
3665
3666 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
3667                                    struct ecore_queue_state_params *params)
3668 {
3669         enum ecore_queue_cmd cmd = params->cmd, bit;
3670
3671         /* ACTIVATE and DEACTIVATE commands are implemented on top of
3672          * UPDATE command.
3673          */
3674         if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE))
3675                 bit = ECORE_Q_CMD_UPDATE;
3676         else
3677                 bit = cmd;
3678
3679         ECORE_SET_BIT(bit, &obj->pending);
3680         return bit;
3681 }
3682
3683 static int ecore_queue_wait_comp(struct bnx2x_softc *sc,
3684                                  struct ecore_queue_sp_obj *o,
3685                                  enum ecore_queue_cmd cmd)
3686 {
3687         return ecore_state_wait(sc, cmd, &o->pending);
3688 }
3689
3690 /**
3691  * ecore_queue_comp_cmd - complete the state change command.
3692  *
3693  * @sc:         device handle
 3694  * @o:          queue state object
 3695  * @cmd:        command that has completed
3696  *
3697  * Checks that the arrived completion is expected.
3698  */
3699 static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
3700                                 struct ecore_queue_sp_obj *o,
3701                                 enum ecore_queue_cmd cmd)
3702 {
3703         unsigned long cur_pending = o->pending;
3704
3705         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
3706                 PMD_DRV_LOG(ERR, sc,
3707                             "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
3708                             cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
3709                             cur_pending, o->next_state);
3710                 return ECORE_INVAL;
3711         }
3712
3713         if (o->next_tx_only >= o->max_cos)
3714                 /* >= because tx only must always be smaller than cos since the
3715                  * primary connection supports COS 0
3716                  */
3717                 PMD_DRV_LOG(ERR, sc,
3718                             "illegal value for next tx_only: %d. max cos was %d",
3719                             o->next_tx_only, o->max_cos);
3720
3721         ECORE_MSG(sc, "Completing command %d for queue %d, setting state to %d",
3722                   cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
3723
3724         if (o->next_tx_only)    /* print num tx-only if any exist */
3725                 ECORE_MSG(sc, "primary cid %d: num tx-only cons %d",
3726                           o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
3727
3728         o->state = o->next_state;
3729         o->num_tx_only = o->next_tx_only;
3730         o->next_state = ECORE_Q_STATE_MAX;
3731
3732         /* It's important that o->state and o->next_state are
3733          * updated before o->pending.
3734          */
3735         wmb();
3736
3737         ECORE_CLEAR_BIT(cmd, &o->pending);
3738         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3739
3740         return ECORE_SUCCESS;
3741 }
3742
3743 static void ecore_q_fill_setup_data_e2(struct ecore_queue_state_params
3744                                        *cmd_params,
3745                                        struct client_init_ramrod_data *data)
3746 {
3747         struct ecore_queue_setup_params *params = &cmd_params->params.setup;
3748
3749         /* Rx data */
3750
3751         /* IPv6 TPA supported for E2 and above only */
3752         data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
3753                                           &params->flags) *
3754             CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
3755 }
3756
3757 static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
3758                                            struct ecore_queue_sp_obj *o,
3759                                            struct ecore_general_setup_params
3760                                            *params, struct client_init_general_data
3761                                            *gen_data, unsigned long *flags)
3762 {
3763         gen_data->client_id = o->cl_id;
3764
3765         if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
3766                 gen_data->statistics_counter_id = params->stat_id;
3767                 gen_data->statistics_en_flg = 1;
3768                 gen_data->statistics_zero_flg =
3769                     ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
3770         } else
3771                 gen_data->statistics_counter_id =
3772                     DISABLE_STATISTIC_COUNTER_ID_VALUE;
3773
3774         gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags);
3775         gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags);
3776         gen_data->sp_client_id = params->spcl_id;
3777         gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
3778         gen_data->func_id = o->func_id;
3779
3780         gen_data->cos = params->cos;
3781
3782         gen_data->traffic_type =
3783             ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
3784             LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
3785
3786         ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d",
3787                   gen_data->activate_flg, gen_data->cos,
3788                   gen_data->statistics_en_flg);
3789 }
3790
3791 static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params,
3792                                       struct client_init_tx_data *tx_data,
3793                                       unsigned long *flags)
3794 {
3795         tx_data->enforce_security_flg =
3796             ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
3797         tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan);
3798         tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
3799         tx_data->tx_switching_flg =
3800             ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
3801         tx_data->anti_spoofing_flg =
3802             ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
3803         tx_data->force_default_pri_flg =
3804             ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
3805         tx_data->refuse_outband_vlan_flg =
3806             ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
3807         tx_data->tunnel_non_lso_pcsum_location =
3808             ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
3809             CSUM_ON_BD;
3810
3811         tx_data->tx_status_block_id = params->fw_sb_id;
3812         tx_data->tx_sb_index_number = params->sb_cq_index;
3813         tx_data->tss_leading_client_id = params->tss_leading_cl_id;
3814
3815         tx_data->tx_bd_page_base.lo =
3816             ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3817         tx_data->tx_bd_page_base.hi =
3818             ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3819
3820         /* Don't configure any Tx switching mode during queue SETUP */
3821         tx_data->state = 0;
3822 }
3823
3824 static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params,
3825                                          struct client_init_rx_data *rx_data)
3826 {
3827         /* flow control data */
3828         rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
3829         rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
3830         rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
3831         rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
3832         rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
3833         rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
3834         rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
3835 }
3836
3837 static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params,
3838                                       struct client_init_rx_data *rx_data,
3839                                       unsigned long *flags)
3840 {
3841         rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
3842             CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
3843         rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
3844             CLIENT_INIT_RX_DATA_TPA_MODE;
3845         rx_data->vmqueue_mode_en_flg = 0;
3846
3847         rx_data->extra_data_over_sgl_en_flg =
3848             ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
3849         rx_data->cache_line_alignment_log_size = params->cache_line_log;
3850         rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
3851         rx_data->client_qzone_id = params->cl_qzone_id;
3852         rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
3853
3854         /* Always start in DROP_ALL mode */
3855         rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
3856                                            CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
3857
3858         /* We don't set drop flags */
3859         rx_data->drop_ip_cs_err_flg = 0;
3860         rx_data->drop_tcp_cs_err_flg = 0;
3861         rx_data->drop_ttl0_flg = 0;
3862         rx_data->drop_udp_cs_err_flg = 0;
3863         rx_data->inner_vlan_removal_enable_flg =
3864             ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
3865         rx_data->outer_vlan_removal_enable_flg =
3866             ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
3867         rx_data->status_block_id = params->fw_sb_id;
3868         rx_data->rx_sb_index_number = params->sb_cq_index;
3869         rx_data->max_tpa_queues = params->max_tpa_queues;
3870         rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
3871         rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3872         rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3873         rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
3874         rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
3875         rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
3876                                                  flags);
3877
3878         if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
3879                 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
3880                 rx_data->is_approx_mcast = 1;
3881         }
3882
3883         rx_data->rss_engine_id = params->rss_engine_id;
3884
3885         /* silent vlan removal */
3886         rx_data->silent_vlan_removal_flg =
3887             ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
3888         rx_data->silent_vlan_value =
3889             ECORE_CPU_TO_LE16(params->silent_removal_value);
3890         rx_data->silent_vlan_mask =
3891             ECORE_CPU_TO_LE16(params->silent_removal_mask);
3892 }
3893
3894 /* initialize the general, tx and rx parts of a queue object */
3895 static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc, struct ecore_queue_state_params
3896                                         *cmd_params,
3897                                         struct client_init_ramrod_data *data)
3898 {
3899         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3900                                        &cmd_params->params.setup.gen_params,
3901                                        &data->general,
3902                                        &cmd_params->params.setup.flags);
3903
3904         ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params,
3905                                   &data->tx, &cmd_params->params.setup.flags);
3906
3907         ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params,
3908                                   &data->rx, &cmd_params->params.setup.flags);
3909
3910         ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params,
3911                                      &data->rx);
3912 }
3913
3914 /* initialize the general and tx parts of a tx-only queue object */
3915 static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
3916                                        *cmd_params,
3917                                        struct tx_queue_init_ramrod_data *data)
3918 {
3919         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3920                                        &cmd_params->params.tx_only.gen_params,
3921                                        &data->general,
3922                                        &cmd_params->params.tx_only.flags);
3923
3924         ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
3925                                   &data->tx, &cmd_params->params.tx_only.flags);
3926
3927         ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
3928                   cmd_params->q_obj->cids[0],
3929                   data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
3930 }
3931
3932 /**
3933  * ecore_q_init - init HW/FW queue
3934  *
3935  * @sc:         device handle
 3936  * @params:     queue state parameters
3937  *
3938  * HW/FW initial Queue configuration:
3939  *      - HC: Rx and Tx
3940  *      - CDU context validation
3941  *
3942  */
3943 static int ecore_q_init(struct bnx2x_softc *sc,
3944                         struct ecore_queue_state_params *params)
3945 {
3946         struct ecore_queue_sp_obj *o = params->q_obj;
3947         struct ecore_queue_init_params *init = &params->params.init;
3948         uint16_t hc_usec;
3949         uint8_t cos;
3950
3951         /* Tx HC configuration */
3952         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
3953             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
3954                 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
3955
3956                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
3957                                                init->tx.sb_cq_index,
3958                                                !ECORE_TEST_BIT
3959                                                (ECORE_Q_FLG_HC_EN,
3960                                                 &init->tx.flags), hc_usec);
3961         }
3962
3963         /* Rx HC configuration */
3964         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
3965             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
3966                 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
3967
3968                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
3969                                                init->rx.sb_cq_index,
3970                                                !ECORE_TEST_BIT
3971                                                (ECORE_Q_FLG_HC_EN,
3972                                                 &init->rx.flags), hc_usec);
3973         }
3974
3975         /* Set CDU context validation values */
3976         for (cos = 0; cos < o->max_cos; cos++) {
3977                 ECORE_MSG(sc, "setting context validation. cid %d, cos %d",
3978                           o->cids[cos], cos);
3979                 ECORE_MSG(sc, "context pointer %p", init->cxts[cos]);
3980                 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
3981         }
3982
3983         /* As no ramrod is sent, complete the command immediately  */
3984         o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
3985
3986         ECORE_MMIOWB();
3987         ECORE_SMP_MB();
3988
3989         return ECORE_SUCCESS;
3990 }
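
/* Worked example for the coalescing math above (illustrative): hc_rate
 * appears to be a target interrupt rate in events per second, while the
 * status block expects a period in microseconds, hence
 * hc_usec = 1000000 / hc_rate:
 *
 *     // hc_rate == 5000  ->  hc_usec == 200 (one update every 200 us)
 *     // hc_rate == 0     ->  hc_usec == 0   (coalescing timeout disabled)
 */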
3991
3992 static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc, struct ecore_queue_state_params
3993                                   *params)
3994 {
3995         struct ecore_queue_sp_obj *o = params->q_obj;
3996         struct client_init_ramrod_data *rdata =
3997             (struct client_init_ramrod_data *)o->rdata;
3998         ecore_dma_addr_t data_mapping = o->rdata_mapping;
3999         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4000
4001         /* Clear the ramrod data */
4002         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4003
4004         /* Fill the ramrod data */
4005         ecore_q_fill_setup_data_cmn(sc, params, rdata);
4006
4007         /* No need for an explicit memory barrier here as long as we
4008          * ensure the ordering of writing to the SPQ element
4009          * and updating of the SPQ producer, which involves a memory
4010          * read; otherwise we would have to put a full memory barrier
4011          * there (inside ecore_sp_post()).
4012          */
4013
4014         return ecore_sp_post(sc,
4015                              ramrod,
4016                              o->cids[ECORE_PRIMARY_CID_INDEX],
4017                              data_mapping, ETH_CONNECTION_TYPE);
4018 }
4019
4020 static int ecore_q_send_setup_e2(struct bnx2x_softc *sc,
4021                                  struct ecore_queue_state_params *params)
4022 {
4023         struct ecore_queue_sp_obj *o = params->q_obj;
4024         struct client_init_ramrod_data *rdata =
4025             (struct client_init_ramrod_data *)o->rdata;
4026         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4027         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4028
4029         /* Clear the ramrod data */
4030         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4031
4032         /* Fill the ramrod data */
4033         ecore_q_fill_setup_data_cmn(sc, params, rdata);
4034         ecore_q_fill_setup_data_e2(params, rdata);
4035
4036         /* No need for an explicit memory barrier here as long as we
4037          * ensure the ordering of writing to the SPQ element
4038          * and updating of the SPQ producer, which involves a memory
4039          * read; otherwise we would have to put a full memory barrier
4040          * there (inside ecore_sp_post()).
4041          */
4042
4043         return ecore_sp_post(sc,
4044                              ramrod,
4045                              o->cids[ECORE_PRIMARY_CID_INDEX],
4046                              data_mapping, ETH_CONNECTION_TYPE);
4047 }
4048
4049 static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue_state_params
4050                                       *params)
4051 {
4052         struct ecore_queue_sp_obj *o = params->q_obj;
4053         struct tx_queue_init_ramrod_data *rdata =
4054             (struct tx_queue_init_ramrod_data *)o->rdata;
4055         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4056         int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4057         struct ecore_queue_setup_tx_only_params *tx_only_params =
4058             &params->params.tx_only;
4059         uint8_t cid_index = tx_only_params->cid_index;
4060
4061         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
4062                 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4063                 ECORE_MSG(sc, "sending forward tx-only ramrod");
             }
4064
4065         if (cid_index >= o->max_cos) {
4066                 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4067                             o->cl_id, cid_index);
4068                 return ECORE_INVAL;
4069         }
4070
4071         ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d",
4072                   tx_only_params->gen_params.cos,
4073                   tx_only_params->gen_params.spcl_id);
4074
4075         /* Clear the ramrod data */
4076         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4077
4078         /* Fill the ramrod data */
4079         ecore_q_fill_setup_tx_only(sc, params, rdata);
4080
4081         ECORE_MSG(sc,
4082                   "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
4083                   o->cids[cid_index], rdata->general.client_id,
4084                   rdata->general.sp_client_id, rdata->general.cos);
4085
4086         /* No need for an explicit memory barrier here as long as we
4087          * ensure the ordering of writing to the SPQ element
4088          * and updating of the SPQ producer, which involves a memory
4089          * read; otherwise we would have to put a full memory barrier
4090          * there (inside ecore_sp_post()).
4091          */
4092
4093         return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4094                              data_mapping, ETH_CONNECTION_TYPE);
4095 }
4096
4097 static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj,
4098                                      struct ecore_queue_update_params *params,
4099                                      struct client_update_ramrod_data *data)
4100 {
4101         /* Client ID of the client to update */
4102         data->client_id = obj->cl_id;
4103
4104         /* Function ID of the client to update */
4105         data->func_id = obj->func_id;
4106
4107         /* Default VLAN value */
4108         data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
4109
4110         /* Inner VLAN stripping */
4111         data->inner_vlan_removal_enable_flg =
4112             ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4113         data->inner_vlan_removal_change_flg =
4114             ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
4115                            &params->update_flags);
4116
4117         /* Outer VLAN stripping */
4118         data->outer_vlan_removal_enable_flg =
4119             ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4120         data->outer_vlan_removal_change_flg =
4121             ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
4122                            &params->update_flags);
4123
4124         /* Drop packets that have source MAC that doesn't belong to this
4125          * Queue.
4126          */
4127         data->anti_spoofing_enable_flg =
4128             ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4129         data->anti_spoofing_change_flg =
4130             ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
4131                            &params->update_flags);
4132
4133         /* Activate/Deactivate */
4134         data->activate_flg =
4135             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
4136         data->activate_change_flg =
4137             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4138
4139         /* Enable default VLAN */
4140         data->default_vlan_enable_flg =
4141             ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4142         data->default_vlan_change_flg =
4143             ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
4144                            &params->update_flags);
4145
4146         /* silent vlan removal */
4147         data->silent_vlan_change_flg =
4148             ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4149                            &params->update_flags);
4150         data->silent_vlan_removal_flg =
4151             ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
4152                            &params->update_flags);
4153         data->silent_vlan_value =
4154             ECORE_CPU_TO_LE16(params->silent_removal_value);
4155         data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
4156
4157         /* tx switching */
4158         data->tx_switching_flg =
4159             ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, &params->update_flags);
4160         data->tx_switching_change_flg =
4161             ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
4162                            &params->update_flags);
4163 }
4164
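/* A minimal illustration (hypothetical helper, not part of the driver flow):
 * the UPDATE ramrod pairs every attribute with a "change" flag, and firmware
 * applies a value only when its change flag is set.  A caller toggling tx
 * switching would therefore be expected to set both bits before issuing
 * ECORE_Q_CMD_UPDATE:
 */
static void __rte_unused
ecore_q_example_update_tx_switching(struct ecore_queue_state_params *params,
                                    int enable)
{
        struct ecore_queue_update_params *update = &params->params.update;

        params->cmd = ECORE_Q_CMD_UPDATE;

        /* Mark the attribute as changed... */
        ECORE_SET_BIT_NA(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
                         &update->update_flags);

        /* ...and supply the new value. */
        if (enable)
                ECORE_SET_BIT_NA(ECORE_Q_UPDATE_TX_SWITCHING,
                                 &update->update_flags);
}
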
4165 static int ecore_q_send_update(struct bnx2x_softc *sc,
4166                                struct ecore_queue_state_params *params)
4167 {
4168         struct ecore_queue_sp_obj *o = params->q_obj;
4169         struct client_update_ramrod_data *rdata =
4170             (struct client_update_ramrod_data *)o->rdata;
4171         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4172         struct ecore_queue_update_params *update_params =
4173             &params->params.update;
4174         uint8_t cid_index = update_params->cid_index;
4175
4176         if (cid_index >= o->max_cos) {
4177                 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4178                             o->cl_id, cid_index);
4179                 return ECORE_INVAL;
4180         }
4181
4182         /* Clear the ramrod data */
4183         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4184
4185         /* Fill the ramrod data */
4186         ecore_q_fill_update_data(o, update_params, rdata);
4187
4188         /* No need for an explicit memory barrier here as long as we
4189          * ensure the ordering of writing to the SPQ element
4190          * and updating of the SPQ producer which involves a memory
4191          * read. If the memory read is removed we will have to put a
4192          * full memory barrier there (inside ecore_sp_post()).
4193          */
4194
4195         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4196                              o->cids[cid_index], data_mapping,
4197                              ETH_CONNECTION_TYPE);
4198 }
4199
4200 /**
4201  * ecore_q_send_deactivate - send DEACTIVATE command
4202  *
4203  * @sc:         device handle
4204  * @params:     queue state parameters
4205  *
4206  * Implemented using the UPDATE command.
4207  */
4208 static int ecore_q_send_deactivate(struct bnx2x_softc *sc,
4209                                    struct ecore_queue_state_params *params)
4210 {
4211         struct ecore_queue_update_params *update = &params->params.update;
4212
4213         ECORE_MEMSET(update, 0, sizeof(*update));
4214
4215         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4216
4217         return ecore_q_send_update(sc, params);
4218 }
4219
4220 /**
4221  * ecore_q_send_activate - send ACTIVATE command
4222  *
4223  * @sc:         device handle
4224  * @params:     queue state parameters
4225  *
4226  * Implemented using the UPDATE command.
4227  */
4228 static int ecore_q_send_activate(struct bnx2x_softc *sc,
4229                                  struct ecore_queue_state_params *params)
4230 {
4231         struct ecore_queue_update_params *update = &params->params.update;
4232
4233         ECORE_MEMSET(update, 0, sizeof(*update));
4234
4235         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
4236         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4237
4238         return ecore_q_send_update(sc, params);
4239 }
4240
4241 static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc,
4242                                    __rte_unused
4243                                    struct ecore_queue_state_params *params)
4244 {
4245         /* Not implemented yet. */
4246         return -1;
4247 }
4248
4249 static int ecore_q_send_halt(struct bnx2x_softc *sc,
4250                              struct ecore_queue_state_params *params)
4251 {
4252         struct ecore_queue_sp_obj *o = params->q_obj;
4253
4254         /* Build eth_halt_ramrod_data.client_id in a big-endian friendly
4255          * way: the client id is carried in the SPQ element's data cell
4256          * itself rather than via a DMA-mapped buffer. */
4257         ecore_dma_addr_t data_mapping = (ecore_dma_addr_t) o->cl_id;
4258         return ecore_sp_post(sc,
4259                              RAMROD_CMD_ID_ETH_HALT,
4260                              o->cids[ECORE_PRIMARY_CID_INDEX],
4261                              data_mapping, ETH_CONNECTION_TYPE);
4262 }
4263
4264 static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
4265                                 struct ecore_queue_state_params *params)
4266 {
4267         struct ecore_queue_sp_obj *o = params->q_obj;
4268         uint8_t cid_idx = params->params.cfc_del.cid_index;
4269
4270         if (cid_idx >= o->max_cos) {
4271                 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4272                             o->cl_id, cid_idx);
4273                 return ECORE_INVAL;
4274         }
4275
4276         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
4277                              o->cids[cid_idx], 0, NONE_CONNECTION_TYPE);
4278 }
4279
4280 static int ecore_q_send_terminate(struct bnx2x_softc *sc,
4281                                   struct ecore_queue_state_params *params)
4282 {
4283         struct ecore_queue_sp_obj *o = params->q_obj;
4284         uint8_t cid_index = params->params.terminate.cid_index;
4285
4286         if (cid_index >= o->max_cos) {
4287                 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4288                             o->cl_id, cid_index);
4289                 return ECORE_INVAL;
4290         }
4291
4292         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
4293                              o->cids[cid_index], 0, ETH_CONNECTION_TYPE);
4294 }
4295
4296 static int ecore_q_send_empty(struct bnx2x_softc *sc,
4297                               struct ecore_queue_state_params *params)
4298 {
4299         struct ecore_queue_sp_obj *o = params->q_obj;
4300
4301         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
4302                              o->cids[ECORE_PRIMARY_CID_INDEX], 0,
4303                              ETH_CONNECTION_TYPE);
4304 }
4305
4306 static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc,
4307                                     struct ecore_queue_state_params *params)
4308 {
4309         switch (params->cmd) {
4310         case ECORE_Q_CMD_INIT:
4311                 return ecore_q_init(sc, params);
4312         case ECORE_Q_CMD_SETUP_TX_ONLY:
4313                 return ecore_q_send_setup_tx_only(sc, params);
4314         case ECORE_Q_CMD_DEACTIVATE:
4315                 return ecore_q_send_deactivate(sc, params);
4316         case ECORE_Q_CMD_ACTIVATE:
4317                 return ecore_q_send_activate(sc, params);
4318         case ECORE_Q_CMD_UPDATE:
4319                 return ecore_q_send_update(sc, params);
4320         case ECORE_Q_CMD_UPDATE_TPA:
4321                 return ecore_q_send_update_tpa(sc, params);
4322         case ECORE_Q_CMD_HALT:
4323                 return ecore_q_send_halt(sc, params);
4324         case ECORE_Q_CMD_CFC_DEL:
4325                 return ecore_q_send_cfc_del(sc, params);
4326         case ECORE_Q_CMD_TERMINATE:
4327                 return ecore_q_send_terminate(sc, params);
4328         case ECORE_Q_CMD_EMPTY:
4329                 return ecore_q_send_empty(sc, params);
4330         default:
4331                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4332                 return ECORE_INVAL;
4333         }
4334 }
4335
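/* The chip-specific dispatchers below differ only in how ECORE_Q_CMD_SETUP
 * is handled (E1x vs. E2 ramrod data); every other command is routed to the
 * common handler above.
 */
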
4336 static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
4337                                     struct ecore_queue_state_params *params)
4338 {
4339         switch (params->cmd) {
4340         case ECORE_Q_CMD_SETUP:
4341                 return ecore_q_send_setup_e1x(sc, params);
4342         case ECORE_Q_CMD_INIT:
4343         case ECORE_Q_CMD_SETUP_TX_ONLY:
4344         case ECORE_Q_CMD_DEACTIVATE:
4345         case ECORE_Q_CMD_ACTIVATE:
4346         case ECORE_Q_CMD_UPDATE:
4347         case ECORE_Q_CMD_UPDATE_TPA:
4348         case ECORE_Q_CMD_HALT:
4349         case ECORE_Q_CMD_CFC_DEL:
4350         case ECORE_Q_CMD_TERMINATE:
4351         case ECORE_Q_CMD_EMPTY:
4352                 return ecore_queue_send_cmd_cmn(sc, params);
4353         default:
4354                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4355                 return ECORE_INVAL;
4356         }
4357 }
4358
4359 static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
4360                                    struct ecore_queue_state_params *params)
4361 {
4362         switch (params->cmd) {
4363         case ECORE_Q_CMD_SETUP:
4364                 return ecore_q_send_setup_e2(sc, params);
4365         case ECORE_Q_CMD_INIT:
4366         case ECORE_Q_CMD_SETUP_TX_ONLY:
4367         case ECORE_Q_CMD_DEACTIVATE:
4368         case ECORE_Q_CMD_ACTIVATE:
4369         case ECORE_Q_CMD_UPDATE:
4370         case ECORE_Q_CMD_UPDATE_TPA:
4371         case ECORE_Q_CMD_HALT:
4372         case ECORE_Q_CMD_CFC_DEL:
4373         case ECORE_Q_CMD_TERMINATE:
4374         case ECORE_Q_CMD_EMPTY:
4375                 return ecore_queue_send_cmd_cmn(sc, params);
4376         default:
4377                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4378                 return ECORE_INVAL;
4379         }
4380 }
4381
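/* Minimal usage sketch (hypothetical helper; assumes params->q_obj was
 * prepared with ecore_init_queue_obj() below and that
 * ecore_queue_state_change(), defined earlier in this file, is the entry
 * point): bring a queue from RESET to ACTIVE by issuing INIT and then SETUP
 * synchronously.
 */
static int __rte_unused
ecore_q_example_bringup(struct bnx2x_softc *sc,
                        struct ecore_queue_state_params *params)
{
        int rc;

        /* Wait for each ramrod completion before returning */
        ECORE_SET_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags);

        params->cmd = ECORE_Q_CMD_INIT;
        rc = ecore_queue_state_change(sc, params);
        if (rc)
                return rc;

        params->cmd = ECORE_Q_CMD_SETUP;
        ECORE_SET_BIT(ECORE_Q_FLG_ACTIVE, &params->params.setup.flags);

        return ecore_queue_state_change(sc, params);
}
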
4382 /**
4383  * ecore_queue_chk_transition - check state machine of a regular
4384  *                              (not Forwarding) Queue
4385  *
4386  * @sc:         device handle
4387  * @o:          queue state object
4388  * @params:     queue state parameters
4389  *
4390  * It both checks if the requested command is legal in a current
4391  * state and, if it's legal, sets a `next_state' in the object
4392  * that will be used in the completion flow to set the `state'
4393  * of the object.
4394  *
4395  * returns 0 if a requested command is a legal transition,
4396  *         ECORE_INVAL otherwise.
4397  */
4398 static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
4399                                       struct ecore_queue_sp_obj *o,
4400                                       struct ecore_queue_state_params *params)
4401 {
4402         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4403         enum ecore_queue_cmd cmd = params->cmd;
4404         struct ecore_queue_update_params *update_params =
4405             &params->params.update;
4406         uint8_t next_tx_only = o->num_tx_only;
4407
4408         /* Forget all commands pending completion if a driver-only state
4409          * transition has been requested.
4410          */
4411         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4412                 o->pending = 0;
4413                 o->next_state = ECORE_Q_STATE_MAX;
4414         }
4415
4416         /* Don't allow a next state transition if we are in the middle of
4417          * the previous one.
4418          */
4419         if (o->pending) {
4420                 PMD_DRV_LOG(ERR, sc, "Blocking transition since pending was %lx",
4421                             o->pending);
4422                 return ECORE_BUSY;
4423         }
4424
4425         switch (state) {
4426         case ECORE_Q_STATE_RESET:
4427                 if (cmd == ECORE_Q_CMD_INIT)
4428                         next_state = ECORE_Q_STATE_INITIALIZED;
4429
4430                 break;
4431         case ECORE_Q_STATE_INITIALIZED:
4432                 if (cmd == ECORE_Q_CMD_SETUP) {
4433                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4434                                            &params->params.setup.flags))
4435                                 next_state = ECORE_Q_STATE_ACTIVE;
4436                         else
4437                                 next_state = ECORE_Q_STATE_INACTIVE;
4438                 }
4439
4440                 break;
4441         case ECORE_Q_STATE_ACTIVE:
4442                 if (cmd == ECORE_Q_CMD_DEACTIVATE)
4443                         next_state = ECORE_Q_STATE_INACTIVE;
4444
4445                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4446                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4447                         next_state = ECORE_Q_STATE_ACTIVE;
4448
4449                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4450                         next_state = ECORE_Q_STATE_MULTI_COS;
4451                         next_tx_only = 1;
4452                 }
4453
4454                 else if (cmd == ECORE_Q_CMD_HALT)
4455                         next_state = ECORE_Q_STATE_STOPPED;
4456
4457                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4458                         /* If "active" state change is requested, update the
4459                          *  state accordingly.
4460                          */
4461                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4462                                            &update_params->update_flags) &&
4463                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4464                                             &update_params->update_flags))
4465                                 next_state = ECORE_Q_STATE_INACTIVE;
4466                         else
4467                                 next_state = ECORE_Q_STATE_ACTIVE;
4468                 }
4469
4470                 break;
4471         case ECORE_Q_STATE_MULTI_COS:
4472                 if (cmd == ECORE_Q_CMD_TERMINATE)
4473                         next_state = ECORE_Q_STATE_MCOS_TERMINATED;
4474
4475                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4476                         next_state = ECORE_Q_STATE_MULTI_COS;
4477                         next_tx_only = o->num_tx_only + 1;
4478                 }
4479
4480                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4481                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4482                         next_state = ECORE_Q_STATE_MULTI_COS;
4483
4484                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4485                         /* If "active" state change is requested, update the
4486                          *  state accordingly.
4487                          */
4488                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4489                                            &update_params->update_flags) &&
4490                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4491                                             &update_params->update_flags))
4492                                 next_state = ECORE_Q_STATE_INACTIVE;
4493                         else
4494                                 next_state = ECORE_Q_STATE_MULTI_COS;
4495                 }
4496
4497                 break;
4498         case ECORE_Q_STATE_MCOS_TERMINATED:
4499                 if (cmd == ECORE_Q_CMD_CFC_DEL) {
4500                         next_tx_only = o->num_tx_only - 1;
4501                         if (next_tx_only == 0)
4502                                 next_state = ECORE_Q_STATE_ACTIVE;
4503                         else
4504                                 next_state = ECORE_Q_STATE_MULTI_COS;
4505                 }
4506
4507                 break;
4508         case ECORE_Q_STATE_INACTIVE:
4509                 if (cmd == ECORE_Q_CMD_ACTIVATE)
4510                         next_state = ECORE_Q_STATE_ACTIVE;
4511
4512                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4513                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4514                         next_state = ECORE_Q_STATE_INACTIVE;
4515
4516                 else if (cmd == ECORE_Q_CMD_HALT)
4517                         next_state = ECORE_Q_STATE_STOPPED;
4518
4519                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4520                         /* If "active" state change is requested, update the
4521                          * state accordingly.
4522                          */
4523                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4524                                            &update_params->update_flags) &&
4525                             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4526                                            &update_params->update_flags)) {
4527                                 if (o->num_tx_only == 0)
4528                                         next_state = ECORE_Q_STATE_ACTIVE;
4529                                 else    /* tx only queues exist for this queue */
4530                                         next_state = ECORE_Q_STATE_MULTI_COS;
4531                         } else
4532                                 next_state = ECORE_Q_STATE_INACTIVE;
4533                 }
4534
4535                 break;
4536         case ECORE_Q_STATE_STOPPED:
4537                 if (cmd == ECORE_Q_CMD_TERMINATE)
4538                         next_state = ECORE_Q_STATE_TERMINATED;
4539
4540                 break;
4541         case ECORE_Q_STATE_TERMINATED:
4542                 if (cmd == ECORE_Q_CMD_CFC_DEL)
4543                         next_state = ECORE_Q_STATE_RESET;
4544
4545                 break;
4546         default:
4547                 PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
4548         }
4549
4550         /* Transition is assured */
4551         if (next_state != ECORE_Q_STATE_MAX) {
4552                 ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
4553                           state, cmd, next_state);
4554                 o->next_state = next_state;
4555                 o->next_tx_only = next_tx_only;
4556                 return ECORE_SUCCESS;
4557         }
4558
4559         ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
4560
4561         return ECORE_INVAL;
4562 }
4563
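/* For reference, the main legal paths through the regular-queue state
 * machine above (a summary of the switch cases, not new behaviour):
 *
 *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
 *   ACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED --CFC_DEL--> RESET
 *   ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
 *   MCOS_TERMINATED --CFC_DEL--> ACTIVE or MULTI_COS (per tx-only count)
 */
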
4564 /**
4565  * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
4566  *
4567  * @sc:         device handle
4568  * @o:          queue state object
4569  * @params:     queue state parameters
4570  *
4571  * It both checks if the requested command is legal in a current
4572  * state and, if it's legal, sets a `next_state' in the object
4573  * that will be used in the completion flow to set the `state'
4574  * of the object.
4575  *
4576  * returns 0 if a requested command is a legal transition,
4577  *         ECORE_INVAL otherwise.
4578  */
4579 static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
4580                                           struct ecore_queue_sp_obj *o,
4581                                           struct ecore_queue_state_params
4582                                           *params)
4583 {
4584         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4585         enum ecore_queue_cmd cmd = params->cmd;
4586
4587         switch (state) {
4588         case ECORE_Q_STATE_RESET:
4589                 if (cmd == ECORE_Q_CMD_INIT)
4590                         next_state = ECORE_Q_STATE_INITIALIZED;
4591
4592                 break;
4593         case ECORE_Q_STATE_INITIALIZED:
4594                 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4595                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4596                                            &params->params.tx_only.flags))
4597                                 next_state = ECORE_Q_STATE_ACTIVE;
4598                         else
4599                                 next_state = ECORE_Q_STATE_INACTIVE;
4600                 }
4601
4602                 break;
4603         case ECORE_Q_STATE_ACTIVE:
4604         case ECORE_Q_STATE_INACTIVE:
4605                 if (cmd == ECORE_Q_CMD_CFC_DEL)
4606                         next_state = ECORE_Q_STATE_RESET;
4607
4608                 break;
4609         default:
4610                 PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
4611         }
4612
4613         /* Transition is assured */
4614         if (next_state != ECORE_Q_STATE_MAX) {
4615                 ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
4616                           state, cmd, next_state);
4617                 o->next_state = next_state;
4618                 return ECORE_SUCCESS;
4619         }
4620
4621         ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
4622         return ECORE_INVAL;
4623 }
4624
4625 void ecore_init_queue_obj(struct bnx2x_softc *sc,
4626                           struct ecore_queue_sp_obj *obj,
4627                           uint8_t cl_id, uint32_t * cids, uint8_t cid_cnt,
4628                           uint8_t func_id, void *rdata,
4629                           ecore_dma_addr_t rdata_mapping, unsigned long type)
4630 {
4631         ECORE_MEMSET(obj, 0, sizeof(*obj));
4632
4633         /* We support only ECORE_MULTI_TX_COS Tx CoS at the moment */
4634         ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
4635
4636         rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
4637         obj->max_cos = cid_cnt;
4638         obj->cl_id = cl_id;
4639         obj->func_id = func_id;
4640         obj->rdata = rdata;
4641         obj->rdata_mapping = rdata_mapping;
4642         obj->type = type;
4643         obj->next_state = ECORE_Q_STATE_MAX;
4644
4645         if (CHIP_IS_E1x(sc))
4646                 obj->send_cmd = ecore_queue_send_cmd_e1x;
4647         else
4648                 obj->send_cmd = ecore_queue_send_cmd_e2;
4649
4650         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
4651                 obj->check_transition = ecore_queue_chk_fwd_transition;
4652         else
4653                 obj->check_transition = ecore_queue_chk_transition;
4654
4655         obj->complete_cmd = ecore_queue_comp_cmd;
4656         obj->wait_comp = ecore_queue_wait_comp;
4657         obj->set_pending = ecore_queue_set_pending;
4658 }
4659
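/* Hedged usage sketch (hypothetical helper): a single-CoS L2 queue object
 * would typically be prepared as below before any state transition is
 * requested.  The CID, client/function ids and the rdata buffer come from
 * the caller; ECORE_Q_TYPE_HAS_RX/ECORE_Q_TYPE_HAS_TX are the type flags
 * declared in ecore_sp.h.
 */
static void __rte_unused
ecore_q_example_init_obj(struct bnx2x_softc *sc, struct ecore_queue_sp_obj *q,
                         uint32_t cid, uint8_t cl_id, uint8_t func_id,
                         void *rdata, ecore_dma_addr_t rdata_mapping)
{
        unsigned long q_type = 0;

        ECORE_SET_BIT(ECORE_Q_TYPE_HAS_RX, &q_type);
        ECORE_SET_BIT(ECORE_Q_TYPE_HAS_TX, &q_type);

        ecore_init_queue_obj(sc, q, cl_id, &cid, 1 /* single CID/CoS */,
                             func_id, rdata, rdata_mapping, q_type);
}
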
4660 /********************** Function state object *********************************/
4661 enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc,
4662                                            struct ecore_func_sp_obj *o)
4663 {
4664         /* in the middle of a transaction - return INVALID state */
4665         if (o->pending)
4666                 return ECORE_F_STATE_MAX;
4667
4668         /* Ensure the ordering of reading of o->pending and o->state:
4669          * o->pending should be read first.
4670          */
4671         rmb();
4672
4673         return o->state;
4674 }
4675
4676 static int ecore_func_wait_comp(struct bnx2x_softc *sc,
4677                                 struct ecore_func_sp_obj *o,
4678                                 enum ecore_func_cmd cmd)
4679 {
4680         return ecore_state_wait(sc, cmd, &o->pending);
4681 }
4682
4683 /**
4684  * ecore_func_state_change_comp - complete the state machine transition
4685  *
4686  * @sc:         device handle
4687  * @o:          function state object
4688  * @cmd:        command that completed
4689  *
4690  * Called on state change transition. Completes the state
4691  * machine transition only - no HW interaction.
4692  */
4693 static int
4694 ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
4695                              struct ecore_func_sp_obj *o,
4696                              enum ecore_func_cmd cmd)
4697 {
4698         unsigned long cur_pending = o->pending;
4699
4700         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4701                 PMD_DRV_LOG(ERR, sc,
4702                             "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
4703                             cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
4704                             o->next_state);
4705                 return ECORE_INVAL;
4706         }
4707
4708         ECORE_MSG(sc, "Completing command %d for func %d, setting state to %d",
4709                   cmd, ECORE_FUNC_ID(sc), o->next_state);
4710
4711         o->state = o->next_state;
4712         o->next_state = ECORE_F_STATE_MAX;
4713
4714         /* It's important that o->state and o->next_state are
4715          * updated before o->pending.
4716          */
4717         wmb();
4718
4719         ECORE_CLEAR_BIT(cmd, &o->pending);
4720         ECORE_SMP_MB_AFTER_CLEAR_BIT();
4721
4722         return ECORE_SUCCESS;
4723 }
4724
4725 /**
4726  * ecore_func_comp_cmd - complete the state change command
4727  *
4728  * @sc:         device handle
4729  * @o:          function state object
4730  * @cmd:        command to complete
4731  *
4732  * Checks that the arrived completion is expected.
4733  */
4734 static int ecore_func_comp_cmd(struct bnx2x_softc *sc,
4735                                struct ecore_func_sp_obj *o,
4736                                enum ecore_func_cmd cmd)
4737 {
4738         /* Complete the state machine part first, checking whether this is
4739          * a legal completion.
4740          */
4741         int rc = ecore_func_state_change_comp(sc, o, cmd);
4742         return rc;
4743 }
4744
4745 /**
4746  * ecore_func_chk_transition - perform function state machine transition
4747  *
4748  * @sc:         device handle
4749  * @o:          function state object
4750  * @params:     function state parameters
4751  *
4752  * It both checks if the requested command is legal in a current
4753  * state and, if it's legal, sets a `next_state' in the object
4754  * that will be used in the completion flow to set the `state'
4755  * of the object.
4756  *
4757  * returns 0 if a requested command is a legal transition,
4758  *         ECORE_INVAL otherwise.
4759  */
4760 static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
4761                                      struct ecore_func_sp_obj *o,
4762                                      struct ecore_func_state_params *params)
4763 {
4764         enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
4765         enum ecore_func_cmd cmd = params->cmd;
4766
4767         /* Forget all commands pending completion if a driver-only state
4768          * transition has been requested.
4769          */
4770         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4771                 o->pending = 0;
4772                 o->next_state = ECORE_F_STATE_MAX;
4773         }
4774
4775         /* Don't allow a next state transition if we are in the middle of
4776          * the previous one.
4777          */
4778         if (o->pending)
4779                 return ECORE_BUSY;
4780
4781         switch (state) {
4782         case ECORE_F_STATE_RESET:
4783                 if (cmd == ECORE_F_CMD_HW_INIT)
4784                         next_state = ECORE_F_STATE_INITIALIZED;
4785
4786                 break;
4787         case ECORE_F_STATE_INITIALIZED:
4788                 if (cmd == ECORE_F_CMD_START)
4789                         next_state = ECORE_F_STATE_STARTED;
4790
4791                 else if (cmd == ECORE_F_CMD_HW_RESET)
4792                         next_state = ECORE_F_STATE_RESET;
4793
4794                 break;
4795         case ECORE_F_STATE_STARTED:
4796                 if (cmd == ECORE_F_CMD_STOP)
4797                         next_state = ECORE_F_STATE_INITIALIZED;
4798                 /* AFEX ramrods can be sent only in started mode, and only
4799                  * if not pending for function_stop ramrod completion:
4800                  * for these events the next state remains STARTED.
4801                  */
4802                 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
4803                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4804                         next_state = ECORE_F_STATE_STARTED;
4805
4806                 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
4807                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4808                         next_state = ECORE_F_STATE_STARTED;
4809
4810                 /* Switch_update ramrod can be sent in either started or
4811                  * tx_stopped state, and it doesn't change the state.
4812                  */
4813                 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4814                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4815                         next_state = ECORE_F_STATE_STARTED;
4816
4817                 else if (cmd == ECORE_F_CMD_TX_STOP)
4818                         next_state = ECORE_F_STATE_TX_STOPPED;
4819
4820                 break;
4821         case ECORE_F_STATE_TX_STOPPED:
4822                 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4823                     (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4824                         next_state = ECORE_F_STATE_TX_STOPPED;
4825
4826                 else if (cmd == ECORE_F_CMD_TX_START)
4827                         next_state = ECORE_F_STATE_STARTED;
4828
4829                 break;
4830         default:
4831                 PMD_DRV_LOG(ERR, sc, "Unknown state: %d", state);
4832         }
4833
4834         /* Transition is assured */
4835         if (next_state != ECORE_F_STATE_MAX) {
4836                 ECORE_MSG(sc, "Good function state transition: %d(%d)->%d",
4837                           state, cmd, next_state);
4838                 o->next_state = next_state;
4839                 return ECORE_SUCCESS;
4840         }
4841
4842         ECORE_MSG(sc,
4843                   "Bad function state transition request: %d %d", state, cmd);
4844
4845         return ECORE_INVAL;
4846 }
4847
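/* For reference, the legal paths through the function state machine above
 * (a summary of the switch cases, not new behaviour):
 *
 *   RESET --HW_INIT--> INITIALIZED --START--> STARTED --STOP--> INITIALIZED
 *   INITIALIZED --HW_RESET--> RESET
 *   STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 */
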
4848 /**
4849  * ecore_func_init_func - performs HW init at function stage
4850  *
4851  * @sc:         device handle
4852  * @drv:        driver-specific init/reset callbacks
4853  *
4854  * Init HW when the current phase is
4855  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
4856  * HW blocks.
4857  */
4858 static int ecore_func_init_func(struct bnx2x_softc *sc,
4859                                 const struct ecore_func_sp_drv_ops *drv)
4860 {
4861         return drv->init_hw_func(sc);
4862 }
4863
4864 /**
4865  * ecore_func_init_port - performs HW init at port stage
4866  *
4867  * @sc:         device handle
4868  * @drv:        driver-specific init/reset callbacks
4869  *
4870  * Init HW when the current phase is
4871  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
4872  * FUNCTION-only HW blocks.
4873  *
4874  */
4875 static int ecore_func_init_port(struct bnx2x_softc *sc,
4876                                 const struct ecore_func_sp_drv_ops *drv)
4877 {
4878         int rc = drv->init_hw_port(sc);
4879         if (rc)
4880                 return rc;
4881
4882         return ecore_func_init_func(sc, drv);
4883 }
4884
4885 /**
4886  * ecore_func_init_cmn_chip - performs HW init at chip-common stage
4887  *
4888  * @sc:         device handle
4889  * @drv:        driver-specific init/reset callbacks
4890  *
4891  * Init HW when the current phase is
4892  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
4893  * PORT-only and FUNCTION-only HW blocks.
4894  */
4895 static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc,
4896                                     const struct ecore_func_sp_drv_ops *drv)
4897 {
4898         int rc = drv->init_hw_cmn_chip(sc);
4899         if (rc)
4900                 return rc;
4901
4902         return ecore_func_init_port(sc, drv);
4903 }
4904
4905 /**
4906  * ecore_func_init_cmn - performs HW init at common stage
4907  *
4908  * @sc:         device handle
4909  * @drv:        driver-specific init/reset callbacks
4910  *
4911  * Init HW when the current phase is
4912  * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
4913  * PORT-only and FUNCTION-only HW blocks.
4914  */
4915 static int ecore_func_init_cmn(struct bnx2x_softc *sc,
4916                                const struct ecore_func_sp_drv_ops *drv)
4917 {
4918         int rc = drv->init_hw_cmn(sc);
4919         if (rc)
4920                 return rc;
4921
4922         return ecore_func_init_port(sc, drv);
4923 }
4924
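/* The init helpers above nest by design: the chip-common and common stages
 * fall through to the port stage, which in turn falls through to the
 * function stage, so each MCP load phase initializes the blocks it owns
 * plus everything below it in the hierarchy.
 */
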
4925 static int ecore_func_hw_init(struct bnx2x_softc *sc,
4926                               struct ecore_func_state_params *params)
4927 {
4928         uint32_t load_code = params->params.hw_init.load_phase;
4929         struct ecore_func_sp_obj *o = params->f_obj;
4930         const struct ecore_func_sp_drv_ops *drv = o->drv;
4931         int rc = 0;
4932
4933         ECORE_MSG(sc, "function %d  load_code %x",
4934                   ECORE_ABS_FUNC_ID(sc), load_code);
4935
4936         /* Prepare FW */
4937         rc = drv->init_fw(sc);
4938         if (rc) {
4939                 PMD_DRV_LOG(ERR, sc, "Error loading firmware");
4940                 goto init_err;
4941         }
4942
4943         /* Handle the beginning of COMMON_XXX phases separately... */
4944         switch (load_code) {
4945         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4946                 rc = ecore_func_init_cmn_chip(sc, drv);
4947                 if (rc)
4948                         goto init_err;
4949
4950                 break;
4951         case FW_MSG_CODE_DRV_LOAD_COMMON:
4952                 rc = ecore_func_init_cmn(sc, drv);
4953                 if (rc)
4954                         goto init_err;
4955
4956                 break;
4957         case FW_MSG_CODE_DRV_LOAD_PORT:
4958                 rc = ecore_func_init_port(sc, drv);
4959                 if (rc)
4960                         goto init_err;
4961
4962                 break;
4963         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4964                 rc = ecore_func_init_func(sc, drv);
4965                 if (rc)
4966                         goto init_err;
4967
4968                 break;
4969         default:
4970                 PMD_DRV_LOG(ERR, sc, "Unknown load_code (0x%x) from MCP",
4971                             load_code);
4972                 rc = ECORE_INVAL;
4973         }
4974
4975 init_err:
4976         /* In case of success, complete the command immediately: no ramrods
4977          * have been sent.
4978          */
4979         if (!rc)
4980                 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
4981
4982         return rc;
4983 }
4984
4985 /**
4986  * ecore_func_reset_func - reset HW at function stage
4987  *
4988  * @sc:         device handle
4989  * @drv:        driver-specific init/reset callbacks
4990  *
4991  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
4992  * FUNCTION-only HW blocks.
4993  */
4994 static void ecore_func_reset_func(struct bnx2x_softc *sc,
4995                                   const struct ecore_func_sp_drv_ops *drv)
4996 {
4997         drv->reset_hw_func(sc);
4998 }
4999
5000 /**
5001  * ecore_func_reset_port - reset HW at port stage
5002  *
5003  * @sc:         device handle
5004  * @drv:        driver-specific init/reset callbacks
5005  *
5006  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5007  * FUNCTION-only and PORT-only HW blocks.
5008  *
5009  *                 !!!IMPORTANT!!!
5010  *
5011  * reset_port() must be called before reset_func(): the last thing
5012  * reset_func() does is pf_disable(), which disables PGLUE_B and
5013  * thereby makes any further DMAE transactions impossible.
5014  */
5015 static void ecore_func_reset_port(struct bnx2x_softc *sc,
5016                                   const struct ecore_func_sp_drv_ops *drv)
5017 {
5018         drv->reset_hw_port(sc);
5019         ecore_func_reset_func(sc, drv);
5020 }
5021
5022 /**
5023  * ecore_func_reset_cmn - reset HW at common stage
5024  *
5025  * @sc:         device handle
5026  * @drv:        driver-specific init/reset callbacks
5027  *
5028  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5029  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5030  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5031  */
5032 static void ecore_func_reset_cmn(struct bnx2x_softc *sc,
5033                                  const struct ecore_func_sp_drv_ops *drv)
5034 {
5035         ecore_func_reset_port(sc, drv);
5036         drv->reset_hw_cmn(sc);
5037 }
5038
5039 static int ecore_func_hw_reset(struct bnx2x_softc *sc,
5040                                struct ecore_func_state_params *params)
5041 {
5042         uint32_t reset_phase = params->params.hw_reset.reset_phase;
5043         struct ecore_func_sp_obj *o = params->f_obj;
5044         const struct ecore_func_sp_drv_ops *drv = o->drv;
5045
5046         ECORE_MSG(sc, "function %d  reset_phase %x", ECORE_ABS_FUNC_ID(sc),
5047                   reset_phase);
5048
5049         switch (reset_phase) {
5050         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5051                 ecore_func_reset_cmn(sc, drv);
5052                 break;
5053         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5054                 ecore_func_reset_port(sc, drv);
5055                 break;
5056         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5057                 ecore_func_reset_func(sc, drv);
5058                 break;
5059         default:
5060                 PMD_DRV_LOG(ERR, sc, "Unknown reset_phase (0x%x) from MCP",
5061                             reset_phase);
5062                 break;
5063         }
5064
5065         /* Complete the command immediately: no ramrods have been sent. */
5066         o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
5067
5068         return ECORE_SUCCESS;
5069 }
5070
5071 static int ecore_func_send_start(struct bnx2x_softc *sc,
5072                                  struct ecore_func_state_params *params)
5073 {
5074         struct ecore_func_sp_obj *o = params->f_obj;
5075         struct function_start_data *rdata =
5076             (struct function_start_data *)o->rdata;
5077         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5078         struct ecore_func_start_params *start_params = &params->params.start;
5079
5080         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5081
5082         /* Fill the ramrod data with provided parameters */
5083         rdata->function_mode = (uint8_t) start_params->mf_mode;
5084         rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
5085         rdata->path_id = ECORE_PATH_ID(sc);
5086         rdata->network_cos_mode = start_params->network_cos_mode;
5087         rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5088         rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5089
5090         /*
5091          *  No need for an explicit memory barrier here as long as we
5092          *  ensure the ordering of writing to the SPQ element
5093          *  and updating of the SPQ producer which involves a memory
5094          *  read. If the memory read is removed we will have to put a
5095          *  full memory barrier there (inside ecore_sp_post()).
5096          */
5097
5098         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5099                              data_mapping, NONE_CONNECTION_TYPE);
5100 }
5101
5102 static int ecore_func_send_switch_update(struct bnx2x_softc *sc,
5103                                          struct ecore_func_state_params *params)
5104 {
5105         struct ecore_func_sp_obj *o = params->f_obj;
5106         struct function_update_data *rdata =
5107             (struct function_update_data *)o->rdata;
5108         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5109         struct ecore_func_switch_update_params *switch_update_params =
5110             &params->params.switch_update;
5111
5112         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5113
5114         /* Fill the ramrod data with provided parameters */
5115         rdata->tx_switch_suspend_change_flg = 1;
5116         rdata->tx_switch_suspend = switch_update_params->suspend;
5117         rdata->echo = SWITCH_UPDATE;
5118
5119         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5120                              data_mapping, NONE_CONNECTION_TYPE);
5121 }
5122
5123 static int ecore_func_send_afex_update(struct bnx2x_softc *sc,
5124                                        struct ecore_func_state_params *params)
5125 {
5126         struct ecore_func_sp_obj *o = params->f_obj;
5127         struct function_update_data *rdata =
5128             (struct function_update_data *)o->afex_rdata;
5129         ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
5130         struct ecore_func_afex_update_params *afex_update_params =
5131             &params->params.afex_update;
5132
5133         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5134
5135         /* Fill the ramrod data with provided parameters */
5136         rdata->vif_id_change_flg = 1;
5137         rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
5138         rdata->afex_default_vlan_change_flg = 1;
5139         rdata->afex_default_vlan =
5140             ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
5141         rdata->allowed_priorities_change_flg = 1;
5142         rdata->allowed_priorities = afex_update_params->allowed_priorities;
5143         rdata->echo = AFEX_UPDATE;
5144
5145         /*  No need for an explicit memory barrier here as long as we
5146          *  ensure the ordering of writing to the SPQ element
5147          *  and updating of the SPQ producer which involves a memory
5148          *  read. If the memory read is removed we will have to put a
5149          *  full memory barrier there (inside ecore_sp_post()).
5150          */
5151         ECORE_MSG(sc, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
5152                   rdata->vif_id,
5153                   rdata->afex_default_vlan, rdata->allowed_priorities);
5154
5155         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5156                              data_mapping, NONE_CONNECTION_TYPE);
5157 }
5158
5159 static
5160 inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
5161                                          struct ecore_func_state_params *params)
5162 {
5163         struct ecore_func_sp_obj *o = params->f_obj;
5164         struct afex_vif_list_ramrod_data *rdata =
5165             (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5166         struct ecore_func_afex_viflists_params *afex_vif_params =
5167             &params->params.afex_viflists;
5168         uint64_t *p_rdata = (uint64_t *) rdata;
5169
5170         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5171
5172         /* Fill the ramrod data with provided parameters */
5173         rdata->vif_list_index =
5174             ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
5175         rdata->func_bit_map = afex_vif_params->func_bit_map;
5176         rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5177         rdata->func_to_clear = afex_vif_params->func_to_clear;
5178
5179         /* carry the sub-command type in the echo field */
5180         rdata->echo = afex_vif_params->afex_vif_list_command;
5181
5182         /*  No need for an explicit memory barrier here as long as we
5183          *  ensure the ordering of writing to the SPQ element
5184          *  and updating of the SPQ producer which involves a memory
5185          *  read. If the memory read is removed we will have to put a
5186          *  full memory barrier there (inside ecore_sp_post()).
5187          */
5188
5189         ECORE_MSG(sc,
5190                   "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
5191                   rdata->afex_vif_list_command, rdata->vif_list_index,
5192                   rdata->func_bit_map, rdata->func_to_clear);
5193
5194         /* this ramrod sends data directly and not through DMA mapping */
5195         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5196                              *p_rdata, NONE_CONNECTION_TYPE);
5197 }
5198
5199 static int ecore_func_send_stop(struct bnx2x_softc *sc, __rte_unused
5200                                 struct ecore_func_state_params *params)
5201 {
5202         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
5203                              NONE_CONNECTION_TYPE);
5204 }
5205
5206 static int ecore_func_send_tx_stop(struct bnx2x_softc *sc, __rte_unused
5207                                    struct ecore_func_state_params *params)
5208 {
5209         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
5210                              NONE_CONNECTION_TYPE);
5211 }
5212
5213 static int ecore_func_send_tx_start(struct bnx2x_softc *sc,
5214                                     struct ecore_func_state_params *params)
5215 {
5216         struct ecore_func_sp_obj *o = params->f_obj;
5217         struct flow_control_configuration *rdata =
5218             (struct flow_control_configuration *)o->rdata;
5219         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5220         struct ecore_func_tx_start_params *tx_start_params =
5221             &params->params.tx_start;
5222         uint32_t i;
5223
5224         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5225
5226         rdata->dcb_enabled = tx_start_params->dcb_enabled;
5227         rdata->dcb_version = tx_start_params->dcb_version;
5228         rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
5229
5230         for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5231                 rdata->traffic_type_to_priority_cos[i] =
5232                     tx_start_params->traffic_type_to_priority_cos[i];
5233
5234         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5235                              data_mapping, NONE_CONNECTION_TYPE);
5236 }
5237
5238 static int ecore_func_send_cmd(struct bnx2x_softc *sc,
5239                                struct ecore_func_state_params *params)
5240 {
5241         switch (params->cmd) {
5242         case ECORE_F_CMD_HW_INIT:
5243                 return ecore_func_hw_init(sc, params);
5244         case ECORE_F_CMD_START:
5245                 return ecore_func_send_start(sc, params);
5246         case ECORE_F_CMD_STOP:
5247                 return ecore_func_send_stop(sc, params);
5248         case ECORE_F_CMD_HW_RESET:
5249                 return ecore_func_hw_reset(sc, params);
5250         case ECORE_F_CMD_AFEX_UPDATE:
5251                 return ecore_func_send_afex_update(sc, params);
5252         case ECORE_F_CMD_AFEX_VIFLISTS:
5253                 return ecore_func_send_afex_viflists(sc, params);
5254         case ECORE_F_CMD_TX_STOP:
5255                 return ecore_func_send_tx_stop(sc, params);
5256         case ECORE_F_CMD_TX_START:
5257                 return ecore_func_send_tx_start(sc, params);
5258         case ECORE_F_CMD_SWITCH_UPDATE:
5259                 return ecore_func_send_switch_update(sc, params);
5260         default:
5261                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
5262                 return ECORE_INVAL;
5263         }
5264 }
5265
5266 void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc,
5267                          struct ecore_func_sp_obj *obj,
5268                          void *rdata, ecore_dma_addr_t rdata_mapping,
5269                          void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
5270                          struct ecore_func_sp_drv_ops *drv_iface)
5271 {
5272         ECORE_MEMSET(obj, 0, sizeof(*obj));
5273
5274         ECORE_MUTEX_INIT(&obj->one_pending_mutex);
5275
5276         obj->rdata = rdata;
5277         obj->rdata_mapping = rdata_mapping;
5278         obj->afex_rdata = afex_rdata;
5279         obj->afex_rdata_mapping = afex_rdata_mapping;
5280         obj->send_cmd = ecore_func_send_cmd;
5281         obj->check_transition = ecore_func_chk_transition;
5282         obj->complete_cmd = ecore_func_comp_cmd;
5283         obj->wait_comp = ecore_func_wait_comp;
5284         obj->drv = drv_iface;
5285 }
5286
5287 /**
5288  * ecore_func_state_change - perform Function state change transition
5289  *
5290  * @sc:         device handle
5291  * @params:     parameters to perform the transaction
5292  *
5293  * returns 0 in case of successfully completed transition,
5294  *         negative error code in case of failure, positive
5295  *         (EBUSY) value if a completion is still
5296  *         pending (possible only if RAMROD_COMP_WAIT is
5297  *         not set in params->ramrod_flags for asynchronous
5298  *         commands).
5299  */
5300 int ecore_func_state_change(struct bnx2x_softc *sc,
5301                             struct ecore_func_state_params *params)
5302 {
5303         struct ecore_func_sp_obj *o = params->f_obj;
5304         int rc, cnt = 300;
5305         enum ecore_func_cmd cmd = params->cmd;
5306         unsigned long *pending = &o->pending;
5307
5308         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5309
5310         /* Check that the requested transition is legal */
5311         rc = o->check_transition(sc, o, params);
5312         if ((rc == ECORE_BUSY) &&
5313             (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
5314                 while ((rc == ECORE_BUSY) && (--cnt > 0)) {
5315                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5316                         ECORE_MSLEEP(10);
5317                         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5318                         rc = o->check_transition(sc, o, params);
5319                 }
5320                 if (rc == ECORE_BUSY) {
5321                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5322                         PMD_DRV_LOG(ERR, sc,
5323                                     "timeout waiting for previous ramrod completion");
5324                         return rc;
5325                 }
5326         } else if (rc) {
5327                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5328                 return rc;
5329         }
5330
5331         /* Set "pending" bit */
5332         ECORE_SET_BIT(cmd, pending);
5333
5334         /* Don't send a command if only driver cleanup was requested */
5335         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5336                 ecore_func_state_change_comp(sc, o, cmd);
5337                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5338         } else {
5339                 /* Send a ramrod */
5340                 rc = o->send_cmd(sc, params);
5341
5342                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5343
5344                 if (rc) {
5345                         o->next_state = ECORE_F_STATE_MAX;
5346                         ECORE_CLEAR_BIT(cmd, pending);
5347                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
5348                         return rc;
5349                 }
5350
5351                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5352                         rc = o->wait_comp(sc, o, cmd);
5353                         if (rc)
5354                                 return rc;
5355
5356                         return ECORE_SUCCESS;
5357                 }
5358         }
5359
5360         return ECORE_RET_PENDING(cmd, pending);
5361 }
5362
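/* Minimal usage sketch (hypothetical helper; assumes the object was
 * prepared with ecore_init_func_obj() above): request a synchronous
 * FUNCTION_START through the state machine.
 */
static int __rte_unused
ecore_func_example_start(struct bnx2x_softc *sc,
                         struct ecore_func_sp_obj *f_obj, uint16_t mf_mode)
{
        struct ecore_func_state_params params = { NULL };

        params.f_obj = f_obj;
        params.cmd = ECORE_F_CMD_START;
        params.params.start.mf_mode = mf_mode;

        /* Block until the ramrod completes */
        ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

        return ecore_func_state_change(sc, &params);
}
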
5363 /******************************************************************************
5364  * Description:
5365  *         Calculates CRC-8 on a 32-bit word value: polynomial 0-1-2-8
5366  *         (x^8 + x^2 + x^1 + 1).  Code was translated from Verilog.
5367  * Return: the updated CRC-8 value
5368  *****************************************************************************/
5369 uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc)
5370 {
5371         uint8_t D[32];
5372         uint8_t NewCRC[8];
5373         uint8_t C[8];
5374         uint8_t crc_res;
5375         uint8_t i;
5376
5377         /* split the data into 32 bits */
5378         for (i = 0; i < 32; i++) {
5379                 D[i] = (uint8_t) (data & 1);
5380                 data = data >> 1;
5381         }
5382
5383         /* split the crc into 8 bits */
5384         for (i = 0; i < 8; i++) {
5385                 C[i] = crc & 1;
5386                 crc = crc >> 1;
5387         }
5388
5389         NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
5390             D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
5391             C[6] ^ C[7];
5392         NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
5393             D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
5394             D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
5395         NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
5396             D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
5397             C[0] ^ C[1] ^ C[4] ^ C[5];
5398         NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
5399             D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
5400             C[1] ^ C[2] ^ C[5] ^ C[6];
5401         NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
5402             D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
5403             C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
5404         NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
5405             D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
5406             C[3] ^ C[4] ^ C[7];
5407         NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
5408             D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5];
5409         NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
5410             D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6];
5411
5412         crc_res = 0;
5413         for (i = 0; i < 8; i++) {
5414                 crc_res |= (NewCRC[i] << i);
5415         }
5416
5417         return crc_res;
5418 }
5419
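/* Hedged usage sketch (hypothetical helper): the routine above consumes one
 * 32-bit word per call, so a multi-word buffer would be folded by chaining
 * the returned CRC:
 */
static uint8_t __rte_unused
ecore_example_crc8_words(const uint32_t *words, uint32_t n_words, uint8_t seed)
{
        uint8_t crc = seed;
        uint32_t i;

        for (i = 0; i < n_words; i++)
                crc = ecore_calc_crc8(words[i], crc);

        return crc;
}
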
5420 uint32_t
5421 ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic)
5422 {
5423         int i;
5424         while (len--) {
5425                 crc ^= *p++;
5426                 for (i = 0; i < 8; i++)
5427                         crc = (crc >> 1) ^ ((crc & 1) ? magic : 0);
5428         }
5429         return crc;
5430 }
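
/* Usage note (illustrative, hypothetical helper): with magic == 0xEDB88320
 * (the bit-reflected IEEE 802.3 polynomial), a seed of 0xFFFFFFFF and a
 * final bit-wise NOT, the routine above computes the standard Ethernet
 * CRC-32 of a buffer.
 */
static uint32_t __rte_unused
ecore_example_eth_crc32(const uint8_t *buf, uint32_t len)
{
        return ~ecore_calc_crc32(0xFFFFFFFF, buf, len, 0xEDB88320);
}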