drivers/net/bnx2x/ecore_sp.c
/*-
 * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
 *
 * Eric Davis        <edavis@broadcom.com>
 * David Christensen <davidch@broadcom.com>
 * Gary Zambrano     <zambrano@broadcom.com>
 *
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 * Copyright (c) 2015 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.bnx2x_pmd for copyright and licensing details.
 */

#include "bnx2x.h"
#include "ecore_init.h"

/**** Exe Queue interfaces ****/

/**
 * ecore_exe_queue_init - init the Exe Queue object
 *
 * @o:          pointer to the object
 * @exe_len:    length
 * @owner:      pointer to the owner
 * @validate:   validate function pointer
 * @remove:     remove function pointer
 * @optimize:   optimize function pointer
 * @exec:       execute function pointer
 * @get:        get function pointer
 */
static void
ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
                     struct ecore_exe_queue_obj *o,
                     int exe_len,
                     union ecore_qable_obj *owner,
                     exe_q_validate validate,
                     exe_q_remove remove,
                     exe_q_optimize optimize, exe_q_execute exec, exe_q_get get)
{
        ECORE_MEMSET(o, 0, sizeof(*o));

        ECORE_LIST_INIT(&o->exe_queue);
        ECORE_LIST_INIT(&o->pending_comp);

        ECORE_SPIN_LOCK_INIT(&o->lock, sc);

        o->exe_chunk_len = exe_len;
        o->owner = owner;

        /* Owner specific callbacks */
        o->validate = validate;
        o->remove = remove;
        o->optimize = optimize;
        o->execute = exec;
        o->get = get;

        ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d",
                  exe_len);
}
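
/*
 * Usage sketch (illustrative, not part of the driver): a queueable owner
 * object wires its execution queue up once at init time. The callback names
 * my_validate/my_remove/my_optimize/my_execute/my_get and MY_CHUNK_LEN below
 * are hypothetical placeholders:
 *
 *      ecore_exe_queue_init(sc, &o->exe_queue, MY_CHUNK_LEN,
 *                           (union ecore_qable_obj *)o,
 *                           my_validate, my_remove,
 *                           my_optimize, my_execute, my_get);
 *
 * exe_chunk_len bounds how much work a single ecore_exe_queue_step() may hand
 * to the owner's execute() callback in one ramrod.
 */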

static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
                                      struct ecore_exeq_elem *elem)
{
        ECORE_MSG(sc, "Deleting an exe_queue element");
        ECORE_FREE(sc, elem, sizeof(*elem));
}

static inline int ecore_exe_queue_length(struct ecore_exe_queue_obj *o)
{
        struct ecore_exeq_elem *elem;
        int cnt = 0;

        ECORE_SPIN_LOCK_BH(&o->lock);

        ECORE_LIST_FOR_EACH_ENTRY(elem, &o->exe_queue, link,
                                  struct ecore_exeq_elem)
                cnt++;

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return cnt;
}

/**
 * ecore_exe_queue_add - add a new element to the execution queue
 *
 * @sc:         driver handle
 * @o:          queue
 * @elem:       new element to add
 * @restore:    true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static int ecore_exe_queue_add(struct bnx2x_softc *sc,
                               struct ecore_exe_queue_obj *o,
                               struct ecore_exeq_elem *elem, int restore)
{
        int rc;

        ECORE_SPIN_LOCK_BH(&o->lock);

        if (!restore) {
                /* Try to cancel this element queue */
                rc = o->optimize(sc, o->owner, elem);
                if (rc)
                        goto free_and_exit;

                /* Check if this request is ok */
                rc = o->validate(sc, o->owner, elem);
                if (rc) {
                        ECORE_MSG(sc, "Preamble failed: %d", rc);
                        goto free_and_exit;
                }
        }

        /* If so, add it to the execution queue */
        ECORE_LIST_PUSH_TAIL(&elem->link, &o->exe_queue);

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return ECORE_SUCCESS;

free_and_exit:
        ecore_exe_queue_free_elem(sc, elem);

        ECORE_SPIN_UNLOCK_BH(&o->lock);

        return rc;
}

static void __ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
                                            struct ecore_exe_queue_obj *o)
{
        struct ecore_exeq_elem *elem;

        while (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
                elem = ECORE_LIST_FIRST_ENTRY(&o->pending_comp,
                                              struct ecore_exeq_elem, link);

                ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->pending_comp);
                ecore_exe_queue_free_elem(sc, elem);
        }
}

static inline void ecore_exe_queue_reset_pending(struct bnx2x_softc *sc,
                                                 struct ecore_exe_queue_obj *o)
{
        ECORE_SPIN_LOCK_BH(&o->lock);

        __ecore_exe_queue_reset_pending(sc, o);

        ECORE_SPIN_UNLOCK_BH(&o->lock);
}

/**
 * ecore_exe_queue_step - execute one execution chunk atomically
 *
 * @sc:                 driver handle
 * @o:                  queue
 * @ramrod_flags:       flags
 *
 * (Should be called while holding the exe_queue->lock).
 */
static int ecore_exe_queue_step(struct bnx2x_softc *sc,
                                struct ecore_exe_queue_obj *o,
                                unsigned long *ramrod_flags)
{
        struct ecore_exeq_elem *elem, spacer;
        int cur_len = 0, rc;

        ECORE_MEMSET(&spacer, 0, sizeof(spacer));

        /* Next step should not be performed until the current is finished,
         * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
         * properly clear object internals without sending any command to the
         * FW, which also implies there won't be any completion to clear the
         * 'pending' list.
         */
        if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
                if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
                        ECORE_MSG(sc,
                                  "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
                        __ecore_exe_queue_reset_pending(sc, o);
                } else {
                        return ECORE_PENDING;
                }
        }

        /* Run through the pending commands list and create a next
         * execution chunk.
         */
        while (!ECORE_LIST_IS_EMPTY(&o->exe_queue)) {
                elem = ECORE_LIST_FIRST_ENTRY(&o->exe_queue,
                                              struct ecore_exeq_elem, link);
                ECORE_DBG_BREAK_IF(!elem->cmd_len);

                if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
                        cur_len += elem->cmd_len;
                        /* Prevent from both lists being empty when moving an
                         * element. This will allow the call of
                         * ecore_exe_queue_empty() without locking.
                         */
                        ECORE_LIST_PUSH_TAIL(&spacer.link, &o->pending_comp);
                        mb();
                        ECORE_LIST_REMOVE_ENTRY(&elem->link, &o->exe_queue);
                        ECORE_LIST_PUSH_TAIL(&elem->link, &o->pending_comp);
                        ECORE_LIST_REMOVE_ENTRY(&spacer.link, &o->pending_comp);
                } else {
                        break;
                }
        }

        /* Sanity check */
        if (!cur_len)
                return ECORE_SUCCESS;

        rc = o->execute(sc, o->owner, &o->pending_comp, ramrod_flags);
        if (rc < 0)
                /* In case of an error return the commands back to the queue
                 * and reset the pending_comp.
                 */
                ECORE_LIST_SPLICE_INIT(&o->pending_comp, &o->exe_queue);
        else if (!rc)
                /* If zero is returned, means there are no outstanding pending
                 * completions and we may dismiss the pending list.
                 */
                __ecore_exe_queue_reset_pending(sc, o);

        return rc;
}
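
/*
 * Worked example (illustrative, values hypothetical): with exe_chunk_len == 2
 * and three queued elements of cmd_len == 1 each, one call to
 * ecore_exe_queue_step() moves the first two elements to pending_comp and
 * hands them to the owner's execute() callback as a single ramrod; the third
 * element stays on exe_queue until the completion for the first chunk arrives
 * and a further step is scheduled.
 */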

static inline int ecore_exe_queue_empty(struct ecore_exe_queue_obj *o)
{
        int empty = ECORE_LIST_IS_EMPTY(&o->exe_queue);

        /* Don't reorder!!! */
        mb();

        return empty && ECORE_LIST_IS_EMPTY(&o->pending_comp);
}
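
/*
 * Note on the 'spacer' element used in ecore_exe_queue_step(): an element is
 * pushed to pending_comp *before* it is removed from exe_queue, with a memory
 * barrier in between, so this lockless check can never observe both lists
 * empty while an element is in flight between them; exe_queue is read first
 * for the same reason.
 */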

static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct bnx2x_softc *sc
                                                          __rte_unused)
{
        ECORE_MSG(sc, "Allocating a new exe_queue element");
        return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
}

/************************ raw_obj functions ***********************************/
static int ecore_raw_check_pending(struct ecore_raw_obj *o)
{
        /*
         * !! converts the value returned by ECORE_TEST_BIT such that it
         * is guaranteed not to be truncated regardless of int definition.
         *
         * Note we cannot simply define the function's return value type
         * to match the type returned by ECORE_TEST_BIT, as it varies by
         * platform/implementation.
         */
        return !!ECORE_TEST_BIT(o->state, o->pstate);
}
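
/*
 * Illustrative note: the double negation normalizes any non-zero result to
 * exactly 1. If ECORE_TEST_BIT() evaluated to, say, a 64-bit value with only
 * bit 32 set, a plain conversion to a 32-bit int would truncate it to 0,
 * while !! yields 1, which survives any integer conversion.
 */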

static void ecore_raw_clear_pending(struct ecore_raw_obj *o)
{
        ECORE_SMP_MB_BEFORE_CLEAR_BIT();
        ECORE_CLEAR_BIT(o->state, o->pstate);
        ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

static void ecore_raw_set_pending(struct ecore_raw_obj *o)
{
        ECORE_SMP_MB_BEFORE_CLEAR_BIT();
        ECORE_SET_BIT(o->state, o->pstate);
        ECORE_SMP_MB_AFTER_CLEAR_BIT();
}

/**
 * ecore_state_wait - wait until the given bit (state) is cleared
 *
 * @sc:         device handle
 * @state:      state which is to be cleared
 * @pstate:     state buffer
 */
static int ecore_state_wait(struct bnx2x_softc *sc, int state,
                            unsigned long *pstate)
{
        /* can take a while if any port is running */
        int cnt = 5000;

        if (CHIP_REV_IS_EMUL(sc))
                cnt *= 20;

        ECORE_MSG(sc, "waiting for state to become %d", state);

        ECORE_MIGHT_SLEEP();
        while (cnt--) {
                bnx2x_intr_legacy(sc, 1);
                if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
                        ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
#endif
                        return ECORE_SUCCESS;
                }

                ECORE_WAIT(sc, delay_us);

                if (sc->panic)
                        return ECORE_IO;
        }

        /* timeout! */
        PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
#ifdef ECORE_STOP_ON_ERROR
        ecore_panic();
#endif

        return ECORE_TIMEOUT;
}
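
/*
 * Timing sketch (assuming ECORE_WAIT delays roughly 1 ms per iteration, in
 * line with the 5-second figure used for similar 5000-iteration loops in this
 * file): the poll budget is about 5 seconds, scaled by 20 on emulation.
 */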

static int ecore_raw_wait(struct bnx2x_softc *sc, struct ecore_raw_obj *raw)
{
        return ecore_state_wait(sc, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static int ecore_get_cam_offset_mac(struct ecore_vlan_mac_obj *o, int *offset)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        ECORE_DBG_BREAK_IF(!mp);

        return mp->get_entry(mp, offset);
}

static int ecore_get_credit_mac(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        ECORE_DBG_BREAK_IF(!mp);

        return mp->get(mp, 1);
}

static int ecore_put_cam_offset_mac(struct ecore_vlan_mac_obj *o, int offset)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        return mp->put_entry(mp, offset);
}

static int ecore_put_credit_mac(struct ecore_vlan_mac_obj *o)
{
        struct ecore_credit_pool_obj *mp = o->macs_pool;

        return mp->put(mp, 1);
}
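
/*
 * Illustrative note: these callbacks adapt the generic credit pool to the MAC
 * object. As the validate/remove paths below rely on, get()/put() return
 * non-zero on success and zero when the pool is exhausted, so callers treat a
 * zero return as "no CAM credit available".
 */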

/**
 * __ecore_vlan_mac_h_write_trylock - try getting the writer lock on the vlan
 * mac head list.
 *
 * @sc:         device handle
 * @o:          vlan_mac object
 *
 * @details Non-blocking implementation; should be called under execution
 *          queue lock.
 */
static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
                                            struct ecore_vlan_mac_obj *o)
{
        if (o->head_reader) {
                ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy");
                return ECORE_BUSY;
        }

        ECORE_MSG(sc, "vlan_mac_lock writer - Taken");
        return ECORE_SUCCESS;
}

/**
 * __ecore_vlan_mac_h_exec_pending - execute step instead of a previous step
 * which wasn't able to run due to a taken lock on the vlan mac head list.
 *
 * @sc:         device handle
 * @o:          vlan_mac object
 *
 * @details Should be called under execution queue lock; notice it might release
 *          and reclaim it during its run.
 */
static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
{
        int rc;
        unsigned long ramrod_flags = o->saved_ramrod_flags;

        ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu",
                  ramrod_flags);
        o->head_exe_request = FALSE;
        o->saved_ramrod_flags = 0;
        rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
        if (rc != ECORE_SUCCESS) {
                PMD_DRV_LOG(ERR, sc,
                            "execution of pending commands failed with rc %d",
                            rc);
#ifdef ECORE_STOP_ON_ERROR
                ecore_panic();
#endif
        }
}

/**
 * __ecore_vlan_mac_h_pend - pend an execution step which couldn't run
 * because the vlan mac head list lock was taken.
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 * @ramrod_flags:       ramrod flags of missed execution
 *
 * @details Should be called under execution queue lock.
 */
static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
                                    struct ecore_vlan_mac_obj *o,
                                    unsigned long ramrod_flags)
{
        o->head_exe_request = TRUE;
        o->saved_ramrod_flags = ramrod_flags;
        ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu",
                  ramrod_flags);
}

/**
 * __ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would perform it - possibly releasing and
 *          reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
                                            struct ecore_vlan_mac_obj *o)
{
        /* It's possible a new pending execution was added since this writer
         * executed. If so, execute again. [Ad infinitum]
         */
        while (o->head_exe_request) {
                ECORE_MSG(sc,
                          "vlan_mac_lock - writer release encountered a pending request");
                __ecore_vlan_mac_h_exec_pending(sc, o);
        }
}

/**
 * ecore_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Notice if a pending execution exists, it would perform it -
 *          possibly releasing and reclaiming the execution queue lock.
 */
void ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
                                   struct ecore_vlan_mac_obj *o)
{
        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        __ecore_vlan_mac_h_write_unlock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}

/**
 * __ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under the execution queue lock. May sleep. May
 *          release and reclaim execution queue lock during its run.
 */
static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
                                        struct ecore_vlan_mac_obj *o)
{
        /* If we got here, we're holding lock --> no WRITER exists */
        o->head_reader++;
        ECORE_MSG(sc,
                  "vlan_mac_lock - locked reader - number %d", o->head_reader);

        return ECORE_SUCCESS;
}

/**
 * ecore_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details May sleep. Claims and releases execution queue lock during its run.
 */
static int ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc,
                                      struct ecore_vlan_mac_obj *o)
{
        int rc;

        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        rc = __ecore_vlan_mac_h_read_lock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

        return rc;
}

/**
 * __ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Should be called under execution queue lock. Notice if a pending
 *          execution exists, it would be performed if this was the last
 *          reader, possibly releasing and reclaiming the execution queue lock.
 */
static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
                                           struct ecore_vlan_mac_obj *o)
{
        if (!o->head_reader) {
                PMD_DRV_LOG(ERR, sc,
                            "Need to release vlan mac reader lock, but lock isn't taken");
#ifdef ECORE_STOP_ON_ERROR
                ecore_panic();
#endif
        } else {
                o->head_reader--;
                PMD_DRV_LOG(INFO, sc,
                            "vlan_mac_lock - decreased readers to %d",
                            o->head_reader);
        }

        /* It's possible a new pending execution was added, and that this reader
         * was last - if so we need to execute the command.
         */
        if (!o->head_reader && o->head_exe_request) {
                PMD_DRV_LOG(INFO, sc,
                            "vlan_mac_lock - reader release encountered a pending request");

                /* Writer release will do the trick */
                __ecore_vlan_mac_h_write_unlock(sc, o);
        }
}

/**
 * ecore_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 *
 * @details Notice if a pending execution exists, it would be performed if this
 *          was the last reader. Claims and releases the execution queue lock
 *          during its run.
 */
void ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
                                  struct ecore_vlan_mac_obj *o)
{
        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
        __ecore_vlan_mac_h_read_unlock(sc, o);
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);
}
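
/*
 * Usage sketch (illustrative): a reader that wants a stable view of the
 * registry brackets its traversal with the reader lock, exactly as
 * ecore_get_n_elements() below does:
 *
 *      if (ecore_vlan_mac_h_read_lock(sc, o) == ECORE_SUCCESS) {
 *              ... walk o->head ...
 *              ecore_vlan_mac_h_read_unlock(sc, o);
 *      }
 *
 * A writer (the execution step) only proceeds when head_reader == 0; a step
 * that loses the race is parked via __ecore_vlan_mac_h_pend() and replayed
 * when the last reader drops the lock.
 */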

/**
 * ecore_get_n_elements - get the first n elements of the vlan mac head list
 *
 * @sc:                 device handle
 * @o:                  vlan_mac object
 * @n:                  number of elements to get
 * @base:               base address for element placement
 * @stride:             stride between elements (in bytes)
 * @size:               size of one element's data (in bytes)
 */
static int ecore_get_n_elements(struct bnx2x_softc *sc,
                                struct ecore_vlan_mac_obj *o, int n,
                                uint8_t *base, uint8_t stride, uint8_t size)
{
        struct ecore_vlan_mac_registry_elem *pos;
        uint8_t *next = base;
        int counter = 0, read_lock;

        ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)");
        read_lock = ecore_vlan_mac_h_read_lock(sc, o);
        if (read_lock != ECORE_SUCCESS)
                PMD_DRV_LOG(ERR, sc,
                            "get_n_elements failed to get vlan mac reader lock; Access without lock");

        /* traverse list */
        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem) {
                if (counter < n) {
                        ECORE_MEMCPY(next, &pos->u, size);
                        counter++;
                        ECORE_MSG(sc,
                                  "copied element number %d to address %p element was:",
                                  counter, next);
                        next += stride + size;
                }
        }

        if (read_lock == ECORE_SUCCESS) {
                ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)");
                ecore_vlan_mac_h_read_unlock(sc, o);
        }

        return counter * ETH_ALEN;
}

/* check_add() callbacks */
static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
                               struct ecore_vlan_mac_obj *o,
                               union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
                  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
                  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

        if (!ECORE_IS_VALID_ETHER_ADDR(data->mac.mac))
                return ECORE_INVAL;

        /* Check if a requested MAC already exists */
        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return ECORE_EXISTS;

        return ECORE_SUCCESS;
}

/* check_del() callbacks */
static struct ecore_vlan_mac_registry_elem *
ecore_check_mac_del(struct bnx2x_softc *sc __rte_unused,
                    struct ecore_vlan_mac_obj *o,
                    union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;

        ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
                  data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
                  data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);

        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
                                  struct ecore_vlan_mac_registry_elem)
                if (!ECORE_MEMCMP(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
                    (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
                        return pos;

        return NULL;
}

/* check_move() callback */
static int ecore_check_move(struct bnx2x_softc *sc,
                            struct ecore_vlan_mac_obj *src_o,
                            struct ecore_vlan_mac_obj *dst_o,
                            union ecore_classification_ramrod_data *data)
{
        struct ecore_vlan_mac_registry_elem *pos;
        int rc;

        /* Check if we can delete the requested configuration from the first
         * object.
         */
        pos = src_o->check_del(sc, src_o, data);

        /* Check if the configuration can be added to the second object */
        rc = dst_o->check_add(sc, dst_o, data);

        /* If this classification can not be added (is already set)
         * or can't be deleted - return an error.
         */
        if (rc || !pos)
                return FALSE;

        return TRUE;
}

static int ecore_check_move_always_err(__rte_unused struct bnx2x_softc *sc,
                                       __rte_unused struct ecore_vlan_mac_obj *src_o,
                                       __rte_unused struct ecore_vlan_mac_obj *dst_o,
                                       __rte_unused union ecore_classification_ramrod_data *data)
{
        return FALSE;
}

static uint8_t ecore_vlan_mac_get_rx_tx_flag(struct ecore_vlan_mac_obj *o)
{
        struct ecore_raw_obj *raw = &o->raw;
        uint8_t rx_tx_flag = 0;

        if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
            (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

        if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
            (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
                rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

        return rx_tx_flag;
}

static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
                                 int add, unsigned char *dev_addr, int index)
{
        uint32_t wb_data[2];
        uint32_t reg_offset = ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM :
            NIG_REG_LLH0_FUNC_MEM;

        if (!ECORE_IS_MF_SI_MODE(sc) && !IS_MF_AFEX(sc))
                return;

        if (index > ECORE_LLH_CAM_MAX_PF_LINE)
                return;

        ECORE_MSG(sc, "Going to %s LLH configuration at entry %d",
                  (add ? "ADD" : "DELETE"), index);

        if (add) {
                /* LLH_FUNC_MEM is a uint64_t WB register */
                reg_offset += 8 * index;

                wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
                              (dev_addr[4] << 8) | dev_addr[5]);
                wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

                ECORE_REG_WR_DMAE_LEN(sc, reg_offset, wb_data, 2);
        }

        REG_WR(sc, (ECORE_PORT_ID(sc) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
                    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4 * index, add);
}
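
/*
 * Worked example (illustrative): for dev_addr 00:11:22:33:44:55 the 64-bit
 * wide-bus entry is split as wb_data[0] = 0x22334455 (the four low bytes) and
 * wb_data[1] = 0x0011 (the two high bytes), matching the layout the DMAE
 * write above expects.
 */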

/**
 * ecore_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @o:          queue for which we want to configure this rule
 * @add:        if TRUE the command is an ADD command, DEL otherwise
 * @opcode:     CLASSIFY_RULE_OPCODE_XXX
 * @hdr:        pointer to a header to setup
 */
static void ecore_vlan_mac_set_cmd_hdr_e2(struct ecore_vlan_mac_obj *o,
                                          int add, int opcode,
                                          struct eth_classify_cmd_header *hdr)
{
        struct ecore_raw_obj *raw = &o->raw;

        hdr->client_id = raw->cl_id;
        hdr->func_id = raw->func_id;

        /* Rx or/and Tx (internal switching) configuration ? */
        hdr->cmd_general_data |= ecore_vlan_mac_get_rx_tx_flag(o);

        if (add)
                hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

        hdr->cmd_general_data |=
            (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:        connection id
 * @type:       ECORE_FILTER_XXX_PENDING
 * @hdr:        pointer to header to setup
 * @rule_cnt:   number of rules in the ramrod data
 *
 * currently we always configure one rule and echo field to contain a CID and an
 * opcode type.
 */
static void ecore_vlan_mac_set_rdata_hdr_e2(uint32_t cid, int type,
                                            struct eth_classify_header *hdr,
                                            int rule_cnt)
{
        hdr->echo = ECORE_CPU_TO_LE32((cid & ECORE_SWCID_MASK) |
                                      (type << ECORE_SWCID_SHIFT));
        hdr->rule_cnt = (uint8_t)rule_cnt;
}
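
/*
 * Illustrative note: the echo field packs the software CID into the low bits
 * (ECORE_SWCID_MASK) and the pending-filter type above ECORE_SWCID_SHIFT, so
 * the completion handler can recover both the connection and the command type
 * from the single 32-bit value the firmware echoes back.
 */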

/* hw_config() callbacks */
static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
                                 struct ecore_vlan_mac_obj *o,
                                 struct ecore_exeq_elem *elem, int rule_idx,
                                 __rte_unused int cam_offset)
{
        struct ecore_raw_obj *raw = &o->raw;
        struct eth_classify_rules_ramrod_data *data =
            (struct eth_classify_rules_ramrod_data *)(raw->rdata);
        int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
        union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
        int add = (cmd == ECORE_VLAN_MAC_ADD) ? TRUE : FALSE;
        unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
        uint8_t *mac = elem->cmd_data.vlan_mac.u.mac.mac;

        /* Set LLH CAM entry: currently only iSCSI and ETH macs are
         * relevant. In addition, current implementation is tuned for a
         * single ETH MAC.
         *
         * When PF configuration of multiple unicast ETH MACs in switch
         * independent mode is required (NetQ, multiple netdev MACs,
         * etc.), consider better utilisation of the 8 per-function MAC
         * entries in the LLH register. There are also the
         * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the
         * total number of CAM entries to 16.
         *
         * Currently we won't configure NIG for MACs other than a primary ETH
         * MAC and iSCSI L2 MAC.
         *
         * If this MAC is moving from one Queue to another, no need to change
         * NIG configuration.
         */
        if (cmd != ECORE_VLAN_MAC_MOVE) {
                if (ECORE_TEST_BIT(ECORE_ISCSI_ETH_MAC, vlan_mac_flags))
                        ecore_set_mac_in_nig(sc, add, mac,
                                             ECORE_LLH_CAM_ISCSI_ETH_LINE);
                else if (ECORE_TEST_BIT(ECORE_ETH_MAC, vlan_mac_flags))
                        ecore_set_mac_in_nig(sc, add, mac,
                                             ECORE_LLH_CAM_ETH_LINE);
        }

        /* Reset the ramrod data buffer for the first rule */
        if (rule_idx == 0)
                ECORE_MEMSET(data, 0, sizeof(*data));

        /* Setup a command header */
        ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
                                      &rule_entry->mac.header);

        ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
                  (add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
                  mac[4], mac[5], raw->cl_id);

        /* Set a MAC itself */
        ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                              &rule_entry->mac.mac_mid,
                              &rule_entry->mac.mac_lsb, mac);
        rule_entry->mac.inner_mac = elem->cmd_data.vlan_mac.u.mac.is_inner_mac;

        /* MOVE: Add a rule that will add this MAC to the target Queue */
        if (cmd == ECORE_VLAN_MAC_MOVE) {
                rule_entry++;
                rule_cnt++;

                /* Setup ramrod data */
                ecore_vlan_mac_set_cmd_hdr_e2(elem->cmd_data.
                                              vlan_mac.target_obj, TRUE,
                                              CLASSIFY_RULE_OPCODE_MAC,
                                              &rule_entry->mac.header);

                /* Set a MAC itself */
                ecore_set_fw_mac_addr(&rule_entry->mac.mac_msb,
                                      &rule_entry->mac.mac_mid,
                                      &rule_entry->mac.mac_lsb, mac);
                rule_entry->mac.inner_mac =
                    elem->cmd_data.vlan_mac.u.mac.is_inner_mac;
        }

        /* Set the ramrod data header */
        ecore_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
                                        rule_cnt);
}

/**
 * ecore_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @o:          queue
 * @type:       ECORE_FILTER_XXX_PENDING
 * @cam_offset: offset in cam memory
 * @hdr:        pointer to a header to setup
 *
 * E1H
 */
static void ecore_vlan_mac_set_rdata_hdr_e1x(struct ecore_vlan_mac_obj *o,
                                             int type, int cam_offset,
                                             struct mac_configuration_hdr *hdr)
{
        struct ecore_raw_obj *r = &o->raw;

        hdr->length = 1;
        hdr->offset = (uint8_t)cam_offset;
        hdr->client_id = ECORE_CPU_TO_LE16(0xff);
        hdr->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
                                      (type << ECORE_SWCID_SHIFT));
}

static void ecore_vlan_mac_set_cfg_entry_e1x(struct ecore_vlan_mac_obj *o,
                                             int add, int opcode, uint8_t *mac,
                                             uint16_t vlan_id,
                                             struct mac_configuration_entry *cfg_entry)
{
        struct ecore_raw_obj *r = &o->raw;
        uint32_t cl_bit_vec = (1 << r->cl_id);

        cfg_entry->clients_bit_vector = ECORE_CPU_TO_LE32(cl_bit_vec);
        cfg_entry->pf_id = r->func_id;
        cfg_entry->vlan_id = ECORE_CPU_TO_LE16(vlan_id);

        if (add) {
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                               T_ETH_MAC_COMMAND_SET);
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE,
                               opcode);

                /* Set a MAC in a ramrod data */
                ecore_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
                                      &cfg_entry->middle_mac_addr,
                                      &cfg_entry->lsb_mac_addr, mac);
        } else {
                ECORE_SET_FLAG(cfg_entry->flags,
                               MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                               T_ETH_MAC_COMMAND_INVALIDATE);
        }
}

static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc __rte_unused,
                                         struct ecore_vlan_mac_obj *o,
                                         int type, int cam_offset,
                                         int add, uint8_t *mac,
                                         uint16_t vlan_id, int opcode,
                                         struct mac_configuration_cmd *config)
{
        struct mac_configuration_entry *cfg_entry = &config->config_table[0];

        ecore_vlan_mac_set_rdata_hdr_e1x(o, type, cam_offset, &config->hdr);
        ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
                                         cfg_entry);

        ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
                  (add ? "setting" : "clearing"),
                  mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
                  o->raw.cl_id, cam_offset);
}

/**
 * ecore_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @sc:         device handle
 * @o:          ecore_vlan_mac_obj
 * @elem:       ecore_exeq_elem
 * @rule_idx:   rule_idx
 * @cam_offset: cam_offset
 */
static void ecore_set_one_mac_e1x(struct bnx2x_softc *sc,
                                  struct ecore_vlan_mac_obj *o,
                                  struct ecore_exeq_elem *elem,
                                  __rte_unused int rule_idx, int cam_offset)
{
        struct ecore_raw_obj *raw = &o->raw;
        struct mac_configuration_cmd *config =
            (struct mac_configuration_cmd *)(raw->rdata);
        /* 57711 does not support the MOVE command,
         * so it's either ADD or DEL
         */
        int add = (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
            TRUE : FALSE;

        /* Reset the ramrod data buffer */
        ECORE_MEMSET(config, 0, sizeof(*config));

        ecore_vlan_mac_set_rdata_e1x(sc, o, raw->state,
                                     cam_offset, add,
                                     elem->cmd_data.vlan_mac.u.mac.mac, 0,
                                     ETH_VLAN_FILTER_ANY_VLAN, config);
}

/**
 * ecore_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @sc:         device handle
 * @p:          command parameters
 * @ppos:       pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - it should be given back in the next call to make
 * the function handle the next element. If *ppos is set to NULL it will restart
 * the iterator. If returned *ppos == NULL this means that the last element has
 * been handled.
 */
static int ecore_vlan_mac_restore(struct bnx2x_softc *sc,
                                  struct ecore_vlan_mac_ramrod_params *p,
                                  struct ecore_vlan_mac_registry_elem **ppos)
{
        struct ecore_vlan_mac_registry_elem *pos;
        struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;

        /* If list is empty - there is nothing to do here */
        if (ECORE_LIST_IS_EMPTY(&o->head)) {
                *ppos = NULL;
                return 0;
        }

        /* make a step... */
        if (*ppos == NULL)
                *ppos = ECORE_LIST_FIRST_ENTRY(&o->head,
                                               struct ecore_vlan_mac_registry_elem,
                                               link);
        else
                *ppos = ECORE_LIST_NEXT(*ppos, link,
                                        struct ecore_vlan_mac_registry_elem);

        pos = *ppos;

        /* If it's the last step - return NULL */
        if (ECORE_LIST_IS_LAST(&pos->link, &o->head))
                *ppos = NULL;

        /* Prepare a 'user_req' */
        ECORE_MEMCPY(&p->user_req.u, &pos->u, sizeof(pos->u));

        /* Set the command */
        p->user_req.cmd = ECORE_VLAN_MAC_ADD;

        /* Set vlan_mac_flags */
        p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

        /* Set a restore bit */
        ECORE_SET_BIT_NA(RAMROD_RESTORE, &p->ramrod_flags);

        return ecore_config_vlan_mac(sc, p);
}
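
/*
 * Usage sketch (illustrative): a caller replays the whole registry by driving
 * the cookie until it comes back NULL:
 *
 *      struct ecore_vlan_mac_registry_elem *pos = NULL;
 *      int rc;
 *
 *      do {
 *              rc = ecore_vlan_mac_restore(sc, p, &pos);
 *      } while (pos && rc >= 0);
 */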

/* ecore_exeq_get_mac/ecore_exeq_get_vlan/ecore_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct ecore_exeq_elem *ecore_exeq_get_mac(struct ecore_exe_queue_obj *o,
                                                  struct ecore_exeq_elem *elem)
{
        struct ecore_exeq_elem *pos;
        struct ecore_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

        /* Check pending for execution commands */
        ECORE_LIST_FOR_EACH_ENTRY(pos, &o->exe_queue, link,
                                  struct ecore_exeq_elem)
                if (!ECORE_MEMCMP(&pos->cmd_data.vlan_mac.u.mac, data,
                                  sizeof(*data)) &&
                    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
                        return pos;

        return NULL;
}

/**
 * ecore_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @sc:         device handle
 * @qo:         ecore_qable_obj
 * @elem:       ecore_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
                                       union ecore_qable_obj *qo,
                                       struct ecore_exeq_elem *elem)
{
        struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
        struct ecore_exe_queue_obj *exeq = &o->exe_queue;
        int rc;

        /* Check the registry */
        rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
        if (rc) {
                ECORE_MSG(sc,
                          "ADD command is not allowed considering current registry state.");
                return rc;
        }

        /* Check if there is a pending ADD command for this
         * MAC/VLAN/VLAN-MAC. Return an error if there is.
         */
        if (exeq->get(exeq, elem)) {
                ECORE_MSG(sc, "There is a pending ADD command already");
                return ECORE_EXISTS;
        }

        /* Consume the credit, unless asked not to */
        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              o->get_credit(o)))
                return ECORE_INVAL;

        return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @sc:         device handle
 * @qo:         quable object to check
 * @elem:       element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
                                       union ecore_qable_obj *qo,
                                       struct ecore_exeq_elem *elem)
{
        struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
        struct ecore_vlan_mac_registry_elem *pos;
        struct ecore_exe_queue_obj *exeq = &o->exe_queue;
        struct ecore_exeq_elem query_elem;

        /* If this classification can not be deleted (doesn't exist)
         * - return ECORE_EXISTS.
         */
        pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
        if (!pos) {
                ECORE_MSG(sc,
                          "DEL command is not allowed considering current registry state");
                return ECORE_EXISTS;
        }

        /* Check if there are pending DEL or MOVE commands for this
         * MAC/VLAN/VLAN-MAC. Return an error if so.
         */
        ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

        /* Check for MOVE commands */
        query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
        if (exeq->get(exeq, &query_elem)) {
                PMD_DRV_LOG(ERR, sc, "There is a pending MOVE command already");
                return ECORE_INVAL;
        }

        /* Check for DEL commands */
        if (exeq->get(exeq, elem)) {
                ECORE_MSG(sc, "There is a pending DEL command already");
                return ECORE_EXISTS;
        }

        /* Return the credit to the credit pool, unless asked not to */
        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              o->put_credit(o))) {
                PMD_DRV_LOG(ERR, sc, "Failed to return a credit");
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

/**
 * ecore_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @sc:         device handle
 * @qo:         quable object to check (source)
 * @elem:       element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
                                        union ecore_qable_obj *qo,
                                        struct ecore_exeq_elem *elem)
{
        struct ecore_vlan_mac_obj *src_o = &qo->vlan_mac;
        struct ecore_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
        struct ecore_exeq_elem query_elem;
        struct ecore_exe_queue_obj *src_exeq = &src_o->exe_queue;
        struct ecore_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

        /* Check if we can perform this operation based on the current registry
         * state.
         */
        if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
                ECORE_MSG(sc,
                          "MOVE command is not allowed considering current registry state");
                return ECORE_INVAL;
        }

        /* Check if there is an already pending DEL or MOVE command for the
         * source object or ADD command for a destination object. Return an
         * error if so.
         */
        ECORE_MEMCPY(&query_elem, elem, sizeof(query_elem));

        /* Check DEL on source */
        query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
        if (src_exeq->get(src_exeq, &query_elem)) {
                PMD_DRV_LOG(ERR, sc,
                            "There is a pending DEL command on the source queue already");
                return ECORE_INVAL;
        }

        /* Check MOVE on source */
        if (src_exeq->get(src_exeq, elem)) {
                ECORE_MSG(sc, "There is a pending MOVE command already");
                return ECORE_EXISTS;
        }

        /* Check ADD on destination */
        query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
        if (dest_exeq->get(dest_exeq, &query_elem)) {
                PMD_DRV_LOG(ERR, sc,
                            "There is a pending ADD command on the destination queue already");
                return ECORE_INVAL;
        }

        /* Consume the credit, unless asked not to */
        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT_DEST,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              dest_o->get_credit(dest_o)))
                return ECORE_INVAL;

        if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                             &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
              src_o->put_credit(src_o))) {
                /* return the credit taken from dest... */
                dest_o->put_credit(dest_o);
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

static int ecore_validate_vlan_mac(struct bnx2x_softc *sc,
                                   union ecore_qable_obj *qo,
                                   struct ecore_exeq_elem *elem)
{
        switch (elem->cmd_data.vlan_mac.cmd) {
        case ECORE_VLAN_MAC_ADD:
                return ecore_validate_vlan_mac_add(sc, qo, elem);
        case ECORE_VLAN_MAC_DEL:
                return ecore_validate_vlan_mac_del(sc, qo, elem);
        case ECORE_VLAN_MAC_MOVE:
                return ecore_validate_vlan_mac_move(sc, qo, elem);
        default:
                return ECORE_INVAL;
        }
}

static int ecore_remove_vlan_mac(__rte_unused struct bnx2x_softc *sc,
                                 union ecore_qable_obj *qo,
                                 struct ecore_exeq_elem *elem)
{
        int rc = 0;

        /* If consumption wasn't required, nothing to do */
        if (ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
                           &elem->cmd_data.vlan_mac.vlan_mac_flags))
                return ECORE_SUCCESS;

        switch (elem->cmd_data.vlan_mac.cmd) {
        case ECORE_VLAN_MAC_ADD:
        case ECORE_VLAN_MAC_MOVE:
                rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
                break;
        case ECORE_VLAN_MAC_DEL:
                rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
                break;
        default:
                return ECORE_INVAL;
        }

        if (rc != TRUE)
                return ECORE_INVAL;

        return ECORE_SUCCESS;
}

/**
 * ecore_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @sc:         device handle
 * @o:          ecore_vlan_mac_obj
 */
static int ecore_wait_vlan_mac(struct bnx2x_softc *sc,
                               struct ecore_vlan_mac_obj *o)
{
        int cnt = 5000, rc;
        struct ecore_exe_queue_obj *exeq = &o->exe_queue;
        struct ecore_raw_obj *raw = &o->raw;

        while (cnt--) {
                /* Wait for the current command to complete */
                rc = raw->wait_comp(sc, raw);
                if (rc)
                        return rc;

                /* Wait until there are no pending commands */
                if (!ecore_exe_queue_empty(exeq))
                        ECORE_WAIT(sc, 1000);
                else
                        return ECORE_SUCCESS;
        }

        return ECORE_TIMEOUT;
}

static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
                                         struct ecore_vlan_mac_obj *o,
                                         unsigned long *ramrod_flags)
{
        int rc = ECORE_SUCCESS;

        ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);

        ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock");
        rc = __ecore_vlan_mac_h_write_trylock(sc, o);

        if (rc != ECORE_SUCCESS) {
                __ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);

                /* The calling function should not differentiate between this
                 * case and the case in which there is already a pending ramrod.
                 */
                rc = ECORE_PENDING;
        } else {
                rc = ecore_exe_queue_step(sc, &o->exe_queue, ramrod_flags);
        }
        ECORE_SPIN_UNLOCK_BH(&o->exe_queue.lock);

        return rc;
}

/**
 * ecore_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @sc:                 device handle
 * @o:                  ecore_vlan_mac_obj
 * @cqe:                completion element
 * @ramrod_flags:       if RAMROD_CONT is set, schedules the next execution chunk
 */
static int ecore_complete_vlan_mac(struct bnx2x_softc *sc,
                                   struct ecore_vlan_mac_obj *o,
                                   union event_ring_elem *cqe,
                                   unsigned long *ramrod_flags)
{
        struct ecore_raw_obj *r = &o->raw;
        int rc;

        /* Reset pending list */
        ecore_exe_queue_reset_pending(sc, &o->exe_queue);

        /* Clear pending */
        r->clear_pending(r);

        /* If ramrod failed this is most likely a SW bug */
        if (cqe->message.error)
                return ECORE_INVAL;

        /* Run the next bulk of pending commands if requested */
        if (ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags)) {
                rc = __ecore_vlan_mac_execute_step(sc, o, ramrod_flags);
                if (rc < 0)
                        return rc;
        }

        /* If there is more work to do return PENDING */
        if (!ecore_exe_queue_empty(&o->exe_queue))
                return ECORE_PENDING;

        return ECORE_SUCCESS;
}
1393
1394 /**
1395  * ecore_optimize_vlan_mac - optimize ADD and DEL commands.
1396  *
1397  * @sc:         device handle
1398  * @o:          ecore_qable_obj
1399  * @elem:       ecore_exeq_elem
1400  */
1401 static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
1402                                    union ecore_qable_obj *qo,
1403                                    struct ecore_exeq_elem *elem)
1404 {
1405         struct ecore_exeq_elem query, *pos;
1406         struct ecore_vlan_mac_obj *o = &qo->vlan_mac;
1407         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1408
1409         ECORE_MEMCPY(&query, elem, sizeof(query));
1410
1411         switch (elem->cmd_data.vlan_mac.cmd) {
1412         case ECORE_VLAN_MAC_ADD:
1413                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
1414                 break;
1415         case ECORE_VLAN_MAC_DEL:
1416                 query.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
1417                 break;
1418         default:
1419                 /* Don't handle anything other than ADD or DEL */
1420                 return 0;
1421         }
1422
1423         /* If we found the appropriate element - delete it */
1424         pos = exeq->get(exeq, &query);
1425         if (pos) {
1426
1427                 /* Return the credit of the optimized command */
1428                 if (!ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
1429                                     &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1430                         if (query.cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) {
1431                                 if (!o->put_credit(o)) {
1432                                         PMD_DRV_LOG(ERR, sc, "Failed to return the credit for the optimized ADD command");
1433                                         return ECORE_INVAL;
1434                                 }
1435                         } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1436                                 PMD_DRV_LOG(ERR, sc,
1437                                             "Failed to recover the credit from the optimized DEL command");
1438                                 return ECORE_INVAL;
1439                         }
1440                 }
1441
1442                 ECORE_MSG(sc, "Optimizing %s command",
1443                           (elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
1444                           "ADD" : "DEL");
1445
1446                 ECORE_LIST_REMOVE_ENTRY(&pos->link, &exeq->exe_queue);
1447                 ecore_exe_queue_free_elem(sc, pos);
1448                 return 1;
1449         }
1450
1451         return 0;
1452 }
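
/* Illustrative scenario: if an ADD for some MAC is still pending in the
 * exe_queue when a DEL for the same MAC is submitted, the lookup above finds
 * the pending ADD, its CAM credit is rebalanced, and the element is removed
 * from the queue, so neither command ends up being sent as a ramrod.
 */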
1453
1454 /**
1455  * ecore_vlan_mac_get_registry_elem - prepare a registry element
1456  *
1457  * @sc:   device handle
1458  * @o:        vlan_mac object whose registry is used
1459  * @elem:     execution queue element holding the command
1460  * @restore:  TRUE if this is a RESTORE flow
1461  * @re:       output pointer for the prepared registry element
1462  *
1463  * Prepare a registry element according to the current command request.
1464  */
1465 static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
1466                                             struct ecore_vlan_mac_obj *o,
1467                                             struct ecore_exeq_elem *elem,
1468                                             int restore, struct
1469                                             ecore_vlan_mac_registry_elem
1470                                             **re)
1471 {
1472         enum ecore_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
1473         struct ecore_vlan_mac_registry_elem *reg_elem;
1474
1475         /* Allocate a new registry element if needed. */
1476         if (!restore &&
1477             ((cmd == ECORE_VLAN_MAC_ADD) || (cmd == ECORE_VLAN_MAC_MOVE))) {
1478                 reg_elem = ECORE_ZALLOC(sizeof(*reg_elem), GFP_ATOMIC, sc);
1479                 if (!reg_elem)
1480                         return ECORE_NOMEM;
1481
1482                 /* Get a new CAM offset */
1483                 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1484                         /* This shall never happen, because we have checked the
1485                          * CAM availability in the 'validate'.
1486                          */
1487                         ECORE_DBG_BREAK_IF(1);
1488                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1489                         return ECORE_INVAL;
1490                 }
1491
1492                 ECORE_MSG(sc, "Got cam offset %d", reg_elem->cam_offset);
1493
1494                 /* Set a VLAN-MAC data */
1495                 ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1496                              sizeof(reg_elem->u));
1497
1498                 /* Copy the flags (needed for DEL and RESTORE flows) */
1499                 reg_elem->vlan_mac_flags =
1500                     elem->cmd_data.vlan_mac.vlan_mac_flags;
1501         } else                  /* DEL, RESTORE */
1502                 reg_elem = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
1503
1504         *re = reg_elem;
1505         return ECORE_SUCCESS;
1506 }
1507
1508 /**
1509  * ecore_execute_vlan_mac - execute vlan mac command
1510  *
1511  * @sc:                 device handle
1512  * @qo:             queueable object (holds the vlan_mac object)
1513  * @exe_chunk:      chunk of execution queue elements to process
1514  * @ramrod_flags:   execution flags
1515  *
1516  * go and send a ramrod!
1517  */
1518 static int ecore_execute_vlan_mac(struct bnx2x_softc *sc,
1519                                   union ecore_qable_obj *qo,
1520                                   ecore_list_t * exe_chunk,
1521                                   unsigned long *ramrod_flags)
1522 {
1523         struct ecore_exeq_elem *elem;
1524         struct ecore_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1525         struct ecore_raw_obj *r = &o->raw;
1526         int rc, idx = 0;
1527         int restore = ECORE_TEST_BIT(RAMROD_RESTORE, ramrod_flags);
1528         int drv_only = ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1529         struct ecore_vlan_mac_registry_elem *reg_elem;
1530         enum ecore_vlan_mac_cmd cmd;
1531
1532         /* If DRIVER_ONLY execution is requested, only clean up the driver
1533          * registry (below). Otherwise also send a ramrod to the FW first.
1534          */
1535         if (!drv_only) {
1536
1537                 /* Set pending */
1538                 r->set_pending(r);
1539
1540                 /* Fill the ramrod data */
1541                 ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link,
1542                                           struct ecore_exeq_elem) {
1543                         cmd = elem->cmd_data.vlan_mac.cmd;
1544                         /* We will add to the target object in MOVE command, so
1545                          * change the object for a CAM search.
1546                          */
1547                         if (cmd == ECORE_VLAN_MAC_MOVE)
1548                                 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1549                         else
1550                                 cam_obj = o;
1551
1552                         rc = ecore_vlan_mac_get_registry_elem(sc, cam_obj,
1553                                                               elem, restore,
1554                                                               &reg_elem);
1555                         if (rc)
1556                                 goto error_exit;
1557
1558                         ECORE_DBG_BREAK_IF(!reg_elem);
1559
1560                         /* Push a new entry into the registry */
1561                         if (!restore &&
1562                             ((cmd == ECORE_VLAN_MAC_ADD) ||
1563                              (cmd == ECORE_VLAN_MAC_MOVE)))
1564                                 ECORE_LIST_PUSH_HEAD(&reg_elem->link,
1565                                                      &cam_obj->head);
1566
1567                         /* Configure a single command in a ramrod data buffer */
1568                         o->set_one_rule(sc, o, elem, idx, reg_elem->cam_offset);
1569
1570                         /* MOVE command consumes 2 entries in the ramrod data */
1571                         if (cmd == ECORE_VLAN_MAC_MOVE)
1572                                 idx += 2;
1573                         else
1574                                 idx++;
1575                 }
1576
1577                 /*
1578                  *  No need for an explicit memory barrier here: we need to
1579                  *  ensure the ordering of writing to the SPQ element and of
1580                  *  the SPQ producer update anyway, and since the latter
1581                  *  involves a memory read, a full memory barrier is placed
1582                  *  there instead (inside ecore_sp_post()).
1583                  */
1584
1585                 rc = ecore_sp_post(sc, o->ramrod_cmd, r->cid,
1586                                    r->rdata_mapping, ETH_CONNECTION_TYPE);
1587                 if (rc)
1588                         goto error_exit;
1589         }
1590
1591         /* Now, when we are done with the ramrod - clean up the registry */
1592         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1593                 cmd = elem->cmd_data.vlan_mac.cmd;
1594                 if ((cmd == ECORE_VLAN_MAC_DEL) || (cmd == ECORE_VLAN_MAC_MOVE)) {
1595                         reg_elem = o->check_del(sc, o,
1596                                                 &elem->cmd_data.vlan_mac.u);
1597
1598                         ECORE_DBG_BREAK_IF(!reg_elem);
1599
1600                         o->put_cam_offset(o, reg_elem->cam_offset);
1601                         ECORE_LIST_REMOVE_ENTRY(&reg_elem->link, &o->head);
1602                         ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1603                 }
1604         }
1605
1606         if (!drv_only)
1607                 return ECORE_PENDING;
1608         else
1609                 return ECORE_SUCCESS;
1610
1611 error_exit:
1612         r->clear_pending(r);
1613
1614         /* Cleanup a registry in case of a failure */
1615         ECORE_LIST_FOR_EACH_ENTRY(elem, exe_chunk, link, struct ecore_exeq_elem) {
1616                 cmd = elem->cmd_data.vlan_mac.cmd;
1617
1618                 if (cmd == ECORE_VLAN_MAC_MOVE)
1619                         cam_obj = elem->cmd_data.vlan_mac.target_obj;
1620                 else
1621                         cam_obj = o;
1622
1623                 /* Delete all entries that were newly added above */
1624                 if (!restore &&
1625                     ((cmd == ECORE_VLAN_MAC_ADD) ||
1626                      (cmd == ECORE_VLAN_MAC_MOVE))) {
1627                         reg_elem = o->check_del(sc, cam_obj,
1628                                                 &elem->cmd_data.vlan_mac.u);
1629                         if (reg_elem) {
1630                                 ECORE_LIST_REMOVE_ENTRY(&reg_elem->link,
1631                                                         &cam_obj->head);
1632                                 ECORE_FREE(sc, reg_elem, sizeof(*reg_elem));
1633                         }
1634                 }
1635         }
1636
1637         return rc;
1638 }
1639
1640 static int ecore_vlan_mac_push_new_cmd(struct bnx2x_softc *sc, struct
1641                                        ecore_vlan_mac_ramrod_params *p)
1642 {
1643         struct ecore_exeq_elem *elem;
1644         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1645         int restore = ECORE_TEST_BIT(RAMROD_RESTORE, &p->ramrod_flags);
1646
1647         /* Allocate the execution queue element */
1648         elem = ecore_exe_queue_alloc_elem(sc);
1649         if (!elem)
1650                 return ECORE_NOMEM;
1651
1652         /* Set the command 'length' */
1653         switch (p->user_req.cmd) {
1654         case ECORE_VLAN_MAC_MOVE:
1655                 elem->cmd_len = 2;
1656                 break;
1657         default:
1658                 elem->cmd_len = 1;
1659         }
1660
1661         /* Fill the object specific info */
1662         ECORE_MEMCPY(&elem->cmd_data.vlan_mac, &p->user_req,
1663                      sizeof(p->user_req));
1664
1665         /* Try to add a new command to the pending list */
1666         return ecore_exe_queue_add(sc, &o->exe_queue, elem, restore);
1667 }
1668
1669 /**
1670  * ecore_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1671  *
1672  * @sc:   device handle
1673  * @p:   command parameters
1674  *
1675  */
1676 int ecore_config_vlan_mac(struct bnx2x_softc *sc,
1677                           struct ecore_vlan_mac_ramrod_params *p)
1678 {
1679         int rc = ECORE_SUCCESS;
1680         struct ecore_vlan_mac_obj *o = p->vlan_mac_obj;
1681         unsigned long *ramrod_flags = &p->ramrod_flags;
1682         int cont = ECORE_TEST_BIT(RAMROD_CONT, ramrod_flags);
1683         struct ecore_raw_obj *raw = &o->raw;
1684
1685         /*
1686          * Add new elements to the execution list for commands that require it.
1687          */
1688         if (!cont) {
1689                 rc = ecore_vlan_mac_push_new_cmd(sc, p);
1690                 if (rc)
1691                         return rc;
1692         }
1693
1694         /* If nothing will be executed further in this iteration we want to
1695          * return PENDING if there are pending commands
1696          */
1697         if (!ecore_exe_queue_empty(&o->exe_queue))
1698                 rc = ECORE_PENDING;
1699
1700         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
1701                 ECORE_MSG(sc,
1702                           "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
1703                 raw->clear_pending(raw);
1704         }
1705
1706         /* Execute commands if required */
1707         if (cont || ECORE_TEST_BIT(RAMROD_EXEC, ramrod_flags) ||
1708             ECORE_TEST_BIT(RAMROD_COMP_WAIT, ramrod_flags)) {
1709                 rc = __ecore_vlan_mac_execute_step(sc, p->vlan_mac_obj,
1710                                                    &p->ramrod_flags);
1711                 if (rc < 0)
1712                         return rc;
1713         }
1714
1715         /* RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1716          * then the user wants to wait until the last command is done.
1717          */
1718         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1719                 /* Wait maximum for the current exe_queue length iterations plus
1720                  * one (for the current pending command).
1721                  */
1722                 int max_iterations = ecore_exe_queue_length(&o->exe_queue) + 1;
1723
1724                 while (!ecore_exe_queue_empty(&o->exe_queue) &&
1725                        max_iterations--) {
1726
1727                         /* Wait for the current command to complete */
1728                         rc = raw->wait_comp(sc, raw);
1729                         if (rc)
1730                                 return rc;
1731
1732                         /* Make a next step */
1733                         rc = __ecore_vlan_mac_execute_step(sc,
1734                                                            p->vlan_mac_obj,
1735                                                            &p->ramrod_flags);
1736                         if (rc < 0)
1737                                 return rc;
1738                 }
1739
1740                 return ECORE_SUCCESS;
1741         }
1742
1743         return rc;
1744 }
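
/* A minimal usage sketch (hypothetical caller, not part of this file),
 * assuming 'sc', an initialized MAC object 'mac_obj' and a MAC address
 * 'addr' are available:
 *
 *      struct ecore_vlan_mac_ramrod_params p = { 0 };
 *
 *      p.vlan_mac_obj = mac_obj;
 *      p.user_req.cmd = ECORE_VLAN_MAC_ADD;
 *      ECORE_MEMCPY(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *      ECORE_SET_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *      rc = ecore_config_vlan_mac(sc, &p);
 *
 * With RAMROD_COMP_WAIT set the call drains the whole execution queue before
 * returning, so rc is ECORE_SUCCESS or a negative error rather than
 * ECORE_PENDING.
 */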
1745
1746 /**
1747  * ecore_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1748  *
1749  * @sc:                 device handle
1750  * @o:                 vlan_mac object to delete entries from
1751  * @vlan_mac_flags:    specification of the entries to be deleted
1752  * @ramrod_flags:      execution flags to be used for this deletion
1753  *
1754  * Returns 0 if the last operation has completed successfully and there are
1755  * no more elements left, a positive value if the last operation has completed
1756  * successfully and there are more previously configured elements, or a
1757  * negative value if the current operation has failed.
1758  */
1759 static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
1760                                   struct ecore_vlan_mac_obj *o,
1761                                   unsigned long *vlan_mac_flags,
1762                                   unsigned long *ramrod_flags)
1763 {
1764         struct ecore_vlan_mac_registry_elem *pos = NULL;
1765         int rc = 0, read_lock;
1766         struct ecore_vlan_mac_ramrod_params p;
1767         struct ecore_exe_queue_obj *exeq = &o->exe_queue;
1768         struct ecore_exeq_elem *exeq_pos, *exeq_pos_n;
1769
1770         /* Clear pending commands first */
1771
1772         ECORE_SPIN_LOCK_BH(&exeq->lock);
1773
1774         ECORE_LIST_FOR_EACH_ENTRY_SAFE(exeq_pos, exeq_pos_n,
1775                                        &exeq->exe_queue, link,
1776                                        struct ecore_exeq_elem) {
1777                 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1778                     *vlan_mac_flags) {
1779                         rc = exeq->remove(sc, exeq->owner, exeq_pos);
1780                         if (rc) {
1781                                 PMD_DRV_LOG(ERR, sc, "Failed to remove command");
1782                                 ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1783                                 return rc;
1784                         }
1785                         ECORE_LIST_REMOVE_ENTRY(&exeq_pos->link,
1786                                                 &exeq->exe_queue);
1787                         ecore_exe_queue_free_elem(sc, exeq_pos);
1788                 }
1789         }
1790
1791         ECORE_SPIN_UNLOCK_BH(&exeq->lock);
1792
1793         /* Prepare a command request */
1794         ECORE_MEMSET(&p, 0, sizeof(p));
1795         p.vlan_mac_obj = o;
1796         p.ramrod_flags = *ramrod_flags;
1797         p.user_req.cmd = ECORE_VLAN_MAC_DEL;
1798
1799         /* Add all but the last VLAN-MAC to the execution queue without actually
1800          * executing anything.
1801          */
1802         ECORE_CLEAR_BIT_NA(RAMROD_COMP_WAIT, &p.ramrod_flags);
1803         ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
1804         ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1805
1806         ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)");
1807         read_lock = ecore_vlan_mac_h_read_lock(sc, o);
1808         if (read_lock != ECORE_SUCCESS)
1809                 return read_lock;
1810
1811         ECORE_LIST_FOR_EACH_ENTRY(pos, &o->head, link,
1812                                   struct ecore_vlan_mac_registry_elem) {
1813                 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1814                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1815                         ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
1816                         rc = ecore_config_vlan_mac(sc, &p);
1817                         if (rc < 0) {
1818                                 PMD_DRV_LOG(ERR, sc,
1819                                             "Failed to add a new DEL command");
1820                                 ecore_vlan_mac_h_read_unlock(sc, o);
1821                                 return rc;
1822                         }
1823                 }
1824         }
1825
1826         ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
1827         ecore_vlan_mac_h_read_unlock(sc, o);
1828
1829         p.ramrod_flags = *ramrod_flags;
1830         ECORE_SET_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
1831
1832         return ecore_config_vlan_mac(sc, &p);
1833 }
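
/* Note: the deletion above is deliberately two-phased: all matching DEL
 * commands are first queued with RAMROD_COMP_WAIT/RAMROD_EXEC/RAMROD_CONT
 * cleared, and only the final ecore_config_vlan_mac() call sets RAMROD_CONT
 * so the whole batch is drained through the execution queue in chip-sized
 * chunks.
 */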
1834
1835 static void ecore_init_raw_obj(struct ecore_raw_obj *raw, uint8_t cl_id,
1836                                uint32_t cid, uint8_t func_id,
1837                                void *rdata,
1838                                ecore_dma_addr_t rdata_mapping, int state,
1839                                unsigned long *pstate, ecore_obj_type type)
1840 {
1841         raw->func_id = func_id;
1842         raw->cid = cid;
1843         raw->cl_id = cl_id;
1844         raw->rdata = rdata;
1845         raw->rdata_mapping = rdata_mapping;
1846         raw->state = state;
1847         raw->pstate = pstate;
1848         raw->obj_type = type;
1849         raw->check_pending = ecore_raw_check_pending;
1850         raw->clear_pending = ecore_raw_clear_pending;
1851         raw->set_pending = ecore_raw_set_pending;
1852         raw->wait_comp = ecore_raw_wait;
1853 }
1854
1855 static void ecore_init_vlan_mac_common(struct ecore_vlan_mac_obj *o,
1856                                        uint8_t cl_id, uint32_t cid,
1857                                        uint8_t func_id, void *rdata,
1858                                        ecore_dma_addr_t rdata_mapping,
1859                                        int state, unsigned long *pstate,
1860                                        ecore_obj_type type,
1861                                        struct ecore_credit_pool_obj
1862                                        *macs_pool, struct ecore_credit_pool_obj
1863                                        *vlans_pool)
1864 {
1865         ECORE_LIST_INIT(&o->head);
1866         o->head_reader = 0;
1867         o->head_exe_request = FALSE;
1868         o->saved_ramrod_flags = 0;
1869
1870         o->macs_pool = macs_pool;
1871         o->vlans_pool = vlans_pool;
1872
1873         o->delete_all = ecore_vlan_mac_del_all;
1874         o->restore = ecore_vlan_mac_restore;
1875         o->complete = ecore_complete_vlan_mac;
1876         o->wait = ecore_wait_vlan_mac;
1877
1878         ecore_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1879                            state, pstate, type);
1880 }
1881
1882 void ecore_init_mac_obj(struct bnx2x_softc *sc,
1883                         struct ecore_vlan_mac_obj *mac_obj,
1884                         uint8_t cl_id, uint32_t cid, uint8_t func_id,
1885                         void *rdata, ecore_dma_addr_t rdata_mapping, int state,
1886                         unsigned long *pstate, ecore_obj_type type,
1887                         struct ecore_credit_pool_obj *macs_pool)
1888 {
1889         union ecore_qable_obj *qable_obj = (union ecore_qable_obj *)mac_obj;
1890
1891         ecore_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1892                                    rdata_mapping, state, pstate, type,
1893                                    macs_pool, NULL);
1894
1895         /* CAM credit pool handling */
1896         mac_obj->get_credit = ecore_get_credit_mac;
1897         mac_obj->put_credit = ecore_put_credit_mac;
1898         mac_obj->get_cam_offset = ecore_get_cam_offset_mac;
1899         mac_obj->put_cam_offset = ecore_put_cam_offset_mac;
1900
1901         if (CHIP_IS_E1x(sc)) {
1902                 mac_obj->set_one_rule = ecore_set_one_mac_e1x;
1903                 mac_obj->check_del = ecore_check_mac_del;
1904                 mac_obj->check_add = ecore_check_mac_add;
1905                 mac_obj->check_move = ecore_check_move_always_err;
1906                 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1907
1908                 /* Exe Queue */
1909                 ecore_exe_queue_init(sc,
1910                                      &mac_obj->exe_queue, 1, qable_obj,
1911                                      ecore_validate_vlan_mac,
1912                                      ecore_remove_vlan_mac,
1913                                      ecore_optimize_vlan_mac,
1914                                      ecore_execute_vlan_mac,
1915                                      ecore_exeq_get_mac);
1916         } else {
1917                 mac_obj->set_one_rule = ecore_set_one_mac_e2;
1918                 mac_obj->check_del = ecore_check_mac_del;
1919                 mac_obj->check_add = ecore_check_mac_add;
1920                 mac_obj->check_move = ecore_check_move;
1921                 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1922                 mac_obj->get_n_elements = ecore_get_n_elements;
1923
1924                 /* Exe Queue */
1925                 ecore_exe_queue_init(sc,
1926                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1927                                      qable_obj, ecore_validate_vlan_mac,
1928                                      ecore_remove_vlan_mac,
1929                                      ecore_optimize_vlan_mac,
1930                                      ecore_execute_vlan_mac,
1931                                      ecore_exeq_get_mac);
1932         }
1933 }
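
/* Note the chunk lengths chosen above: E1x consumes one SET_MAC ramrod per
 * MAC, hence an exe_queue chunk length of 1, while E2 and newer batch up to
 * CLASSIFY_RULES_COUNT rules into a single CLASSIFICATION_RULES ramrod.
 */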
1934
1935 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
1936 static void __storm_memset_mac_filters(struct bnx2x_softc *sc, struct
1937                                        tstorm_eth_mac_filter_config
1938                                        *mac_filters, uint16_t pf_id)
1939 {
1940         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1941
1942         uint32_t addr = BAR_TSTRORM_INTMEM +
1943             TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
1944
1945         ecore_storm_memset_struct(sc, addr, size, (uint32_t *) mac_filters);
1946 }
1947
1948 static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
1949                                  struct ecore_rx_mode_ramrod_params *p)
1950 {
1951         /* update the sc MAC filter structure */
1952         uint32_t mask = (1 << p->cl_id);
1953
1954         struct tstorm_eth_mac_filter_config *mac_filters =
1955             (struct tstorm_eth_mac_filter_config *)p->rdata;
1956
1957         /* initial setting is drop-all */
1958         uint8_t drop_all_ucast = 1, drop_all_mcast = 1;
1959         uint8_t accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
1960         uint8_t unmatched_unicast = 0;
1961
1962         /* In E1x we only take the Rx accept flags into account, since
1963          * Tx switching isn't enabled. */
1964         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, &p->rx_accept_flags))
1965                 /* accept matched ucast */
1966                 drop_all_ucast = 0;
1967
1968         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, &p->rx_accept_flags))
1969                 /* accept matched mcast */
1970                 drop_all_mcast = 0;
1971
1972         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
1973                 /* accept all ucast */
1974                 drop_all_ucast = 0;
1975                 accp_all_ucast = 1;
1976         }
1977         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
1978                 /* accept all mcast */
1979                 drop_all_mcast = 0;
1980                 accp_all_mcast = 1;
1981         }
1982         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, &p->rx_accept_flags))
1983                 /* accept (all) bcast */
1984                 accp_all_bcast = 1;
1985         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, &p->rx_accept_flags))
1986                 /* accept unmatched unicasts */
1987                 unmatched_unicast = 1;
1988
1989         mac_filters->ucast_drop_all = drop_all_ucast ?
1990             mac_filters->ucast_drop_all | mask :
1991             mac_filters->ucast_drop_all & ~mask;
1992
1993         mac_filters->mcast_drop_all = drop_all_mcast ?
1994             mac_filters->mcast_drop_all | mask :
1995             mac_filters->mcast_drop_all & ~mask;
1996
1997         mac_filters->ucast_accept_all = accp_all_ucast ?
1998             mac_filters->ucast_accept_all | mask :
1999             mac_filters->ucast_accept_all & ~mask;
2000
2001         mac_filters->mcast_accept_all = accp_all_mcast ?
2002             mac_filters->mcast_accept_all | mask :
2003             mac_filters->mcast_accept_all & ~mask;
2004
2005         mac_filters->bcast_accept_all = accp_all_bcast ?
2006             mac_filters->bcast_accept_all | mask :
2007             mac_filters->bcast_accept_all & ~mask;
2008
2009         mac_filters->unmatched_unicast = unmatched_unicast ?
2010             mac_filters->unmatched_unicast | mask :
2011             mac_filters->unmatched_unicast & ~mask;
2012
2013         ECORE_MSG(sc, "drop_ucast 0x%x drop_mcast 0x%x accp_ucast 0x%x "
2014                   "accp_mcast 0x%x accp_bcast 0x%x",
2015                   mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
2016                   mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
2017                   mac_filters->bcast_accept_all);
2018
2019         /* write the MAC filter structure */
2020         __storm_memset_mac_filters(sc, mac_filters, p->func_id);
2021
2022         /* The operation is completed */
2023         ECORE_CLEAR_BIT(p->state, p->pstate);
2024         ECORE_SMP_MB_AFTER_CLEAR_BIT();
2025
2026         return ECORE_SUCCESS;
2027 }
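
/* Worked example (illustrative): for cl_id 5 the mask is (1 << 5) = 0x20.
 * Requesting drop-all unicast for that client ORs 0x20 into ucast_drop_all,
 * while accepting unicast instead clears it with '& ~0x20'. Each client thus
 * owns exactly one bit in every filter word, so updating one client never
 * disturbs another's settings.
 */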
2028
2029 /* Setup ramrod data */
2030 static void ecore_rx_mode_set_rdata_hdr_e2(uint32_t cid, struct eth_classify_header
2031                                            *hdr, uint8_t rule_cnt)
2032 {
2033         hdr->echo = ECORE_CPU_TO_LE32(cid);
2034         hdr->rule_cnt = rule_cnt;
2035 }
2036
2037 static void ecore_rx_mode_set_cmd_state_e2(unsigned long *accept_flags, struct eth_filter_rules_cmd
2038                                            *cmd, int clear_accept_all)
2039 {
2040         uint16_t state;
2041
2042         /* start with 'drop-all' */
2043         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2044             ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2045
2046         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNICAST, accept_flags))
2047                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2048
2049         if (ECORE_TEST_BIT(ECORE_ACCEPT_MULTICAST, accept_flags))
2050                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2051
2052         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_UNICAST, accept_flags)) {
2053                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2054                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2055         }
2056
2057         if (ECORE_TEST_BIT(ECORE_ACCEPT_ALL_MULTICAST, accept_flags)) {
2058                 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2059                 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2060         }
2061         if (ECORE_TEST_BIT(ECORE_ACCEPT_BROADCAST, accept_flags))
2062                 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2063
2064         if (ECORE_TEST_BIT(ECORE_ACCEPT_UNMATCHED, accept_flags)) {
2065                 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2066                 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2067         }
2068         if (ECORE_TEST_BIT(ECORE_ACCEPT_ANY_VLAN, accept_flags))
2069                 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2070
2071         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2072         if (clear_accept_all) {
2073                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2074                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2075                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2076                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2077         }
2078
2079         cmd->state = ECORE_CPU_TO_LE16(state);
2080 }
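
/* Example (illustrative): an accept_flags set of ECORE_ACCEPT_UNICAST |
 * ECORE_ACCEPT_ALL_MULTICAST | ECORE_ACCEPT_BROADCAST translates to a state
 * with both DROP_ALL bits cleared and MCAST_ACCEPT_ALL | BCAST_ACCEPT_ALL
 * set, i.e. matched unicast is received while multicast and broadcast pass
 * unconditionally.
 */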
2081
2082 static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
2083                                 struct ecore_rx_mode_ramrod_params *p)
2084 {
2085         struct eth_filter_rules_ramrod_data *data = p->rdata;
2086         int rc;
2087         uint8_t rule_idx = 0;
2088
2089         /* Reset the ramrod data buffer */
2090         ECORE_MEMSET(data, 0, sizeof(*data));
2091
2092         /* Setup ramrod data */
2093
2094         /* Tx (internal switching) */
2095         if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2096                 data->rules[rule_idx].client_id = p->cl_id;
2097                 data->rules[rule_idx].func_id = p->func_id;
2098
2099                 data->rules[rule_idx].cmd_general_data =
2100                     ETH_FILTER_RULES_CMD_TX_CMD;
2101
2102                 ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2103                                                &(data->rules[rule_idx++]),
2104                                                FALSE);
2105         }
2106
2107         /* Rx */
2108         if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2109                 data->rules[rule_idx].client_id = p->cl_id;
2110                 data->rules[rule_idx].func_id = p->func_id;
2111
2112                 data->rules[rule_idx].cmd_general_data =
2113                     ETH_FILTER_RULES_CMD_RX_CMD;
2114
2115                 ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2116                                                &(data->rules[rule_idx++]),
2117                                                FALSE);
2118         }
2119
2120         /* If FCoE queue configuration has been requested, configure the Rx and
2121          * internal switching modes for this queue in separate rules.
2122          *
2123          * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2124          * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2125          */
2126         if (ECORE_TEST_BIT(ECORE_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2127                 /*  Tx (internal switching) */
2128                 if (ECORE_TEST_BIT(RAMROD_TX, &p->ramrod_flags)) {
2129                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2130                         data->rules[rule_idx].func_id = p->func_id;
2131
2132                         data->rules[rule_idx].cmd_general_data =
2133                             ETH_FILTER_RULES_CMD_TX_CMD;
2134
2135                         ecore_rx_mode_set_cmd_state_e2(&p->tx_accept_flags,
2136                                                        &(data->rules
2137                                                          [rule_idx++]), TRUE);
2138                 }
2139
2140                 /* Rx */
2141                 if (ECORE_TEST_BIT(RAMROD_RX, &p->ramrod_flags)) {
2142                         data->rules[rule_idx].client_id = ECORE_FCOE_CID(sc);
2143                         data->rules[rule_idx].func_id = p->func_id;
2144
2145                         data->rules[rule_idx].cmd_general_data =
2146                             ETH_FILTER_RULES_CMD_RX_CMD;
2147
2148                         ecore_rx_mode_set_cmd_state_e2(&p->rx_accept_flags,
2149                                                        &(data->rules
2150                                                          [rule_idx++]), TRUE);
2151                 }
2152         }
2153
2154         /* Set the ramrod header (most importantly - number of rules to
2155          * configure).
2156          */
2157         ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2158
2159         ECORE_MSG(sc,
2160                   "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
2161                   data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
2162
2163         /* No need for an explicit memory barrier here: we need to ensure
2164          * the ordering of writing to the SPQ element and of the SPQ
2165          * producer update anyway, and since the latter involves a memory
2166          * read, a full memory barrier is placed there instead
2167          * (inside ecore_sp_post()).
2168          */
2169
2170         /* Send a ramrod */
2171         rc = ecore_sp_post(sc,
2172                            RAMROD_CMD_ID_ETH_FILTER_RULES,
2173                            p->cid, p->rdata_mapping, ETH_CONNECTION_TYPE);
2174         if (rc)
2175                 return rc;
2176
2177         /* Ramrod completion is pending */
2178         return ECORE_PENDING;
2179 }
2180
2181 static int ecore_wait_rx_mode_comp_e2(struct bnx2x_softc *sc,
2182                                       struct ecore_rx_mode_ramrod_params *p)
2183 {
2184         return ecore_state_wait(sc, p->state, p->pstate);
2185 }
2186
2187 static int ecore_empty_rx_mode_wait(__rte_unused struct bnx2x_softc *sc,
2188                                     __rte_unused struct
2189                                     ecore_rx_mode_ramrod_params *p)
2190 {
2191         /* Do nothing */
2192         return ECORE_SUCCESS;
2193 }
2194
2195 int ecore_config_rx_mode(struct bnx2x_softc *sc,
2196                          struct ecore_rx_mode_ramrod_params *p)
2197 {
2198         int rc;
2199
2200         /* Configure the new classification in the chip */
2201         if (p->rx_mode_obj->config_rx_mode) {
2202                 rc = p->rx_mode_obj->config_rx_mode(sc, p);
2203                 if (rc < 0)
2204                         return rc;
2205
2206                 /* Wait for a ramrod completion if was requested */
2207                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2208                         rc = p->rx_mode_obj->wait_comp(sc, p);
2209                         if (rc)
2210                                 return rc;
2211                 }
2212         } else {
2213                 ECORE_MSG(sc, "ERROR: config_rx_mode is NULL");
2214                 return -1;
2215         }
2216
2217         return rc;
2218 }
2219
2220 void ecore_init_rx_mode_obj(struct bnx2x_softc *sc, struct ecore_rx_mode_obj *o)
2221 {
2222         if (CHIP_IS_E1x(sc)) {
2223                 o->wait_comp = ecore_empty_rx_mode_wait;
2224                 o->config_rx_mode = ecore_set_rx_mode_e1x;
2225         } else {
2226                 o->wait_comp = ecore_wait_rx_mode_comp_e2;
2227                 o->config_rx_mode = ecore_set_rx_mode_e2;
2228         }
2229 }
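
/* On E1x the rx_mode is applied synchronously: ecore_set_rx_mode_e1x()
 * writes the filter structure straight into TSTORM internal memory and the
 * wait callback is a no-op. On E2 and newer a FILTER_RULES ramrod is posted
 * instead and completion is awaited through the object's pending state.
 */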
2230
2231 /********************* Multicast verbs: SET, CLEAR ****************************/
2232 static uint8_t ecore_mcast_bin_from_mac(uint8_t * mac)
2233 {
2234         return (ECORE_CRC32_LE(0, mac, ETH_ALEN) >> 24) & 0xff;
2235 }
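
/* Illustration: the approximate-match registry consists of 256 bins indexed
 * by the most significant byte of the little-endian CRC32 of the MAC, e.g.
 * every MAC whose CRC32 is 0xAB?????? lands in bin 0xAB. A bin may therefore
 * represent several MACs, which is why DEL can only clear "some" bin (see
 * ecore_mcast_clear_first_bin() below) rather than a specific MAC.
 */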
2236
2237 struct ecore_mcast_mac_elem {
2238         ecore_list_entry_t link;
2239         uint8_t mac[ETH_ALEN];
2240         uint8_t pad[2];         /* For a natural alignment of the following buffer */
2241 };
2242
2243 struct ecore_pending_mcast_cmd {
2244         ecore_list_entry_t link;
2245         int type;               /* ECORE_MCAST_CMD_X */
2246         union {
2247                 ecore_list_t macs_head;
2248                 uint32_t macs_num;      /* Needed for DEL command */
2249                 int next_bin;   /* Needed for RESTORE flow with approximate match */
2250         } data;
2251         int alloc_len;          /* total allocated size, needed to free the command */
2252         int done;               /* set to TRUE when the command has been handled; in
2253                                  * practice used only for the 57712, where one pending
2254                                  * command may be handled in a few operations. Since for
2255                                  * other chips every command is completed in a single
2256                                  * ramrod, there is no need for this field there.
2257                                  */
2258 };
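
/* For the ADD flavour the command is allocated as one contiguous block: the
 * struct above is immediately followed by macs_list_len ecore_mcast_mac_elem
 * entries, which ecore_mcast_enqueue_cmd() below links onto data.macs_head.
 * This is why a single ECORE_FREE of alloc_len bytes releases the whole
 * command once it completes.
 */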
2259
2260 static int ecore_mcast_wait(struct bnx2x_softc *sc, struct ecore_mcast_obj *o)
2261 {
2262         if (ecore_state_wait(sc, o->sched_state, o->raw.pstate) ||
2263             o->raw.wait_comp(sc, &o->raw))
2264                 return ECORE_TIMEOUT;
2265
2266         return ECORE_SUCCESS;
2267 }
2268
2269 static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
2270                                    struct ecore_mcast_obj *o,
2271                                    struct ecore_mcast_ramrod_params *p,
2272                                    enum ecore_mcast_cmd cmd)
2273 {
2274         int total_sz;
2275         struct ecore_pending_mcast_cmd *new_cmd;
2276         struct ecore_mcast_mac_elem *cur_mac = NULL;
2277         struct ecore_mcast_list_elem *pos;
2278         int macs_list_len = ((cmd == ECORE_MCAST_CMD_ADD) ?
2279                              p->mcast_list_len : 0);
2280
2281         /* If the command is empty ("handle pending commands only"), return */
2282         if (!p->mcast_list_len)
2283                 return ECORE_SUCCESS;
2284
2285         total_sz = sizeof(*new_cmd) +
2286             macs_list_len * sizeof(struct ecore_mcast_mac_elem);
2287
2288         /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2289         new_cmd = ECORE_ZALLOC(total_sz, GFP_ATOMIC, sc);
2290
2291         if (!new_cmd)
2292                 return ECORE_NOMEM;
2293
2294         ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d",
2295                   cmd, macs_list_len);
2296
2297         ECORE_LIST_INIT(&new_cmd->data.macs_head);
2298
2299         new_cmd->type = cmd;
2300         new_cmd->done = FALSE;
2301         new_cmd->alloc_len = total_sz;
2302         switch (cmd) {
2303         case ECORE_MCAST_CMD_ADD:
2304                 cur_mac = (struct ecore_mcast_mac_elem *)
2305                     ((uint8_t *) new_cmd + sizeof(*new_cmd));
2306
2307                 /* Push the MACs of the current command into the pending command
2308                  * MACs list: FIFO
2309                  */
2310                 ECORE_LIST_FOR_EACH_ENTRY(pos, &p->mcast_list, link,
2311                                           struct ecore_mcast_list_elem) {
2312                         ECORE_MEMCPY(cur_mac->mac, pos->mac, ETH_ALEN);
2313                         ECORE_LIST_PUSH_TAIL(&cur_mac->link,
2314                                              &new_cmd->data.macs_head);
2315                         cur_mac++;
2316                 }
2317
2318                 break;
2319
2320         case ECORE_MCAST_CMD_DEL:
2321                 new_cmd->data.macs_num = p->mcast_list_len;
2322                 break;
2323
2324         case ECORE_MCAST_CMD_RESTORE:
2325                 new_cmd->data.next_bin = 0;
2326                 break;
2327
2328         default:
2329                 ECORE_FREE(sc, new_cmd, total_sz);
2330                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2331                 return ECORE_INVAL;
2332         }
2333
2334         /* Push the new pending command to the tail of the pending list: FIFO */
2335         ECORE_LIST_PUSH_TAIL(&new_cmd->link, &o->pending_cmds_head);
2336
2337         o->set_sched(o);
2338
2339         return ECORE_PENDING;
2340 }
2341
2342 /**
2343  * ecore_mcast_get_next_bin - get the next set bin (index)
2344  *
2345  * @o:          mcast object
2346  * @last:       index to start looking from (inclusive)
2347  *
2348  * returns the next found (set) bin or a negative value if none is found.
2349  */
2350 static int ecore_mcast_get_next_bin(struct ecore_mcast_obj *o, int last)
2351 {
2352         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2353
2354         for (i = last / BIT_VEC64_ELEM_SZ; i < ECORE_MCAST_VEC_SZ; i++) {
2355                 if (o->registry.aprox_match.vec[i])
2356                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2357                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2358                                 if (BIT_VEC64_TEST_BIT
2359                                     (o->registry.aprox_match.vec, cur_bit)) {
2360                                         return cur_bit;
2361                                 }
2362                         }
2363                 inner_start = 0;
2364         }
2365
2366         /* None found */
2367         return -1;
2368 }
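
/* Worked example (illustrative): with vec[0] == 0x5 (bins 0 and 2 set) and
 * all other elements zero, ecore_mcast_get_next_bin(o, 1) starts the inner
 * scan at bit 1, finds bit 2 set and returns 2; a further call with
 * last == 3 scans bits 3..63 and the remaining elements, and returns -1.
 */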
2369
2370 /**
2371  * ecore_mcast_clear_first_bin - find the first set bin and clear it
2372  *
2373  * @o:
2374  *
2375  * returns the index of the found bin or -1 if none is found
2376  */
2377 static int ecore_mcast_clear_first_bin(struct ecore_mcast_obj *o)
2378 {
2379         int cur_bit = ecore_mcast_get_next_bin(o, 0);
2380
2381         if (cur_bit >= 0)
2382                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2383
2384         return cur_bit;
2385 }
2386
2387 static uint8_t ecore_mcast_get_rx_tx_flag(struct ecore_mcast_obj *o)
2388 {
2389         struct ecore_raw_obj *raw = &o->raw;
2390         uint8_t rx_tx_flag = 0;
2391
2392         if ((raw->obj_type == ECORE_OBJ_TYPE_TX) ||
2393             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2394                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2395
2396         if ((raw->obj_type == ECORE_OBJ_TYPE_RX) ||
2397             (raw->obj_type == ECORE_OBJ_TYPE_RX_TX))
2398                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2399
2400         return rx_tx_flag;
2401 }
2402
2403 static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
2404                                         struct ecore_mcast_obj *o, int idx,
2405                                         union ecore_mcast_config_data *cfg_data,
2406                                         enum ecore_mcast_cmd cmd)
2407 {
2408         struct ecore_raw_obj *r = &o->raw;
2409         struct eth_multicast_rules_ramrod_data *data =
2410             (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2411         uint8_t func_id = r->func_id;
2412         uint8_t rx_tx_add_flag = ecore_mcast_get_rx_tx_flag(o);
2413         int bin;
2414
2415         if ((cmd == ECORE_MCAST_CMD_ADD) || (cmd == ECORE_MCAST_CMD_RESTORE))
2416                 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2417
2418         data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2419
2420         /* Get a bin and update a bins' vector */
2421         switch (cmd) {
2422         case ECORE_MCAST_CMD_ADD:
2423                 bin = ecore_mcast_bin_from_mac(cfg_data->mac);
2424                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2425                 break;
2426
2427         case ECORE_MCAST_CMD_DEL:
2428                 /* If there were no more bins to clear
2429                  * (ecore_mcast_clear_first_bin() returns -1) then we would
2430                  * clear any (0xff) bin.
2431                  * See ecore_mcast_validate_e2() for explanation when it may
2432                  * happen.
2433                  */
2434                 bin = ecore_mcast_clear_first_bin(o);
2435                 break;
2436
2437         case ECORE_MCAST_CMD_RESTORE:
2438                 bin = cfg_data->bin;
2439                 break;
2440
2441         default:
2442                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2443                 return;
2444         }
2445
2446         ECORE_MSG(sc, "%s bin %d",
2447                   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2448                    "Setting" : "Clearing"), bin);
2449
2450         data->rules[idx].bin_id = (uint8_t) bin;
2451         data->rules[idx].func_id = func_id;
2452         data->rules[idx].engine_id = o->engine_id;
2453 }
2454
2455 /**
2456  * ecore_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2457  *
2458  * @sc:         device handle
2459  * @o:          mcast object
2460  * @start_bin:  index in the registry to start from (inclusive)
2461  * @rdata_idx:  index in the ramrod data to start from
2462  *
2463  * returns last handled bin index or -1 if all bins have been handled
2464  */
2465 static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
2466                                              struct ecore_mcast_obj *o,
2467                                              int start_bin, int *rdata_idx)
2468 {
2469         int cur_bin, cnt = *rdata_idx;
2470         union ecore_mcast_config_data cfg_data = { NULL };
2471
2472         /* go through the registry and configure the bins from it */
2473         for (cur_bin = ecore_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2474              cur_bin = ecore_mcast_get_next_bin(o, cur_bin + 1)) {
2475
2476                 cfg_data.bin = (uint8_t) cur_bin;
2477                 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_RESTORE);
2478
2479                 cnt++;
2480
2481                 ECORE_MSG(sc, "About to configure a bin %d", cur_bin);
2482
2483                 /* Break if we reached the maximum number
2484                  * of rules.
2485                  */
2486                 if (cnt >= o->max_cmd_len)
2487                         break;
2488         }
2489
2490         *rdata_idx = cnt;
2491
2492         return cur_bin;
2493 }
2494
2495 static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
2496                                            struct ecore_mcast_obj *o,
2497                                            struct ecore_pending_mcast_cmd
2498                                            *cmd_pos, int *line_idx)
2499 {
2500         struct ecore_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2501         int cnt = *line_idx;
2502         union ecore_mcast_config_data cfg_data = { NULL };
2503
2504         ECORE_LIST_FOR_EACH_ENTRY_SAFE(pmac_pos, pmac_pos_n,
2505                                        &cmd_pos->data.macs_head, link,
2506                                        struct ecore_mcast_mac_elem) {
2507
2508                 cfg_data.mac = &pmac_pos->mac[0];
2509                 o->set_one_rule(sc, o, cnt, &cfg_data, cmd_pos->type);
2510
2511                 cnt++;
2512
2513                 ECORE_MSG(sc,
2514                           "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2515                           pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
2516                           pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
2517
2518                 ECORE_LIST_REMOVE_ENTRY(&pmac_pos->link,
2519                                         &cmd_pos->data.macs_head);
2520
2521                 /* Break if we reached the maximum number
2522                  * of rules.
2523                  */
2524                 if (cnt >= o->max_cmd_len)
2525                         break;
2526         }
2527
2528         *line_idx = cnt;
2529
2530         /* if no more MACs to configure - we are done */
2531         if (ECORE_LIST_IS_EMPTY(&cmd_pos->data.macs_head))
2532                 cmd_pos->done = TRUE;
2533 }
2534
2535 static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
2536                                            struct ecore_mcast_obj *o,
2537                                            struct ecore_pending_mcast_cmd
2538                                            *cmd_pos, int *line_idx)
2539 {
2540         int cnt = *line_idx;
2541
2542         while (cmd_pos->data.macs_num) {
2543                 o->set_one_rule(sc, o, cnt, NULL, cmd_pos->type);
2544
2545                 cnt++;
2546
2547                 cmd_pos->data.macs_num--;
2548
2549                 ECORE_MSG(sc, "Deleting MAC. %d left, cnt is %d",
2550                           cmd_pos->data.macs_num, cnt);
2551
2552                 /* Break if we reached the maximum
2553                  * number of rules.
2554                  */
2555                 if (cnt >= o->max_cmd_len)
2556                         break;
2557         }
2558
2559         *line_idx = cnt;
2560
2561         /* If we cleared all bins - we are done */
2562         if (!cmd_pos->data.macs_num)
2563                 cmd_pos->done = TRUE;
2564 }
2565
2566 static void ecore_mcast_hdl_pending_restore_e2(struct bnx2x_softc *sc,
2567                                                struct ecore_mcast_obj *o, struct
2568                                                ecore_pending_mcast_cmd
2569                                                *cmd_pos, int *line_idx)
2570 {
2571         cmd_pos->data.next_bin = o->hdl_restore(sc, o, cmd_pos->data.next_bin,
2572                                                 line_idx);
2573
2574         if (cmd_pos->data.next_bin < 0)
2575                 /* If o->set_restore returned -1 we are done */
2576                 cmd_pos->done = TRUE;
2577         else
2578                 /* Start from the next bin next time */
2579                 cmd_pos->data.next_bin++;
2580 }
2581
2582 static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct
2583                                               ecore_mcast_ramrod_params
2584                                               *p)
2585 {
2586         struct ecore_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2587         int cnt = 0;
2588         struct ecore_mcast_obj *o = p->mcast_obj;
2589
2590         ECORE_LIST_FOR_EACH_ENTRY_SAFE(cmd_pos, cmd_pos_n,
2591                                        &o->pending_cmds_head, link,
2592                                        struct ecore_pending_mcast_cmd) {
2593                 switch (cmd_pos->type) {
2594                 case ECORE_MCAST_CMD_ADD:
2595                         ecore_mcast_hdl_pending_add_e2(sc, o, cmd_pos, &cnt);
2596                         break;
2597
2598                 case ECORE_MCAST_CMD_DEL:
2599                         ecore_mcast_hdl_pending_del_e2(sc, o, cmd_pos, &cnt);
2600                         break;
2601
2602                 case ECORE_MCAST_CMD_RESTORE:
2603                         ecore_mcast_hdl_pending_restore_e2(sc, o, cmd_pos,
2604                                                            &cnt);
2605                         break;
2606
2607                 default:
2608                         PMD_DRV_LOG(ERR, sc,
2609                                     "Unknown command: %d", cmd_pos->type);
2610                         return ECORE_INVAL;
2611                 }
2612
2613                 /* If the command has been completed - remove it from the list
2614                  * and free the memory
2615                  */
2616                 if (cmd_pos->done) {
2617                         ECORE_LIST_REMOVE_ENTRY(&cmd_pos->link,
2618                                                 &o->pending_cmds_head);
2619                         ECORE_FREE(sc, cmd_pos, cmd_pos->alloc_len);
2620                 }
2621
2622                 /* Break if we reached the maximum number of rules */
2623                 if (cnt >= o->max_cmd_len)
2624                         break;
2625         }
2626
2627         return cnt;
2628 }
2629
2630 static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
2631                                 struct ecore_mcast_obj *o,
2632                                 struct ecore_mcast_ramrod_params *p,
2633                                 int *line_idx)
2634 {
2635         struct ecore_mcast_list_elem *mlist_pos;
2636         union ecore_mcast_config_data cfg_data = { NULL };
2637         int cnt = *line_idx;
2638
2639         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2640                                   struct ecore_mcast_list_elem) {
2641                 cfg_data.mac = mlist_pos->mac;
2642                 o->set_one_rule(sc, o, cnt, &cfg_data, ECORE_MCAST_CMD_ADD);
2643
2644                 cnt++;
2645
2646                 ECORE_MSG(sc,
2647                           "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
2648                           mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2649                           mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
2650         }
2651
2652         *line_idx = cnt;
2653 }
2654
2655 static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
2656                                 struct ecore_mcast_obj *o,
2657                                 struct ecore_mcast_ramrod_params *p,
2658                                 int *line_idx)
2659 {
2660         int cnt = *line_idx, i;
2661
2662         for (i = 0; i < p->mcast_list_len; i++) {
2663                 o->set_one_rule(sc, o, cnt, NULL, ECORE_MCAST_CMD_DEL);
2664
2665                 cnt++;
2666
2667                 ECORE_MSG(sc,
2668                           "Deleting MAC. %d left", p->mcast_list_len - i - 1);
2669         }
2670
2671         *line_idx = cnt;
2672 }
2673
2674 /**
2675  * ecore_mcast_handle_current_cmd - fill ramrod data with the current command
2676  *
2677  * @sc:         device handle
2678  * @p:          ramrod parameters
2679  * @cmd:        multicast command type (ADD/DEL/RESTORE)
2680  * @start_cnt:  first line in the ramrod data that may be used
2681  *
2682  * This function is called iff there is enough place for the current command in
2683  * the ramrod data.
2684  * This function is called iff there is enough room for the current command in
2685  */
2686 static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
2687                                           ecore_mcast_ramrod_params *p,
2688                                           enum ecore_mcast_cmd cmd,
2689                                           int start_cnt)
2690 {
2691         struct ecore_mcast_obj *o = p->mcast_obj;
2692         int cnt = start_cnt;
2693
2694         ECORE_MSG(sc, "p->mcast_list_len=%d", p->mcast_list_len);
2695
2696         switch (cmd) {
2697         case ECORE_MCAST_CMD_ADD:
2698                 ecore_mcast_hdl_add(sc, o, p, &cnt);
2699                 break;
2700
2701         case ECORE_MCAST_CMD_DEL:
2702                 ecore_mcast_hdl_del(sc, o, p, &cnt);
2703                 break;
2704
2705         case ECORE_MCAST_CMD_RESTORE:
2706                 o->hdl_restore(sc, o, 0, &cnt);
2707                 break;
2708
2709         default:
2710                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2711                 return ECORE_INVAL;
2712         }
2713
2714         /* The current command has been handled */
2715         p->mcast_list_len = 0;
2716
2717         return cnt;
2718 }
2719
2720 static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
2721                                    struct ecore_mcast_ramrod_params *p,
2722                                    enum ecore_mcast_cmd cmd)
2723 {
2724         struct ecore_mcast_obj *o = p->mcast_obj;
2725         int reg_sz = o->get_registry_size(o);
2726
2727         switch (cmd) {
2728                 /* DEL command deletes all currently configured MACs */
2729         case ECORE_MCAST_CMD_DEL:
2730                 o->set_registry_size(o, 0);
2731                 /* Don't break: fall through to the RESTORE handling */
2732
2733                 /* RESTORE command will restore the entire multicast configuration */
2734         case ECORE_MCAST_CMD_RESTORE:
2735                 /* Here we set the approximate amount of work to do, which in
2736                  * fact may be less: some MACs in postponed ADD command(s)
2737                  * scheduled before this command may fall into the same bin,
2738                  * so the actual number of bins set in the registry would be
2739                  * smaller than estimated here. See
2740                  * ecore_mcast_set_one_rule_e2() for further details.
2741                  */
2742                 p->mcast_list_len = reg_sz;
2743                 break;
2744
2745         case ECORE_MCAST_CMD_ADD:
2746         case ECORE_MCAST_CMD_CONT:
2747                 /* Here we assume that all new MACs will fall into new bins.
2748                  * However we will correct the real registry size after we
2749                  * handle all pending commands.
2750                  */
2751                 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2752                 break;
2753
2754         default:
2755                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
2756                 return ECORE_INVAL;
2757         }
2758
2759         /* Increase the total number of MACs pending to be configured */
2760         o->total_pending_num += p->mcast_list_len;
2761
2762         return ECORE_SUCCESS;
2763 }
2764
2765 static void ecore_mcast_revert_e2(__rte_unused struct bnx2x_softc *sc,
2766                                   struct ecore_mcast_ramrod_params *p,
2767                                   int old_num_bins)
2768 {
2769         struct ecore_mcast_obj *o = p->mcast_obj;
2770
2771         o->set_registry_size(o, old_num_bins);
2772         o->total_pending_num -= p->mcast_list_len;
2773 }
2774
2775 /**
2776  * ecore_mcast_set_rdata_hdr_e2 - set the header values
2777  *
2778  * @sc:         device handle
2779  * @p:          multicast ramrod parameters
2780  * @len:        number of rules to handle
2781  */
2782 static void ecore_mcast_set_rdata_hdr_e2(__rte_unused struct bnx2x_softc *sc,
2783                                          struct ecore_mcast_ramrod_params *p,
2784                                          uint8_t len)
2785 {
2786         struct ecore_raw_obj *r = &p->mcast_obj->raw;
2787         struct eth_multicast_rules_ramrod_data *data =
2788             (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2789
2790         data->header.echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
2791                                               (ECORE_FILTER_MCAST_PENDING <<
2792                                                ECORE_SWCID_SHIFT));
2793         data->header.rule_cnt = len;
2794 }
2795
2796 /**
2797  * ecore_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2798  *
2799  * @o:          multicast object
2800  *
2801  * Recalculate the actual number of set bins in the registry using Brian
2802  * Kernighan's algorithm: its execution time is proportional to the
2803  * number of set bins.
2804  */
2805 static int ecore_mcast_refresh_registry_e2(struct ecore_mcast_obj *o)
2806 {
2807         int i, cnt = 0;
2808         uint64_t elem;
2809
2810         for (i = 0; i < ECORE_MCAST_VEC_SZ; i++) {
2811                 elem = o->registry.aprox_match.vec[i];
2812                 for (; elem; cnt++)
2813                         elem &= elem - 1;
2814         }
2815
2816         o->set_registry_size(o, cnt);
2817
2818         return ECORE_SUCCESS;
2819 }
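
/* Illustrative sketch (standalone, not compiled into the driver): the
 * Kernighan bit-clearing loop used above. Each `v &= v - 1` clears the
 * lowest set bit, so the loop body runs once per set bit rather than
 * once per bit position.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static int popcount_kernighan(uint64_t v)
{
        int cnt = 0;

        for (; v; cnt++)
                v &= v - 1;     /* clear the lowest set bit */

        return cnt;
}

int main(void)
{
        /* 0xF0 has four set bits; expect "4" */
        printf("%d\n", popcount_kernighan(0xF0));
        return 0;
}
#endif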
2820
2821 static int ecore_mcast_setup_e2(struct bnx2x_softc *sc,
2822                                 struct ecore_mcast_ramrod_params *p,
2823                                 enum ecore_mcast_cmd cmd)
2824 {
2825         struct ecore_raw_obj *raw = &p->mcast_obj->raw;
2826         struct ecore_mcast_obj *o = p->mcast_obj;
2827         struct eth_multicast_rules_ramrod_data *data =
2828             (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2829         int cnt = 0, rc;
2830
2831         /* Reset the ramrod data buffer */
2832         ECORE_MEMSET(data, 0, sizeof(*data));
2833
2834         cnt = ecore_mcast_handle_pending_cmds_e2(sc, p);
2835
2836         /* If there are no more pending commands - clear SCHEDULED state */
2837         if (ECORE_LIST_IS_EMPTY(&o->pending_cmds_head))
2838                 o->clear_sched(o);
2839
2840         /* The below may be TRUE iff there was enough room in ramrod
2841          * data for all pending commands and for the current
2842          * command. Otherwise the current command would have been added
2843          * to the pending commands and p->mcast_list_len would have been
2844          * zeroed.
2845          */
2846         if (p->mcast_list_len > 0)
2847                 cnt = ecore_mcast_handle_current_cmd(sc, p, cmd, cnt);
2848
2849         /* We've pulled out some MACs - update the total number of
2850          * outstanding.
2851          */
2852         o->total_pending_num -= cnt;
2853
2854         /* send a ramrod */
2855         ECORE_DBG_BREAK_IF(o->total_pending_num < 0);
2856         ECORE_DBG_BREAK_IF(cnt > o->max_cmd_len);
2857
2858         ecore_mcast_set_rdata_hdr_e2(sc, p, (uint8_t) cnt);
2859
2860         /* Update a registry size if there are no more pending operations.
2861          *
2862          * We don't want to change the value of the registry size if there are
2863          * pending operations because we want it to always be equal to the
2864          * exact or the approximate number (see ecore_mcast_validate_e2()) of
2865          * set bins after the last requested operation in order to properly
2866          * evaluate the size of the next DEL/RESTORE operation.
2867          *
2868          * Note that we update the registry itself during command(s) handling
2869          * - see ecore_mcast_set_one_rule_e2(). That's because for 57712 we
2870          * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2871          * with a limited amount of update commands (per MAC/bin) and we don't
2872          * know in this scope what the actual state of bins configuration is
2873          * going to be after this ramrod.
2874          */
2875         if (!o->total_pending_num)
2876                 ecore_mcast_refresh_registry_e2(o);
2877
2878         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2879          * RAMROD_PENDING status immediately.
2880          */
2881         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2882                 raw->clear_pending(raw);
2883                 return ECORE_SUCCESS;
2884         } else {
2885                 /* No need for an explicit memory barrier here: ordering
2886                  * between the write to the SPQ element and the update of
2887                  * the SPQ producer (which involves a memory read) is
2888                  * enforced by the full memory barrier inside
2889                  * ecore_sp_post().
2890                  */
2891
2892                 /* Send a ramrod */
2893                 rc = ecore_sp_post(sc,
2894                                    RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2895                                    raw->cid,
2896                                    raw->rdata_mapping, ETH_CONNECTION_TYPE);
2897                 if (rc)
2898                         return rc;
2899
2900                 /* Ramrod completion is pending */
2901                 return ECORE_PENDING;
2902         }
2903 }
2904
2905 static int ecore_mcast_validate_e1h(__rte_unused struct bnx2x_softc *sc,
2906                                     struct ecore_mcast_ramrod_params *p,
2907                                     enum ecore_mcast_cmd cmd)
2908 {
2909         /* Mark that there is work to do */
2910         if ((cmd == ECORE_MCAST_CMD_DEL) || (cmd == ECORE_MCAST_CMD_RESTORE))
2911                 p->mcast_list_len = 1;
2912
2913         return ECORE_SUCCESS;
2914 }
2915
2916 static void ecore_mcast_revert_e1h(__rte_unused struct bnx2x_softc *sc,
2917                                    __rte_unused struct ecore_mcast_ramrod_params
2918                                    *p, __rte_unused int old_num_bins)
2919 {
2920         /* Do nothing */
2921 }
2922
2923 #define ECORE_57711_SET_MC_FILTER(filter, bit) \
2924 do { \
2925         (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2926 } while (0)
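
/* Illustrative note: `(bit) >> 5` selects which 32-bit word of the
 * filter array holds the bit, and `(bit) & 0x1f` is its position
 * inside that word. E.g. for a hypothetical bit 37:
 *
 *     uint32_t filter[ECORE_MC_HASH_SIZE] = { 0 };
 *     ECORE_57711_SET_MC_FILTER(filter, 37);
 *     // now filter[1] == (1 << 5)
 */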
2927
2928 static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
2929                                     struct ecore_mcast_obj *o,
2930                                     struct ecore_mcast_ramrod_params *p,
2931                                     uint32_t *mc_filter)
2932 {
2933         struct ecore_mcast_list_elem *mlist_pos;
2934         int bit;
2935
2936         ECORE_LIST_FOR_EACH_ENTRY(mlist_pos, &p->mcast_list, link,
2937                                   struct ecore_mcast_list_elem) {
2938                 bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
2939                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2940
2941                 ECORE_MSG(sc,
2942                           "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
2943                           mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
2944                           mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
2945                           bit);
2946
2947                 /* bookkeeping... */
2948                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bit);
2949         }
2950 }
2951
2952 static void
2953 ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc __rte_unused,
2954                             struct ecore_mcast_obj *o,
2955                             uint32_t *mc_filter)
2956 {
2957         int bit;
2958
2959         for (bit = ecore_mcast_get_next_bin(o, 0);
2960              bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
2961                 ECORE_57711_SET_MC_FILTER(mc_filter, bit);
2962                 ECORE_MSG(sc, "About to set bin %d", bit);
2963         }
2964 }
2965
2966 /* On 57711 we write the multicast MACs' approximate match
2967  * table directly into the TSTORM's internal RAM, so we don't
2968  * need any tricks to make it work.
2969  */
2970 static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
2971                                  struct ecore_mcast_ramrod_params *p,
2972                                  enum ecore_mcast_cmd cmd)
2973 {
2974         int i;
2975         struct ecore_mcast_obj *o = p->mcast_obj;
2976         struct ecore_raw_obj *r = &o->raw;
2977
2978         /* If CLEAR_ONLY has been requested - only clear the registry
2979          * and the pending bit; otherwise program the filter first.
2980          */
2981         if (!ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2982                 uint32_t mc_filter[ECORE_MC_HASH_SIZE] = { 0 };
2983
2984                 /* Set the multicast filter bits before writing it into
2985                  * the internal memory.
2986                  */
2987                 switch (cmd) {
2988                 case ECORE_MCAST_CMD_ADD:
2989                         ecore_mcast_hdl_add_e1h(sc, o, p, mc_filter);
2990                         break;
2991
2992                 case ECORE_MCAST_CMD_DEL:
2993                         ECORE_MSG(sc, "Invalidating multicast MACs configuration");
2994
2995                         /* clear the registry */
2996                         ECORE_MEMSET(o->registry.aprox_match.vec, 0,
2997                                      sizeof(o->registry.aprox_match.vec));
2998                         break;
2999
3000                 case ECORE_MCAST_CMD_RESTORE:
3001                         ecore_mcast_hdl_restore_e1h(sc, o, mc_filter);
3002                         break;
3003
3004                 default:
3005                         PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
3006                         return ECORE_INVAL;
3007                 }
3008
3009                 /* Set the mcast filter in the internal memory */
3010                 for (i = 0; i < ECORE_MC_HASH_SIZE; i++)
3011                         REG_WR(sc, ECORE_MC_HASH_OFFSET(sc, i), mc_filter[i]);
3012         } else
3013                 /* clear the registry */
3014                 ECORE_MEMSET(o->registry.aprox_match.vec, 0,
3015                              sizeof(o->registry.aprox_match.vec));
3016
3017         /* We are done */
3018         r->clear_pending(r);
3019
3020         return ECORE_SUCCESS;
3021 }
3022
3023 static int ecore_mcast_get_registry_size_aprox(struct ecore_mcast_obj *o)
3024 {
3025         return o->registry.aprox_match.num_bins_set;
3026 }
3027
3028 static void ecore_mcast_set_registry_size_aprox(struct ecore_mcast_obj *o,
3029                                                 int n)
3030 {
3031         o->registry.aprox_match.num_bins_set = n;
3032 }
3033
3034 int ecore_config_mcast(struct bnx2x_softc *sc,
3035                        struct ecore_mcast_ramrod_params *p,
3036                        enum ecore_mcast_cmd cmd)
3037 {
3038         struct ecore_mcast_obj *o = p->mcast_obj;
3039         struct ecore_raw_obj *r = &o->raw;
3040         int rc = 0, old_reg_size;
3041
3042         /* This is needed to recover the number of currently configured
3043          * mcast MACs in case of failure.
3044          */
3045         old_reg_size = o->get_registry_size(o);
3046
3047         /* Do some calculations and checks */
3048         rc = o->validate(sc, p, cmd);
3049         if (rc)
3050                 return rc;
3051
3052         /* Return if there is no work to do */
3053         if ((!p->mcast_list_len) && (!o->check_sched(o)))
3054                 return ECORE_SUCCESS;
3055
3056         ECORE_MSG(sc,
3057                   "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
3058                   o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
3059
3060         /* Enqueue the current command to the pending list if we can't complete
3061          * it in the current iteration
3062          */
3063         if (r->check_pending(r) ||
3064             ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3065                 rc = o->enqueue_cmd(sc, p->mcast_obj, p, cmd);
3066                 if (rc < 0)
3067                         goto error_exit1;
3068
3069                 /* As long as the current command is in a command list we
3070                  * don't need to handle it separately.
3071                  */
3072                 p->mcast_list_len = 0;
3073         }
3074
3075         if (!r->check_pending(r)) {
3076
3077                 /* Set 'pending' state */
3078                 r->set_pending(r);
3079
3080                 /* Configure the new classification in the chip */
3081                 rc = o->config_mcast(sc, p, cmd);
3082                 if (rc < 0)
3083                         goto error_exit2;
3084
3085                 /* Wait for a ramrod completion if it was requested */
3086                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3087                         rc = o->wait_comp(sc, o);
3088         }
3089
3090         return rc;
3091
3092 error_exit2:
3093         r->clear_pending(r);
3094
3095 error_exit1:
3096         o->revert(sc, p, old_reg_size);
3097
3098         return rc;
3099 }
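
/* Illustrative sketch (hypothetical caller, not compiled into the
 * driver): the typical way ecore_config_mcast() is driven. The mcast
 * object and the element array are assumed to be set up elsewhere;
 * the ECORE_LIST_PUSH_TAIL(link, list) argument order is assumed from
 * the other ECORE_LIST_* helpers used in this file.
 */
#if 0
static int example_set_mc_list(struct bnx2x_softc *sc,
                               struct ecore_mcast_obj *mcast_obj,
                               struct ecore_mcast_list_elem *elems, int n)
{
        struct ecore_mcast_ramrod_params rparam = { 0 };
        int i, rc;

        rparam.mcast_obj = mcast_obj;
        ECORE_LIST_INIT(&rparam.mcast_list);
        for (i = 0; i < n; i++)
                ECORE_LIST_PUSH_TAIL(&elems[i].link, &rparam.mcast_list);
        rparam.mcast_list_len = n;

        /* Block until the ramrod completes */
        ECORE_SET_BIT(RAMROD_COMP_WAIT, &rparam.ramrod_flags);

        /* Drop the old configuration, then program the new MACs */
        rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
        if (rc < 0)
                return rc;

        return ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD);
}
#endif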
3100
3101 static void ecore_mcast_clear_sched(struct ecore_mcast_obj *o)
3102 {
3103         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3104         ECORE_CLEAR_BIT(o->sched_state, o->raw.pstate);
3105         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3106 }
3107
3108 static void ecore_mcast_set_sched(struct ecore_mcast_obj *o)
3109 {
3110         ECORE_SMP_MB_BEFORE_CLEAR_BIT();
3111         ECORE_SET_BIT(o->sched_state, o->raw.pstate);
3112         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3113 }
3114
3115 static int ecore_mcast_check_sched(struct ecore_mcast_obj *o)
3116 {
3117         return !!ECORE_TEST_BIT(o->sched_state, o->raw.pstate);
3118 }
3119
3120 static int ecore_mcast_check_pending(struct ecore_mcast_obj *o)
3121 {
3122         return o->raw.check_pending(&o->raw) || o->check_sched(o);
3123 }
3124
3125 void ecore_init_mcast_obj(struct bnx2x_softc *sc,
3126                           struct ecore_mcast_obj *mcast_obj,
3127                           uint8_t mcast_cl_id, uint32_t mcast_cid,
3128                           uint8_t func_id, uint8_t engine_id, void *rdata,
3129                           ecore_dma_addr_t rdata_mapping, int state,
3130                           unsigned long *pstate, ecore_obj_type type)
3131 {
3132         ECORE_MEMSET(mcast_obj, 0, sizeof(*mcast_obj));
3133
3134         ecore_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3135                            rdata, rdata_mapping, state, pstate, type);
3136
3137         mcast_obj->engine_id = engine_id;
3138
3139         ECORE_LIST_INIT(&mcast_obj->pending_cmds_head);
3140
3141         mcast_obj->sched_state = ECORE_FILTER_MCAST_SCHED;
3142         mcast_obj->check_sched = ecore_mcast_check_sched;
3143         mcast_obj->set_sched = ecore_mcast_set_sched;
3144         mcast_obj->clear_sched = ecore_mcast_clear_sched;
3145
3146         if (CHIP_IS_E1H(sc)) {
3147                 mcast_obj->config_mcast = ecore_mcast_setup_e1h;
3148                 mcast_obj->enqueue_cmd = NULL;
3149                 mcast_obj->hdl_restore = NULL;
3150                 mcast_obj->check_pending = ecore_mcast_check_pending;
3151
3152                 /* 57711 doesn't send a ramrod, so it has unlimited credit
3153                  * for one command.
3154                  */
3155                 mcast_obj->max_cmd_len = -1;
3156                 mcast_obj->wait_comp = ecore_mcast_wait;
3157                 mcast_obj->set_one_rule = NULL;
3158                 mcast_obj->validate = ecore_mcast_validate_e1h;
3159                 mcast_obj->revert = ecore_mcast_revert_e1h;
3160                 mcast_obj->get_registry_size =
3161                     ecore_mcast_get_registry_size_aprox;
3162                 mcast_obj->set_registry_size =
3163                     ecore_mcast_set_registry_size_aprox;
3164         } else {
3165                 mcast_obj->config_mcast = ecore_mcast_setup_e2;
3166                 mcast_obj->enqueue_cmd = ecore_mcast_enqueue_cmd;
3167                 mcast_obj->hdl_restore = ecore_mcast_handle_restore_cmd_e2;
3168                 mcast_obj->check_pending = ecore_mcast_check_pending;
3169                 mcast_obj->max_cmd_len = 16;
3170                 mcast_obj->wait_comp = ecore_mcast_wait;
3171                 mcast_obj->set_one_rule = ecore_mcast_set_one_rule_e2;
3172                 mcast_obj->validate = ecore_mcast_validate_e2;
3173                 mcast_obj->revert = ecore_mcast_revert_e2;
3174                 mcast_obj->get_registry_size =
3175                     ecore_mcast_get_registry_size_aprox;
3176                 mcast_obj->set_registry_size =
3177                     ecore_mcast_set_registry_size_aprox;
3178         }
3179 }
3180
3181 /*************************** Credit handling **********************************/
3182
3183 /**
3184  * atomic_add_ifless - add if the result is less than a given value.
3185  *
3186  * @v:  pointer of type ecore_atomic_t
3187  * @a:  the amount to add to v...
3188  * @u:  ...if (v + a) is less than u.
3189  *
3190  * returns TRUE if (v + a) was less than u, and FALSE otherwise.
3191  *
3192  */
3193 static int __atomic_add_ifless(ecore_atomic_t *v, int a, int u)
3194 {
3195         int c, old;
3196
3197         c = ECORE_ATOMIC_READ(v);
3198         for (;;) {
3199                 if (ECORE_UNLIKELY(c + a >= u))
3200                         return FALSE;
3201
3202                 old = ECORE_ATOMIC_CMPXCHG((v), c, c + a);
3203                 if (ECORE_LIKELY(old == c))
3204                         break;
3205                 c = old;
3206         }
3207
3208         return TRUE;
3209 }
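
/* Illustrative sketch (standalone, not part of the driver): the same
 * add-if-less pattern expressed with C11 atomics. As in the
 * ECORE_ATOMIC_CMPXCHG() loop above, the compare-exchange retries
 * until it wins the race on an unchanged value.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

static bool atomic_add_ifless_c11(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        for (;;) {
                if (c + a >= u)
                        return false;
                /* On failure, c is reloaded with the current value */
                if (atomic_compare_exchange_weak(v, &c, c + a))
                        return true;
        }
}
#endif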
3210
3211 /**
3212  * atomic_dec_ifmoe - decrement if the result is >= a given value.
3213  *
3214  * @v:  pointer of type ecore_atomic_t
3215  * @a:  the amount to subtract from v...
3216  * @u:  ...if (v - a) is greater than or equal to u.
3217  *
3218  * returns TRUE if (v - a) was greater than or equal to u, and FALSE
3219  * otherwise.
3220  */
3221 static int __atomic_dec_ifmoe(ecore_atomic_t *v, int a, int u)
3222 {
3223         int c, old;
3224
3225         c = ECORE_ATOMIC_READ(v);
3226         for (;;) {
3227                 if (ECORE_UNLIKELY(c - a < u))
3228                         return FALSE;
3229
3230                 old = ECORE_ATOMIC_CMPXCHG((v), c, c - a);
3231                 if (ECORE_LIKELY(old == c))
3232                         break;
3233                 c = old;
3234         }
3235
3236         return TRUE;
3237 }
3238
3239 static int ecore_credit_pool_get(struct ecore_credit_pool_obj *o, int cnt)
3240 {
3241         int rc;
3242
3243         ECORE_SMP_MB();
3244         rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3245         ECORE_SMP_MB();
3246
3247         return rc;
3248 }
3249
3250 static int ecore_credit_pool_put(struct ecore_credit_pool_obj *o, int cnt)
3251 {
3252         int rc;
3253
3254         ECORE_SMP_MB();
3255
3256         /* Don't allow a refill if credit + cnt > pool_sz */
3257         rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3258
3259         ECORE_SMP_MB();
3260
3261         return rc;
3262 }
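
/* Illustrative note (hypothetical values): with pool_sz == 8 and
 * credit currently 3, get(o, 2) succeeds and leaves credit == 1;
 * a further get(o, 2) fails since 1 - 2 < 0, and put(o, 8) is
 * refused because 1 + 8 would exceed pool_sz.
 */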
3263
3264 static int ecore_credit_pool_check(struct ecore_credit_pool_obj *o)
3265 {
3266         int cur_credit;
3267
3268         ECORE_SMP_MB();
3269         cur_credit = ECORE_ATOMIC_READ(&o->credit);
3270
3271         return cur_credit;
3272 }
3273
3274 static int
3275 ecore_credit_pool_always_TRUE(__rte_unused struct ecore_credit_pool_obj *o,
3276                               __rte_unused int cnt)
3277 {
3278         return TRUE;
3279 }
3280
3281 static int ecore_credit_pool_get_entry(struct ecore_credit_pool_obj *o,
3282                                        int *offset)
3283 {
3284         int idx, vec, i;
3285
3286         *offset = -1;
3287
3288         /* Find "internal cam-offset" then add to base for this object... */
3289         for (vec = 0; vec < ECORE_POOL_VEC_SIZE; vec++) {
3290
3291                 /* Skip the current vector if there are no free entries in it */
3292                 if (!o->pool_mirror[vec])
3293                         continue;
3294
3295                 /* If we've got here we are going to find a free entry */
3296                 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3297                      i < BIT_VEC64_ELEM_SZ; idx++, i++)
3298
3299                         if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3300                                 /* Got one!! */
3301                                 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3302                                 *offset = o->base_pool_offset + idx;
3303                                 return TRUE;
3304                         }
3305         }
3306
3307         return FALSE;
3308 }
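
/* Illustrative note: pool_mirror is a vector of 64-bit words
 * (BIT_VEC64_ELEM_SZ bits each), so free entry idx lives in word
 * idx / BIT_VEC64_ELEM_SZ. E.g., assuming base_pool_offset == 16,
 * finding free bit idx == 3 yields CAM offset 16 + 3 == 19.
 */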
3309
3310 static int ecore_credit_pool_put_entry(struct ecore_credit_pool_obj *o,
3311                                        int offset)
3312 {
3313         if (offset < o->base_pool_offset)
3314                 return FALSE;
3315
3316         offset -= o->base_pool_offset;
3317
3318         if (offset >= o->pool_sz)
3319                 return FALSE;
3320
3321         /* Return the entry to the pool */
3322         BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3323
3324         return TRUE;
3325 }
3326
3327 static int
3328 ecore_credit_pool_put_entry_always_TRUE(__rte_unused struct ecore_credit_pool_obj *o,
3329                                         __rte_unused int offset)
3330 {
3331         return TRUE;
3332 }
3333
3334 static int
3335 ecore_credit_pool_get_entry_always_TRUE(__rte_unused struct ecore_credit_pool_obj *o,
3336                                         __rte_unused int *offset)
3337 {
3338         *offset = -1;
3339         return TRUE;
3340 }
3341
3342 /**
3343  * ecore_init_credit_pool - initialize credit pool internals.
3344  *
3345  * @p:          credit pool object
3346  * @base:       Base entry in the CAM to use.
3347  * @credit:     pool size.
3348  *
3349  * If base is negative no CAM entries handling will be performed.
3350  * If credit is negative pool operations will always succeed (unlimited pool).
3351  *
3352  */
3353 static void ecore_init_credit_pool(struct ecore_credit_pool_obj *p,
3354                                    int base, int credit)
3355 {
3356         /* Zero the object first */
3357         ECORE_MEMSET(p, 0, sizeof(*p));
3358
3359         /* Set the table to all 1s */
3360         ECORE_MEMSET(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3361
3362         /* Init a pool as full */
3363         ECORE_ATOMIC_SET(&p->credit, credit);
3364
3365         /* The total pool size */
3366         p->pool_sz = credit;
3367
3368         p->base_pool_offset = base;
3369
3370         /* Commit the change */
3371         ECORE_SMP_MB();
3372
3373         p->check = ecore_credit_pool_check;
3374
3375         /* if pool credit is negative - disable the checks */
3376         if (credit >= 0) {
3377                 p->put = ecore_credit_pool_put;
3378                 p->get = ecore_credit_pool_get;
3379                 p->put_entry = ecore_credit_pool_put_entry;
3380                 p->get_entry = ecore_credit_pool_get_entry;
3381         } else {
3382                 p->put = ecore_credit_pool_always_TRUE;
3383                 p->get = ecore_credit_pool_always_TRUE;
3384                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3385                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3386         }
3387
3388         /* If base is negative - disable entries handling */
3389         if (base < 0) {
3390                 p->put_entry = ecore_credit_pool_put_entry_always_TRUE;
3391                 p->get_entry = ecore_credit_pool_get_entry_always_TRUE;
3392         }
3393 }
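
/* Illustrative sketch (hypothetical values): a bounded pool of 8 CAM
 * entries starting at CAM offset 32, and an unlimited pool with CAM
 * entry handling disabled.
 *
 *     ecore_init_credit_pool(&pool_cam,  32,  8);
 *     ecore_init_credit_pool(&pool_free, -1, -1);
 */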
3394
3395 void ecore_init_mac_credit_pool(struct bnx2x_softc *sc,
3396                                 struct ecore_credit_pool_obj *p,
3397                                 uint8_t func_id, uint8_t func_num)
3398 {
3399
3400 #define ECORE_CAM_SIZE_EMUL 5
3401
3402         int cam_sz;
3403
3404         if (CHIP_IS_E1H(sc)) {
3405                 /* CAM credit is equally divided between all active
3406                  * functions on the PORT.
3407                  */
3408                 if (func_num > 0) {
3409                         if (!CHIP_REV_IS_SLOW(sc))
3410                                 cam_sz = (MAX_MAC_CREDIT_E1H / (2 * func_num));
3411                         else
3412                                 cam_sz = ECORE_CAM_SIZE_EMUL;
3413                         ecore_init_credit_pool(p, func_id * cam_sz, cam_sz);
3414                 } else {
3415                         /* this should never happen! Block MAC operations. */
3416                         ecore_init_credit_pool(p, 0, 0);
3417                 }
3418
3419         } else {
3420
3421                 /*
3422                  * CAM credit is equally divided between all active
3423                  * functions on the PATH.
3424                  */
3425                 if (func_num > 0) {
3426                         if (!CHIP_REV_IS_SLOW(sc))
3427                                 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3428                         else
3429                                 cam_sz = ECORE_CAM_SIZE_EMUL;
3430
3431                         /* No need for CAM entries handling for 57712 and
3432                          * newer.
3433                          */
3434                         ecore_init_credit_pool(p, -1, cam_sz);
3435                 } else {
3436                         /* this should never happen! Block MAC operations. */
3437                         ecore_init_credit_pool(p, 0, 0);
3438                 }
3439         }
3440 }
3441
3442 void ecore_init_vlan_credit_pool(struct bnx2x_softc *sc,
3443                                  struct ecore_credit_pool_obj *p,
3444                                  uint8_t func_id, uint8_t func_num)
3445 {
3446         if (CHIP_IS_E1x(sc)) {
3447                 /* There is no VLAN credit in HW on 57711; only
3448                  * MAC / MAC-VLAN pairs can be set.
3449                  */
3450                 ecore_init_credit_pool(p, 0, -1);
3451         } else {
3452                 /* CAM credit is equally divided between all active functions
3453                  * on the PATH.
3454                  */
3455                 if (func_num > 0) {
3456                         int credit = MAX_VLAN_CREDIT_E2 / func_num;
3457                         ecore_init_credit_pool(p, func_id * credit, credit);
3458                 } else
3459                         /* this should never happen! Block VLAN operations. */
3460                         ecore_init_credit_pool(p, 0, 0);
3461         }
3462 }
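
/* Illustrative arithmetic (constants assumed from the ecore headers):
 * with MAX_MAC_CREDIT_E2 == 272 and four active functions on the path,
 * each function gets 272 / 4 == 68 MAC CAM credits; on E1H the port
 * total is additionally halved (MAX_MAC_CREDIT_E1H / (2 * func_num)).
 */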
3463
3464 /****************** RSS Configuration ******************/
3465
3466 /**
3467  * ecore_setup_rss - configure RSS
3468  *
3469  * @sc:         device handle
3470  * @p:          rss configuration
3471  *
3472  * Sends an RSS UPDATE ramrod.
3473  */
3474 static int ecore_setup_rss(struct bnx2x_softc *sc,
3475                            struct ecore_config_rss_params *p)
3476 {
3477         struct ecore_rss_config_obj *o = p->rss_obj;
3478         struct ecore_raw_obj *r = &o->raw;
3479         struct eth_rss_update_ramrod_data *data =
3480             (struct eth_rss_update_ramrod_data *)(r->rdata);
3481         uint8_t rss_mode = 0;
3482         int rc;
3483
3484         ECORE_MEMSET(data, 0, sizeof(*data));
3485
3486         ECORE_MSG(sc, "Configuring RSS");
3487
3488         /* Set an echo field */
3489         data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
3490                                        (r->state << ECORE_SWCID_SHIFT));
3491
3492         /* RSS mode */
3493         if (ECORE_TEST_BIT(ECORE_RSS_MODE_DISABLED, &p->rss_flags))
3494                 rss_mode = ETH_RSS_MODE_DISABLED;
3495         else if (ECORE_TEST_BIT(ECORE_RSS_MODE_REGULAR, &p->rss_flags))
3496                 rss_mode = ETH_RSS_MODE_REGULAR;
3497
3498         data->rss_mode = rss_mode;
3499
3500         ECORE_MSG(sc, "rss_mode=%d", rss_mode);
3501
3502         /* RSS capabilities */
3503         if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
3504                 data->capabilities |=
3505                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
3506
3507         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_TCP, &p->rss_flags))
3508                 data->capabilities |=
3509                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
3510
3511         if (ECORE_TEST_BIT(ECORE_RSS_IPV4_UDP, &p->rss_flags))
3512                 data->capabilities |=
3513                     ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
3514
3515         if (ECORE_TEST_BIT(ECORE_RSS_IPV6, &p->rss_flags))
3516                 data->capabilities |=
3517                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
3518
3519         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_TCP, &p->rss_flags))
3520                 data->capabilities |=
3521                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
3522
3523         if (ECORE_TEST_BIT(ECORE_RSS_IPV6_UDP, &p->rss_flags))
3524                 data->capabilities |=
3525                     ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
3526
3527         if (ECORE_TEST_BIT(ECORE_RSS_TUNNELING, &p->rss_flags)) {
3528                 data->udp_4tuple_dst_port_mask =
3529                     ECORE_CPU_TO_LE16(p->tunnel_mask);
3530                 data->udp_4tuple_dst_port_value =
3531                     ECORE_CPU_TO_LE16(p->tunnel_value);
3532         }
3533
3534         /* Hashing mask */
3535         data->rss_result_mask = p->rss_result_mask;
3536
3537         /* RSS engine ID */
3538         data->rss_engine_id = o->engine_id;
3539
3540         ECORE_MSG(sc, "rss_engine_id=%d", data->rss_engine_id);
3541
3542         /* Indirection table */
3543         ECORE_MEMCPY(data->indirection_table, p->ind_table,
3544                      T_ETH_INDIRECTION_TABLE_SIZE);
3545
3546         /* Remember the last configuration */
3547         ECORE_MEMCPY(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
3548
3549         /* RSS keys */
3550         if (ECORE_TEST_BIT(ECORE_RSS_SET_SRCH, &p->rss_flags)) {
3551                 ECORE_MEMCPY(&data->rss_key[0], &p->rss_key[0],
3552                              sizeof(data->rss_key));
3553                 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
3554         }
3555
3556         /* No need for an explicit memory barrier here: ordering
3557          * between the write to the SPQ element and the update of
3558          * the SPQ producer (which involves a memory read) is
3559          * enforced by the full memory barrier inside
3560          * ecore_sp_post().
3561          */
3562
3563         /* Send a ramrod */
3564         rc = ecore_sp_post(sc,
3565                            RAMROD_CMD_ID_ETH_RSS_UPDATE,
3566                            r->cid, r->rdata_mapping, ETH_CONNECTION_TYPE);
3567
3568         if (rc < 0)
3569                 return rc;
3570
3571         return ECORE_PENDING;
3572 }
3573
3574 int ecore_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *p)
3575 {
3576         int rc;
3577         struct ecore_rss_config_obj *o = p->rss_obj;
3578         struct ecore_raw_obj *r = &o->raw;
3579
3580         /* Do nothing if only driver cleanup was requested */
3581         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
3582                 return ECORE_SUCCESS;
3583
3584         r->set_pending(r);
3585
3586         rc = o->config_rss(sc, p);
3587         if (rc < 0) {
3588                 r->clear_pending(r);
3589                 return rc;
3590         }
3591
3592         if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &p->ramrod_flags))
3593                 rc = r->wait_comp(sc, r);
3594
3595         return rc;
3596 }
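
/* Illustrative sketch (hypothetical caller, not compiled into the
 * driver): filling a minimal RSS parameter block and submitting it
 * through ecore_config_rss(). Field and flag names follow the
 * definitions used above; the mask value is an example only.
 */
#if 0
static int example_config_rss(struct bnx2x_softc *sc,
                              struct ecore_rss_config_obj *rss_obj,
                              const uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE])
{
        struct ecore_config_rss_params params = { 0 };

        params.rss_obj = rss_obj;
        params.rss_result_mask = 0x7;   /* fold the hash into 8 queues */

        ECORE_SET_BIT(ECORE_RSS_MODE_REGULAR, &params.rss_flags);
        ECORE_SET_BIT(ECORE_RSS_IPV4, &params.rss_flags);
        ECORE_SET_BIT(ECORE_RSS_IPV4_TCP, &params.rss_flags);

        ECORE_MEMCPY(params.ind_table, ind_table,
                     T_ETH_INDIRECTION_TABLE_SIZE);

        /* Block until the ramrod completes */
        ECORE_SET_BIT(RAMROD_COMP_WAIT, &params.ramrod_flags);

        return ecore_config_rss(sc, &params);
}
#endif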
3597
3598 void ecore_init_rss_config_obj(struct ecore_rss_config_obj *rss_obj,
3599                                uint8_t cl_id, uint32_t cid, uint8_t func_id,
3600                                uint8_t engine_id, void *rdata,
3601                                ecore_dma_addr_t rdata_mapping, int state,
3602                                unsigned long *pstate, ecore_obj_type type)
3603 {
3604         ecore_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
3605                            rdata_mapping, state, pstate, type);
3606
3607         rss_obj->engine_id = engine_id;
3608         rss_obj->config_rss = ecore_setup_rss;
3609 }
3610
3611 /********************** Queue state object ***********************************/
3612
3613 /**
3614  * ecore_queue_state_change - perform Queue state change transition
3615  *
3616  * @sc:         device handle
3617  * @params:     parameters to perform the transition
3618  *
3619  * returns 0 in case of a successfully completed transition, a negative error
3620  * code in case of failure, or a positive (EBUSY) value if a completion is
3621  * still pending (possible only if RAMROD_COMP_WAIT is not set in
3622  * params->ramrod_flags for asynchronous commands).
3623  *
3624  */
3625 int ecore_queue_state_change(struct bnx2x_softc *sc,
3626                              struct ecore_queue_state_params *params)
3627 {
3628         struct ecore_queue_sp_obj *o = params->q_obj;
3629         int rc, pending_bit;
3630         unsigned long *pending = &o->pending;
3631
3632         /* Check that the requested transition is legal */
3633         rc = o->check_transition(sc, o, params);
3634         if (rc) {
3635                 PMD_DRV_LOG(ERR, sc, "check transition returned an error. rc %d",
3636                             rc);
3637                 return ECORE_INVAL;
3638         }
3639
3640         /* Set "pending" bit */
3641         ECORE_MSG(sc, "pending bit was=%lx", o->pending);
3642         pending_bit = o->set_pending(o, params);
3643         ECORE_MSG(sc, "pending bit now=%lx", o->pending);
3644
3645         /* Don't send a command if only driver cleanup was requested */
3646         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
3647                 o->complete_cmd(sc, o, pending_bit);
3648         else {
3649                 /* Send a ramrod */
3650                 rc = o->send_cmd(sc, params);
3651                 if (rc) {
3652                         o->next_state = ECORE_Q_STATE_MAX;
3653                         ECORE_CLEAR_BIT(pending_bit, pending);
3654                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3655                         return rc;
3656                 }
3657
3658                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
3659                         rc = o->wait_comp(sc, o, pending_bit);
3660                         if (rc)
3661                                 return rc;
3662
3663                         return ECORE_SUCCESS;
3664                 }
3665         }
3666
3667         return ECORE_RET_PENDING(pending_bit, pending);
3668 }
3669
3670 static int ecore_queue_set_pending(struct ecore_queue_sp_obj *obj,
3671                                    struct ecore_queue_state_params *params)
3672 {
3673         enum ecore_queue_cmd cmd = params->cmd, bit;
3674
3675         /* ACTIVATE and DEACTIVATE commands are implemented on top of
3676          * UPDATE command.
3677          */
3678         if ((cmd == ECORE_Q_CMD_ACTIVATE) || (cmd == ECORE_Q_CMD_DEACTIVATE))
3679                 bit = ECORE_Q_CMD_UPDATE;
3680         else
3681                 bit = cmd;
3682
3683         ECORE_SET_BIT(bit, &obj->pending);
3684         return bit;
3685 }
3686
3687 static int ecore_queue_wait_comp(struct bnx2x_softc *sc,
3688                                  struct ecore_queue_sp_obj *o,
3689                                  enum ecore_queue_cmd cmd)
3690 {
3691         return ecore_state_wait(sc, cmd, &o->pending);
3692 }
3693
3694 /**
3695  * ecore_queue_comp_cmd - complete the state change command.
3696  *
3697  * @sc:         device handle
3698  * @o:          queue state object
3699  * @cmd:        completed command
3700  *
3701  * Checks that the arrived completion is expected.
3702  */
3703 static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
3704                                 struct ecore_queue_sp_obj *o,
3705                                 enum ecore_queue_cmd cmd)
3706 {
3707         unsigned long cur_pending = o->pending;
3708
3709         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
3710                 PMD_DRV_LOG(ERR, sc,
3711                             "Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
3712                             cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
3713                             cur_pending, o->next_state);
3714                 return ECORE_INVAL;
3715         }
3716
3717         if (o->next_tx_only >= o->max_cos)
3718                 /* >= because the number of tx-only connections must always
3719                  * be strictly smaller than max_cos, since the primary
3720                  * connection already occupies COS 0
3721                  */
3721                 PMD_DRV_LOG(ERR, sc,
3722                             "illegal value for next tx_only: %d. max cos was %d",
3723                             o->next_tx_only, o->max_cos);
3724
3725         ECORE_MSG(sc, "Completing command %d for queue %d, setting state to %d",
3726                   cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
3727
3728         if (o->next_tx_only)    /* print num tx-only if any exist */
3729                 ECORE_MSG(sc, "primary cid %d: num tx-only cons %d",
3730                           o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
3731
3732         o->state = o->next_state;
3733         o->num_tx_only = o->next_tx_only;
3734         o->next_state = ECORE_Q_STATE_MAX;
3735
3736         /* It's important that o->state and o->next_state are
3737          * updated before o->pending.
3738          */
3739         wmb();
3740
3741         ECORE_CLEAR_BIT(cmd, &o->pending);
3742         ECORE_SMP_MB_AFTER_CLEAR_BIT();
3743
3744         return ECORE_SUCCESS;
3745 }
3746
3747 static void
3748 ecore_q_fill_setup_data_e2(struct ecore_queue_state_params *cmd_params,
3749                            struct client_init_ramrod_data *data)
3750 {
3751         struct ecore_queue_setup_params *params = &cmd_params->params.setup;
3752
3753         /* Rx data */
3754
3755         /* IPv6 TPA supported for E2 and above only */
3756         data->rx.tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_IPV6,
3757                                           &params->flags) *
3758             CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
3759 }
3760
3761 static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
3762                                            struct ecore_queue_sp_obj *o,
3763                                            struct ecore_general_setup_params *params,
3764                                            struct client_init_general_data *gen_data,
3765                                            unsigned long *flags)
3766 {
3767         gen_data->client_id = o->cl_id;
3768
3769         if (ECORE_TEST_BIT(ECORE_Q_FLG_STATS, flags)) {
3770                 gen_data->statistics_counter_id = params->stat_id;
3771                 gen_data->statistics_en_flg = 1;
3772                 gen_data->statistics_zero_flg =
3773                     ECORE_TEST_BIT(ECORE_Q_FLG_ZERO_STATS, flags);
3774         } else
3775                 gen_data->statistics_counter_id =
3776                     DISABLE_STATISTIC_COUNTER_ID_VALUE;
3777
3778         gen_data->is_fcoe_flg = ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags);
3779         gen_data->activate_flg = ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE, flags);
3780         gen_data->sp_client_id = params->spcl_id;
3781         gen_data->mtu = ECORE_CPU_TO_LE16(params->mtu);
3782         gen_data->func_id = o->func_id;
3783
3784         gen_data->cos = params->cos;
3785
3786         gen_data->traffic_type =
3787             ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
3788             LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
3789
3790         ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d",
3791                   gen_data->activate_flg, gen_data->cos,
3792                   gen_data->statistics_en_flg);
3793 }
3794
3795 static void ecore_q_fill_init_tx_data(struct ecore_txq_setup_params *params,
3796                                       struct client_init_tx_data *tx_data,
3797                                       unsigned long *flags)
3798 {
3799         tx_data->enforce_security_flg =
3800             ECORE_TEST_BIT(ECORE_Q_FLG_TX_SEC, flags);
3801         tx_data->default_vlan = ECORE_CPU_TO_LE16(params->default_vlan);
3802         tx_data->default_vlan_flg = ECORE_TEST_BIT(ECORE_Q_FLG_DEF_VLAN, flags);
3803         tx_data->tx_switching_flg =
3804             ECORE_TEST_BIT(ECORE_Q_FLG_TX_SWITCH, flags);
3805         tx_data->anti_spoofing_flg =
3806             ECORE_TEST_BIT(ECORE_Q_FLG_ANTI_SPOOF, flags);
3807         tx_data->force_default_pri_flg =
3808             ECORE_TEST_BIT(ECORE_Q_FLG_FORCE_DEFAULT_PRI, flags);
3809         tx_data->refuse_outband_vlan_flg =
3810             ECORE_TEST_BIT(ECORE_Q_FLG_REFUSE_OUTBAND_VLAN, flags);
3811         tx_data->tunnel_non_lso_pcsum_location =
3812             ECORE_TEST_BIT(ECORE_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT :
3813             CSUM_ON_BD;
3814
3815         tx_data->tx_status_block_id = params->fw_sb_id;
3816         tx_data->tx_sb_index_number = params->sb_cq_index;
3817         tx_data->tss_leading_client_id = params->tss_leading_cl_id;
3818
3819         tx_data->tx_bd_page_base.lo =
3820             ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3821         tx_data->tx_bd_page_base.hi =
3822             ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3823
3824         /* Don't configure any Tx switching mode during queue SETUP */
3825         tx_data->state = 0;
3826 }
3827
3828 static void ecore_q_fill_init_pause_data(struct rxq_pause_params *params,
3829                                          struct client_init_rx_data *rx_data)
3830 {
3831         /* flow control data */
3832         rx_data->cqe_pause_thr_low = ECORE_CPU_TO_LE16(params->rcq_th_lo);
3833         rx_data->cqe_pause_thr_high = ECORE_CPU_TO_LE16(params->rcq_th_hi);
3834         rx_data->bd_pause_thr_low = ECORE_CPU_TO_LE16(params->bd_th_lo);
3835         rx_data->bd_pause_thr_high = ECORE_CPU_TO_LE16(params->bd_th_hi);
3836         rx_data->sge_pause_thr_low = ECORE_CPU_TO_LE16(params->sge_th_lo);
3837         rx_data->sge_pause_thr_high = ECORE_CPU_TO_LE16(params->sge_th_hi);
3838         rx_data->rx_cos_mask = ECORE_CPU_TO_LE16(params->pri_map);
3839 }
3840
3841 static void ecore_q_fill_init_rx_data(struct ecore_rxq_setup_params *params,
3842                                       struct client_init_rx_data *rx_data,
3843                                       unsigned long *flags)
3844 {
3845         rx_data->tpa_en = ECORE_TEST_BIT(ECORE_Q_FLG_TPA, flags) *
3846             CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
3847         rx_data->tpa_en |= ECORE_TEST_BIT(ECORE_Q_FLG_TPA_GRO, flags) *
3848             CLIENT_INIT_RX_DATA_TPA_MODE;
3849         rx_data->vmqueue_mode_en_flg = 0;
3850
3851         rx_data->extra_data_over_sgl_en_flg =
3852             ECORE_TEST_BIT(ECORE_Q_FLG_OOO, flags);
3853         rx_data->cache_line_alignment_log_size = params->cache_line_log;
3854         rx_data->enable_dynamic_hc = ECORE_TEST_BIT(ECORE_Q_FLG_DHC, flags);
3855         rx_data->client_qzone_id = params->cl_qzone_id;
3856         rx_data->max_agg_size = ECORE_CPU_TO_LE16(params->tpa_agg_sz);
3857
3858         /* Always start in DROP_ALL mode */
3859         rx_data->state = ECORE_CPU_TO_LE16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
3860                                            CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
3861
3862         /* We don't set drop flags */
3863         rx_data->drop_ip_cs_err_flg = 0;
3864         rx_data->drop_tcp_cs_err_flg = 0;
3865         rx_data->drop_ttl0_flg = 0;
3866         rx_data->drop_udp_cs_err_flg = 0;
3867         rx_data->inner_vlan_removal_enable_flg =
3868             ECORE_TEST_BIT(ECORE_Q_FLG_VLAN, flags);
3869         rx_data->outer_vlan_removal_enable_flg =
3870             ECORE_TEST_BIT(ECORE_Q_FLG_OV, flags);
3871         rx_data->status_block_id = params->fw_sb_id;
3872         rx_data->rx_sb_index_number = params->sb_cq_index;
3873         rx_data->max_tpa_queues = params->max_tpa_queues;
3874         rx_data->max_bytes_on_bd = ECORE_CPU_TO_LE16(params->buf_sz);
3875         rx_data->bd_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->dscr_map));
3876         rx_data->bd_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->dscr_map));
3877         rx_data->cqe_page_base.lo = ECORE_CPU_TO_LE32(U64_LO(params->rcq_map));
3878         rx_data->cqe_page_base.hi = ECORE_CPU_TO_LE32(U64_HI(params->rcq_map));
3879         rx_data->is_leading_rss = ECORE_TEST_BIT(ECORE_Q_FLG_LEADING_RSS,
3880                                                  flags);
3881
3882         if (ECORE_TEST_BIT(ECORE_Q_FLG_MCAST, flags)) {
3883                 rx_data->approx_mcast_engine_id = params->mcast_engine_id;
3884                 rx_data->is_approx_mcast = 1;
3885         }
3886
3887         rx_data->rss_engine_id = params->rss_engine_id;
3888
3889         /* silent vlan removal */
3890         rx_data->silent_vlan_removal_flg =
3891             ECORE_TEST_BIT(ECORE_Q_FLG_SILENT_VLAN_REM, flags);
3892         rx_data->silent_vlan_value =
3893             ECORE_CPU_TO_LE16(params->silent_removal_value);
3894         rx_data->silent_vlan_mask =
3895             ECORE_CPU_TO_LE16(params->silent_removal_mask);
3896 }
3897
3898 /* initialize the general, tx and rx parts of a queue object */
3899 static void ecore_q_fill_setup_data_cmn(struct bnx2x_softc *sc,
3900                                         struct ecore_queue_state_params *cmd_params,
3901                                         struct client_init_ramrod_data *data)
3902 {
3903         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3904                                        &cmd_params->params.setup.gen_params,
3905                                        &data->general,
3906                                        &cmd_params->params.setup.flags);
3907
3908         ecore_q_fill_init_tx_data(&cmd_params->params.setup.txq_params,
3909                                   &data->tx, &cmd_params->params.setup.flags);
3910
3911         ecore_q_fill_init_rx_data(&cmd_params->params.setup.rxq_params,
3912                                   &data->rx, &cmd_params->params.setup.flags);
3913
3914         ecore_q_fill_init_pause_data(&cmd_params->params.setup.pause_params,
3915                                      &data->rx);
3916 }
3917
3918 /* initialize the general and tx parts of a tx-only queue object */
3919 static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc,
3920                                        struct ecore_queue_state_params *cmd_params,
3921                                        struct tx_queue_init_ramrod_data *data)
3922 {
3923         ecore_q_fill_init_general_data(sc, cmd_params->q_obj,
3924                                        &cmd_params->params.tx_only.gen_params,
3925                                        &data->general,
3926                                        &cmd_params->params.tx_only.flags);
3927
3928         ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
3929                                   &data->tx, &cmd_params->params.tx_only.flags);
3930
3931         ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
3932                   cmd_params->q_obj->cids[0],
3933                   data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
3934 }
3935
3936 /**
3937  * ecore_q_init - init HW/FW queue
3938  *
3939  * @sc:         device handle
3940  * @params:
3941  *
3942  * HW/FW initial Queue configuration:
3943  *      - HC: Rx and Tx
3944  *      - CDU context validation
3945  *
3946  */
3947 static int ecore_q_init(struct bnx2x_softc *sc,
3948                         struct ecore_queue_state_params *params)
3949 {
3950         struct ecore_queue_sp_obj *o = params->q_obj;
3951         struct ecore_queue_init_params *init = &params->params.init;
3952         uint16_t hc_usec;
3953         uint8_t cos;
3954
3955         /* Tx HC configuration */
3956         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_TX, &o->type) &&
3957             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->tx.flags)) {
3958                 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
3959
3960                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->tx.fw_sb_id,
3961                                                init->tx.sb_cq_index,
3962                                                !ECORE_TEST_BIT
3963                                                (ECORE_Q_FLG_HC_EN,
3964                                                 &init->tx.flags), hc_usec);
3965         }
3966
3967         /* Rx HC configuration */
3968         if (ECORE_TEST_BIT(ECORE_Q_TYPE_HAS_RX, &o->type) &&
3969             ECORE_TEST_BIT(ECORE_Q_FLG_HC, &init->rx.flags)) {
3970                 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
3971
3972                 ECORE_UPDATE_COALESCE_SB_INDEX(sc, init->rx.fw_sb_id,
3973                                                init->rx.sb_cq_index,
3974                                                !ECORE_TEST_BIT
3975                                                (ECORE_Q_FLG_HC_EN,
3976                                                 &init->rx.flags), hc_usec);
3977         }
3978
3979         /* Set CDU context validation values */
3980         for (cos = 0; cos < o->max_cos; cos++) {
3981                 ECORE_MSG(sc, "setting context validation. cid %d, cos %d",
3982                           o->cids[cos], cos);
3983                 ECORE_MSG(sc, "context pointer %p", init->cxts[cos]);
3984                 ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
3985         }
3986
3987         /* As no ramrod is sent, complete the command immediately  */
3988         o->complete_cmd(sc, o, ECORE_Q_CMD_INIT);
3989
3990         ECORE_MMIOWB();
3991         ECORE_SMP_MB();
3992
3993         return ECORE_SUCCESS;
3994 }
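
/* Illustrative arithmetic: hc_rate is expressed in interrupts per
 * second, while the status block coalescing timeout is programmed in
 * microseconds, hence hc_usec = 1000000 / hc_rate. E.g. a rate of
 * 5000 ints/s yields a 200 us timeout; hc_rate == 0 programs 0.
 */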
3995
3996 static int ecore_q_send_setup_e1x(struct bnx2x_softc *sc,
3997                                   struct ecore_queue_state_params *params)
3998 {
3999         struct ecore_queue_sp_obj *o = params->q_obj;
4000         struct client_init_ramrod_data *rdata =
4001             (struct client_init_ramrod_data *)o->rdata;
4002         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4003         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4004
4005         /* Clear the ramrod data */
4006         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4007
4008         /* Fill the ramrod data */
4009         ecore_q_fill_setup_data_cmn(sc, params, rdata);
4010
4011         /* No need for an explicit memory barrier here: ordering
4012          * between the write to the SPQ element and the update of
4013          * the SPQ producer (which involves a memory read) is
4014          * enforced by the full memory barrier inside
4015          * ecore_sp_post().
4016          */
4017
4018         return ecore_sp_post(sc,
4019                              ramrod,
4020                              o->cids[ECORE_PRIMARY_CID_INDEX],
4021                              data_mapping, ETH_CONNECTION_TYPE);
4022 }
4023
4024 static int ecore_q_send_setup_e2(struct bnx2x_softc *sc,
4025                                  struct ecore_queue_state_params *params)
4026 {
4027         struct ecore_queue_sp_obj *o = params->q_obj;
4028         struct client_init_ramrod_data *rdata =
4029             (struct client_init_ramrod_data *)o->rdata;
4030         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4031         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4032
4033         /* Clear the ramrod data */
4034         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4035
4036         /* Fill the ramrod data */
4037         ecore_q_fill_setup_data_cmn(sc, params, rdata);
4038         ecore_q_fill_setup_data_e2(params, rdata);
4039
4040         /* No need for an explicit memory barrier here: ordering
4041          * between the write to the SPQ element and the update of
4042          * the SPQ producer (which involves a memory read) is
4043          * enforced by the full memory barrier inside
4044          * ecore_sp_post().
4045          */
4046
4047         return ecore_sp_post(sc,
4048                              ramrod,
4049                              o->cids[ECORE_PRIMARY_CID_INDEX],
4050                              data_mapping, ETH_CONNECTION_TYPE);
4051 }
4052
4053 static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc,
4054                                       struct ecore_queue_state_params *params)
4055 {
4056         struct ecore_queue_sp_obj *o = params->q_obj;
4057         struct tx_queue_init_ramrod_data *rdata =
4058             (struct tx_queue_init_ramrod_data *)o->rdata;
4059         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4060         int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4061         struct ecore_queue_setup_tx_only_params *tx_only_params =
4062             &params->params.tx_only;
4063         uint8_t cid_index = tx_only_params->cid_index;
4064
4065         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type)) {
4066                 ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
4067                 ECORE_MSG(sc, "sending forward tx-only ramrod");
4068         }
4068
4069         if (cid_index >= o->max_cos) {
4070                 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4071                             o->cl_id, cid_index);
4072                 return ECORE_INVAL;
4073         }
4074
4075         ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d",
4076                   tx_only_params->gen_params.cos,
4077                   tx_only_params->gen_params.spcl_id);
4078
4079         /* Clear the ramrod data */
4080         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4081
4082         /* Fill the ramrod data */
4083         ecore_q_fill_setup_tx_only(sc, params, rdata);
4084
4085         ECORE_MSG(sc,
4086                   "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
4087                   o->cids[cid_index], rdata->general.client_id,
4088                   rdata->general.sp_client_id, rdata->general.cos);
4089
4090         /* No need for an explicit memory barrier here: ordering
4091          * between the write to the SPQ element and the update of
4092          * the SPQ producer (which involves a memory read) is
4093          * enforced by the full memory barrier inside
4094          * ecore_sp_post().
4095          */
4096
4097         return ecore_sp_post(sc, ramrod, o->cids[cid_index],
4098                              data_mapping, ETH_CONNECTION_TYPE);
4099 }
4100
4101 static void ecore_q_fill_update_data(struct ecore_queue_sp_obj *obj,
4102                                      struct ecore_queue_update_params *params,
4103                                      struct client_update_ramrod_data *data)
4104 {
4105         /* Client ID of the client to update */
4106         data->client_id = obj->cl_id;
4107
4108         /* Function ID of the client to update */
4109         data->func_id = obj->func_id;
4110
4111         /* Default VLAN value */
4112         data->default_vlan = ECORE_CPU_TO_LE16(params->def_vlan);
4113
4114         /* Inner VLAN stripping */
4115         data->inner_vlan_removal_enable_flg =
4116             ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4117         data->inner_vlan_removal_change_flg =
4118             ECORE_TEST_BIT(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG,
4119                            &params->update_flags);
4120
4121         /* Outer VLAN stripping */
4122         data->outer_vlan_removal_enable_flg =
4123             ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4124         data->outer_vlan_removal_change_flg =
4125             ECORE_TEST_BIT(ECORE_Q_UPDATE_OUT_VLAN_REM_CHNG,
4126                            &params->update_flags);
4127
4128         /* Drop packets that have a source MAC that doesn't belong to
4129          * this Queue.
4130          */
4131         data->anti_spoofing_enable_flg =
4132             ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4133         data->anti_spoofing_change_flg =
4134             ECORE_TEST_BIT(ECORE_Q_UPDATE_ANTI_SPOOF_CHNG,
4135                            &params->update_flags);
4136
4137         /* Activate/Deactivate */
4138         data->activate_flg =
4139             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE, &params->update_flags);
4140         data->activate_change_flg =
4141             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4142
4143         /* Enable default VLAN */
4144         data->default_vlan_enable_flg =
4145             ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4146         data->default_vlan_change_flg =
4147             ECORE_TEST_BIT(ECORE_Q_UPDATE_DEF_VLAN_EN_CHNG,
4148                            &params->update_flags);
4149
4150         /* Silent VLAN removal */
4151         data->silent_vlan_change_flg =
4152             ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4153                            &params->update_flags);
4154         data->silent_vlan_removal_flg =
4155             ECORE_TEST_BIT(ECORE_Q_UPDATE_SILENT_VLAN_REM,
4156                            &params->update_flags);
4157         data->silent_vlan_value =
4158             ECORE_CPU_TO_LE16(params->silent_removal_value);
4159         data->silent_vlan_mask = ECORE_CPU_TO_LE16(params->silent_removal_mask);
4160
4161         /* Tx switching */
4162         data->tx_switching_flg =
4163             ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING, &params->update_flags);
4164         data->tx_switching_change_flg =
4165             ECORE_TEST_BIT(ECORE_Q_UPDATE_TX_SWITCHING_CHNG,
4166                            &params->update_flags);
4167 }
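/*
 * Every UPDATE field above travels as a value/enable bit paired with a
 * *_CHNG bit: the firmware only applies a field whose change bit is set,
 * so unrelated settings survive the ramrod. As an illustrative sketch
 * (not driver code; `q_params' is a caller-owned
 * struct ecore_queue_state_params), enabling inner VLAN stripping alone
 * would set both bits of that one pair:
 *
 *     struct ecore_queue_update_params *up = &q_params.params.update;
 *
 *     ECORE_SET_BIT_NA(ECORE_Q_UPDATE_IN_VLAN_REM, &up->update_flags);
 *     ECORE_SET_BIT_NA(ECORE_Q_UPDATE_IN_VLAN_REM_CHNG, &up->update_flags);
 *
 * ecore_q_send_activate()/ecore_q_send_deactivate() below use exactly
 * this pattern for the ACTIVATE pair.
 */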
4168
4169 static int ecore_q_send_update(struct bnx2x_softc *sc,
4170                                struct ecore_queue_state_params *params)
4171 {
4172         struct ecore_queue_sp_obj *o = params->q_obj;
4173         struct client_update_ramrod_data *rdata =
4174             (struct client_update_ramrod_data *)o->rdata;
4175         ecore_dma_addr_t data_mapping = o->rdata_mapping;
4176         struct ecore_queue_update_params *update_params =
4177             &params->params.update;
4178         uint8_t cid_index = update_params->cid_index;
4179
4180         if (cid_index >= o->max_cos) {
4181                 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4182                             o->cl_id, cid_index);
4183                 return ECORE_INVAL;
4184         }
4185
4186         /* Clear the ramrod data */
4187         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
4188
4189         /* Fill the ramrod data */
4190         ecore_q_fill_update_data(o, update_params, rdata);
4191
4192         /* No need for an explicit memory barrier here: although the
4193          * write to the SPQ element must be ordered before the update
4194          * of the SPQ producer, that update involves a memory read, and
4195          * a full memory barrier is already placed there
4196          * (inside ecore_sp_post()).
4197          */
4198
4199         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4200                              o->cids[cid_index], data_mapping,
4201                              ETH_CONNECTION_TYPE);
4202 }
4203
4204 /**
4205  * ecore_q_send_deactivate - send DEACTIVATE command
4206  *
4207  * @sc:         device handle
4208  * @params:     queue state parameters
4209  *
4210  * Implemented using the UPDATE command.
4211  */
4212 static int ecore_q_send_deactivate(struct bnx2x_softc *sc,
4213                                    struct ecore_queue_state_params *params)
4214 {
4215         struct ecore_queue_update_params *update = &params->params.update;
4216
4217         ECORE_MEMSET(update, 0, sizeof(*update));
4218
4219         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4220
4221         return ecore_q_send_update(sc, params);
4222 }
4223
4224 /**
4225  * ecore_q_send_activate - send ACTIVATE command
4226  *
4227  * @sc:         device handle
4228  * @params:     queue state parameters
4229  *
4230  * Implemented using the UPDATE command.
4231  */
4232 static int ecore_q_send_activate(struct bnx2x_softc *sc,
4233                                  struct ecore_queue_state_params *params)
4234 {
4235         struct ecore_queue_update_params *update = &params->params.update;
4236
4237         ECORE_MEMSET(update, 0, sizeof(*update));
4238
4239         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE, &update->update_flags);
4240         ECORE_SET_BIT_NA(ECORE_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4241
4242         return ecore_q_send_update(sc, params);
4243 }
4244
4245 static int ecore_q_send_update_tpa(__rte_unused struct bnx2x_softc *sc,
4246                                    __rte_unused
4247                                    struct ecore_queue_state_params *params)
4248 {
4249         /* Not implemented yet. */
4250         return -1;
4251 }
4252
4253 static int ecore_q_send_halt(struct bnx2x_softc *sc,
4254                              struct ecore_queue_state_params *params)
4255 {
4256         struct ecore_queue_sp_obj *o = params->q_obj;
4257
4258         /* build eth_halt_ramrod_data.client_id in a big-endian friendly way */
4259         ecore_dma_addr_t data_mapping = (ecore_dma_addr_t) o->cl_id;
4260
4261
4262         return ecore_sp_post(sc,
4263                              RAMROD_CMD_ID_ETH_HALT,
4264                              o->cids[ECORE_PRIMARY_CID_INDEX],
4265                              data_mapping, ETH_CONNECTION_TYPE);
4266 }
4267
4268 static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
4269                                 struct ecore_queue_state_params *params)
4270 {
4271         struct ecore_queue_sp_obj *o = params->q_obj;
4272         uint8_t cid_idx = params->params.cfc_del.cid_index;
4273
4274         if (cid_idx >= o->max_cos) {
4275                 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4276                             o->cl_id, cid_idx);
4277                 return ECORE_INVAL;
4278         }
4279
4280         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_CFC_DEL,
4281                              o->cids[cid_idx], 0, NONE_CONNECTION_TYPE);
4282 }
4283
4284 static int ecore_q_send_terminate(struct bnx2x_softc *sc,
4285                                   struct ecore_queue_state_params *params)
4286 {
4287         struct ecore_queue_sp_obj *o = params->q_obj;
4288         uint8_t cid_index = params->params.terminate.cid_index;
4289
4290         if (cid_index >= o->max_cos) {
4291                 PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
4292                             o->cl_id, cid_index);
4293                 return ECORE_INVAL;
4294         }
4295
4296         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_TERMINATE,
4297                              o->cids[cid_index], 0, ETH_CONNECTION_TYPE);
4298 }
4299
4300 static int ecore_q_send_empty(struct bnx2x_softc *sc,
4301                               struct ecore_queue_state_params *params)
4302 {
4303         struct ecore_queue_sp_obj *o = params->q_obj;
4304
4305         return ecore_sp_post(sc, RAMROD_CMD_ID_ETH_EMPTY,
4306                              o->cids[ECORE_PRIMARY_CID_INDEX], 0,
4307                              ETH_CONNECTION_TYPE);
4308 }
4309
4310 static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc,
4311                                     struct ecore_queue_state_params *params)
4312 {
4313         switch (params->cmd) {
4314         case ECORE_Q_CMD_INIT:
4315                 return ecore_q_init(sc, params);
4316         case ECORE_Q_CMD_SETUP_TX_ONLY:
4317                 return ecore_q_send_setup_tx_only(sc, params);
4318         case ECORE_Q_CMD_DEACTIVATE:
4319                 return ecore_q_send_deactivate(sc, params);
4320         case ECORE_Q_CMD_ACTIVATE:
4321                 return ecore_q_send_activate(sc, params);
4322         case ECORE_Q_CMD_UPDATE:
4323                 return ecore_q_send_update(sc, params);
4324         case ECORE_Q_CMD_UPDATE_TPA:
4325                 return ecore_q_send_update_tpa(sc, params);
4326         case ECORE_Q_CMD_HALT:
4327                 return ecore_q_send_halt(sc, params);
4328         case ECORE_Q_CMD_CFC_DEL:
4329                 return ecore_q_send_cfc_del(sc, params);
4330         case ECORE_Q_CMD_TERMINATE:
4331                 return ecore_q_send_terminate(sc, params);
4332         case ECORE_Q_CMD_EMPTY:
4333                 return ecore_q_send_empty(sc, params);
4334         default:
4335                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4336                 return ECORE_INVAL;
4337         }
4338 }
4339
4340 static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
4341                                     struct ecore_queue_state_params *params)
4342 {
4343         switch (params->cmd) {
4344         case ECORE_Q_CMD_SETUP:
4345                 return ecore_q_send_setup_e1x(sc, params);
4346         case ECORE_Q_CMD_INIT:
4347         case ECORE_Q_CMD_SETUP_TX_ONLY:
4348         case ECORE_Q_CMD_DEACTIVATE:
4349         case ECORE_Q_CMD_ACTIVATE:
4350         case ECORE_Q_CMD_UPDATE:
4351         case ECORE_Q_CMD_UPDATE_TPA:
4352         case ECORE_Q_CMD_HALT:
4353         case ECORE_Q_CMD_CFC_DEL:
4354         case ECORE_Q_CMD_TERMINATE:
4355         case ECORE_Q_CMD_EMPTY:
4356                 return ecore_queue_send_cmd_cmn(sc, params);
4357         default:
4358                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4359                 return ECORE_INVAL;
4360         }
4361 }
4362
4363 static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
4364                                    struct ecore_queue_state_params *params)
4365 {
4366         switch (params->cmd) {
4367         case ECORE_Q_CMD_SETUP:
4368                 return ecore_q_send_setup_e2(sc, params);
4369         case ECORE_Q_CMD_INIT:
4370         case ECORE_Q_CMD_SETUP_TX_ONLY:
4371         case ECORE_Q_CMD_DEACTIVATE:
4372         case ECORE_Q_CMD_ACTIVATE:
4373         case ECORE_Q_CMD_UPDATE:
4374         case ECORE_Q_CMD_UPDATE_TPA:
4375         case ECORE_Q_CMD_HALT:
4376         case ECORE_Q_CMD_CFC_DEL:
4377         case ECORE_Q_CMD_TERMINATE:
4378         case ECORE_Q_CMD_EMPTY:
4379                 return ecore_queue_send_cmd_cmn(sc, params);
4380         default:
4381                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
4382                 return ECORE_INVAL;
4383         }
4384 }
4385
4386 /**
4387  * ecore_queue_chk_transition - check state machine of a regular Queue
4388  *
4389  * @sc:         device handle
4390  * @o:          queue state object
4391  * @params:     queue state parameters
4392  *
4393  * Applies to a regular (i.e. not Forwarding) Queue. It both checks
4394  * if the requested command is legal in the current
4395  * state and, if it's legal, sets a `next_state' in the object
4396  * that will be used in the completion flow to set the `state'
4397  * of the object.
4398  *
4399  * returns 0 if a requested command is a legal transition,
4400  *         ECORE_INVAL otherwise.
4401  */
4402 static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
4403                                       struct ecore_queue_sp_obj *o,
4404                                       struct ecore_queue_state_params *params)
4405 {
4406         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4407         enum ecore_queue_cmd cmd = params->cmd;
4408         struct ecore_queue_update_params *update_params =
4409             &params->params.update;
4410         uint8_t next_tx_only = o->num_tx_only;
4411
4412         /* Forget all commands pending for completion if a driver-only
4413          * state transition has been requested.
4414          */
4415         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4416                 o->pending = 0;
4417                 o->next_state = ECORE_Q_STATE_MAX;
4418         }
4419
4420         /* Don't allow a next state transition if we are in the middle of
4421          * the previous one.
4422          */
4423         if (o->pending) {
4424                 PMD_DRV_LOG(ERR, sc, "Blocking transition since pending was %lx",
4425                             o->pending);
4426                 return ECORE_BUSY;
4427         }
4428
4429         switch (state) {
4430         case ECORE_Q_STATE_RESET:
4431                 if (cmd == ECORE_Q_CMD_INIT)
4432                         next_state = ECORE_Q_STATE_INITIALIZED;
4433
4434                 break;
4435         case ECORE_Q_STATE_INITIALIZED:
4436                 if (cmd == ECORE_Q_CMD_SETUP) {
4437                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4438                                            &params->params.setup.flags))
4439                                 next_state = ECORE_Q_STATE_ACTIVE;
4440                         else
4441                                 next_state = ECORE_Q_STATE_INACTIVE;
4442                 }
4443
4444                 break;
4445         case ECORE_Q_STATE_ACTIVE:
4446                 if (cmd == ECORE_Q_CMD_DEACTIVATE)
4447                         next_state = ECORE_Q_STATE_INACTIVE;
4448
4449                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4450                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4451                         next_state = ECORE_Q_STATE_ACTIVE;
4452
4453                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4454                         next_state = ECORE_Q_STATE_MULTI_COS;
4455                         next_tx_only = 1;
4456                 }
4457
4458                 else if (cmd == ECORE_Q_CMD_HALT)
4459                         next_state = ECORE_Q_STATE_STOPPED;
4460
4461                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4462                         /* If "active" state change is requested, update the
4463                          *  state accordingly.
4464                          */
4465                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4466                                            &update_params->update_flags) &&
4467                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4468                                             &update_params->update_flags))
4469                                 next_state = ECORE_Q_STATE_INACTIVE;
4470                         else
4471                                 next_state = ECORE_Q_STATE_ACTIVE;
4472                 }
4473
4474                 break;
4475         case ECORE_Q_STATE_MULTI_COS:
4476                 if (cmd == ECORE_Q_CMD_TERMINATE)
4477                         next_state = ECORE_Q_STATE_MCOS_TERMINATED;
4478
4479                 else if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4480                         next_state = ECORE_Q_STATE_MULTI_COS;
4481                         next_tx_only = o->num_tx_only + 1;
4482                 }
4483
4484                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4485                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4486                         next_state = ECORE_Q_STATE_MULTI_COS;
4487
4488                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4489                         /* If "active" state change is requested, update the
4490                          *  state accordingly.
4491                          */
4492                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4493                                            &update_params->update_flags) &&
4494                             !ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4495                                             &update_params->update_flags))
4496                                 next_state = ECORE_Q_STATE_INACTIVE;
4497                         else
4498                                 next_state = ECORE_Q_STATE_MULTI_COS;
4499                 }
4500
4501                 break;
4502         case ECORE_Q_STATE_MCOS_TERMINATED:
4503                 if (cmd == ECORE_Q_CMD_CFC_DEL) {
4504                         next_tx_only = o->num_tx_only - 1;
4505                         if (next_tx_only == 0)
4506                                 next_state = ECORE_Q_STATE_ACTIVE;
4507                         else
4508                                 next_state = ECORE_Q_STATE_MULTI_COS;
4509                 }
4510
4511                 break;
4512         case ECORE_Q_STATE_INACTIVE:
4513                 if (cmd == ECORE_Q_CMD_ACTIVATE)
4514                         next_state = ECORE_Q_STATE_ACTIVE;
4515
4516                 else if ((cmd == ECORE_Q_CMD_EMPTY) ||
4517                          (cmd == ECORE_Q_CMD_UPDATE_TPA))
4518                         next_state = ECORE_Q_STATE_INACTIVE;
4519
4520                 else if (cmd == ECORE_Q_CMD_HALT)
4521                         next_state = ECORE_Q_STATE_STOPPED;
4522
4523                 else if (cmd == ECORE_Q_CMD_UPDATE) {
4524                         /* If "active" state change is requested, update the
4525                          * state accordingly.
4526                          */
4527                         if (ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE_CHNG,
4528                                            &update_params->update_flags) &&
4529                             ECORE_TEST_BIT(ECORE_Q_UPDATE_ACTIVATE,
4530                                            &update_params->update_flags)) {
4531                                 if (o->num_tx_only == 0)
4532                                         next_state = ECORE_Q_STATE_ACTIVE;
4533                                 else    /* tx only queues exist for this queue */
4534                                         next_state = ECORE_Q_STATE_MULTI_COS;
4535                         } else
4536                                 next_state = ECORE_Q_STATE_INACTIVE;
4537                 }
4538
4539                 break;
4540         case ECORE_Q_STATE_STOPPED:
4541                 if (cmd == ECORE_Q_CMD_TERMINATE)
4542                         next_state = ECORE_Q_STATE_TERMINATED;
4543
4544                 break;
4545         case ECORE_Q_STATE_TERMINATED:
4546                 if (cmd == ECORE_Q_CMD_CFC_DEL)
4547                         next_state = ECORE_Q_STATE_RESET;
4548
4549                 break;
4550         default:
4551                 PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
4552         }
4553
4554         /* Transition is assured */
4555         if (next_state != ECORE_Q_STATE_MAX) {
4556                 ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
4557                           state, cmd, next_state);
4558                 o->next_state = next_state;
4559                 o->next_tx_only = next_tx_only;
4560                 return ECORE_SUCCESS;
4561         }
4562
4563         ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
4564
4565         return ECORE_INVAL;
4566 }
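/*
 * Summary of the regular-Queue state machine checked above (commands in
 * parentheses; EMPTY and UPDATE_TPA always leave the state unchanged):
 *
 *   RESET --(INIT)--> INITIALIZED --(SETUP)--> ACTIVE or INACTIVE
 *   ACTIVE <--(ACTIVATE/DEACTIVATE)--> INACTIVE
 *   ACTIVE --(SETUP_TX_ONLY)--> MULTI_COS --(TERMINATE)--> MCOS_TERMINATED
 *   MCOS_TERMINATED --(CFC_DEL)--> ACTIVE or MULTI_COS (per num_tx_only)
 *   ACTIVE/INACTIVE --(HALT)--> STOPPED --(TERMINATE)--> TERMINATED
 *   TERMINATED --(CFC_DEL)--> RESET
 *
 * UPDATE with the ACTIVATE_CHNG bit may also move a queue between the
 * ACTIVE/MULTI_COS and INACTIVE states, as coded above.
 */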
4567
4568 /**
4569  * ecore_queue_chk_fwd_transition - check state machine of a Forwarding Queue.
4570  *
4571  * @sc:         device handle
4572  * @o:          queue state object
4573  * @params:     queue state parameters
4574  *
4575  * It both checks if the requested command is legal in the current
4576  * state and, if it's legal, sets a `next_state' in the object
4577  * that will be used in the completion flow to set the `state'
4578  * of the object.
4579  *
4580  * returns 0 if a requested command is a legal transition,
4581  *         ECORE_INVAL otherwise.
4582  */
4583 static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
4584                                           struct ecore_queue_sp_obj *o,
4585                                           struct ecore_queue_state_params
4586                                           *params)
4587 {
4588         enum ecore_q_state state = o->state, next_state = ECORE_Q_STATE_MAX;
4589         enum ecore_queue_cmd cmd = params->cmd;
4590
4591         switch (state) {
4592         case ECORE_Q_STATE_RESET:
4593                 if (cmd == ECORE_Q_CMD_INIT)
4594                         next_state = ECORE_Q_STATE_INITIALIZED;
4595
4596                 break;
4597         case ECORE_Q_STATE_INITIALIZED:
4598                 if (cmd == ECORE_Q_CMD_SETUP_TX_ONLY) {
4599                         if (ECORE_TEST_BIT(ECORE_Q_FLG_ACTIVE,
4600                                            &params->params.tx_only.flags))
4601                                 next_state = ECORE_Q_STATE_ACTIVE;
4602                         else
4603                                 next_state = ECORE_Q_STATE_INACTIVE;
4604                 }
4605
4606                 break;
4607         case ECORE_Q_STATE_ACTIVE:
4608         case ECORE_Q_STATE_INACTIVE:
4609                 if (cmd == ECORE_Q_CMD_CFC_DEL)
4610                         next_state = ECORE_Q_STATE_RESET;
4611
4612                 break;
4613         default:
4614                 PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
4615         }
4616
4617         /* Transition is assured */
4618         if (next_state != ECORE_Q_STATE_MAX) {
4619                 ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
4620                           state, cmd, next_state);
4621                 o->next_state = next_state;
4622                 return ECORE_SUCCESS;
4623         }
4624
4625         ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
4626         return ECORE_INVAL;
4627 }
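/*
 * Summary of the (much simpler) Forwarding-Queue state machine checked
 * above:
 *
 *   RESET --(INIT)--> INITIALIZED --(SETUP_TX_ONLY)--> ACTIVE or INACTIVE
 *   ACTIVE/INACTIVE --(CFC_DEL)--> RESET
 */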
4628
4629 void ecore_init_queue_obj(struct bnx2x_softc *sc,
4630                           struct ecore_queue_sp_obj *obj,
4631                           uint8_t cl_id, uint32_t *cids, uint8_t cid_cnt,
4632                           uint8_t func_id, void *rdata,
4633                           ecore_dma_addr_t rdata_mapping, unsigned long type)
4634 {
4635         ECORE_MEMSET(obj, 0, sizeof(*obj));
4636
4637         /* We support at most ECORE_MULTI_TX_COS Tx CoS values at the moment */
4638         ECORE_BUG_ON(ECORE_MULTI_TX_COS < cid_cnt);
4639
4640         rte_memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
4641         obj->max_cos = cid_cnt;
4642         obj->cl_id = cl_id;
4643         obj->func_id = func_id;
4644         obj->rdata = rdata;
4645         obj->rdata_mapping = rdata_mapping;
4646         obj->type = type;
4647         obj->next_state = ECORE_Q_STATE_MAX;
4648
4649         if (CHIP_IS_E1x(sc))
4650                 obj->send_cmd = ecore_queue_send_cmd_e1x;
4651         else
4652                 obj->send_cmd = ecore_queue_send_cmd_e2;
4653
4654         if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &type))
4655                 obj->check_transition = ecore_queue_chk_fwd_transition;
4656         else
4657                 obj->check_transition = ecore_queue_chk_transition;
4658
4659         obj->complete_cmd = ecore_queue_comp_cmd;
4660         obj->wait_comp = ecore_queue_wait_comp;
4661         obj->set_pending = ecore_queue_set_pending;
4662 }
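/*
 * Illustrative usage sketch, not driver code: once the object is
 * initialized, every transition goes through the generic queue
 * state-change entry point (assumed to be ecore_queue_state_change(),
 * defined earlier in this file) with a command and, typically,
 * RAMROD_COMP_WAIT set so the call blocks until the ramrod completes.
 * The `q_obj', `cids' and `q_type' names below are hypothetical
 * caller-side variables:
 *
 *     struct ecore_queue_state_params q_params = { 0 };
 *
 *     ecore_init_queue_obj(sc, &q_obj, cl_id, cids, 1, func_id,
 *                          rdata, rdata_mapping, q_type);
 *     q_params.q_obj = &q_obj;
 *     q_params.cmd = ECORE_Q_CMD_INIT;
 *     ECORE_SET_BIT(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *     rc = ecore_queue_state_change(sc, &q_params);
 */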
4663
4664 /********************** Function state object *********************************/
4665 enum ecore_func_state ecore_func_get_state(__rte_unused struct bnx2x_softc *sc,
4666                                            struct ecore_func_sp_obj *o)
4667 {
4668         /* in the middle of a transaction - return INVALID state */
4669         if (o->pending)
4670                 return ECORE_F_STATE_MAX;
4671
4672         /* Ensure the ordering of the reads of o->pending and o->state:
4673          * o->pending must be read first.
4674          */
4675         rmb();
4676
4677         return o->state;
4678 }
4679
4680 static int ecore_func_wait_comp(struct bnx2x_softc *sc,
4681                                 struct ecore_func_sp_obj *o,
4682                                 enum ecore_func_cmd cmd)
4683 {
4684         return ecore_state_wait(sc, cmd, &o->pending);
4685 }
4686
4687 /**
4688  * ecore_func_state_change_comp - complete the state machine transition
4689  *
4690  * @sc:         device handle
4691  * @o:          function state object
4692  * @cmd:        command whose completion arrived
4693  *
4694  * Called on state change transition. Completes the state
4695  * machine transition only - no HW interaction.
4696  */
4697 static int
4698 ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
4699                              struct ecore_func_sp_obj *o,
4700                              enum ecore_func_cmd cmd)
4701 {
4702         unsigned long cur_pending = o->pending;
4703
4704         if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
4705                 PMD_DRV_LOG(ERR, sc,
4706                             "Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
4707                             cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
4708                             o->next_state);
4709                 return ECORE_INVAL;
4710         }
4711
4712         ECORE_MSG(sc, "Completing command %d for func %d, setting state to %d",
4713                   cmd, ECORE_FUNC_ID(sc), o->next_state);
4714
4715         o->state = o->next_state;
4716         o->next_state = ECORE_F_STATE_MAX;
4717
4718         /* It's important that o->state and o->next_state are
4719          * updated before o->pending.
4720          */
4721         wmb();
4722
4723         ECORE_CLEAR_BIT(cmd, &o->pending);
4724         ECORE_SMP_MB_AFTER_CLEAR_BIT();
4725
4726         return ECORE_SUCCESS;
4727 }
4728
4729 /**
4730  * ecore_func_comp_cmd - complete the state change command
4731  *
4732  * @sc:         device handle
4733  * @o:          function state object
4734  * @cmd:        command to complete
4735  *
4736  * Checks that the arrived completion is expected.
4737  */
4738 static int ecore_func_comp_cmd(struct bnx2x_softc *sc,
4739                                struct ecore_func_sp_obj *o,
4740                                enum ecore_func_cmd cmd)
4741 {
4742         /* Complete the state machine part first and check whether
4743          * it is a legal completion.
4744          */
4745         int rc = ecore_func_state_change_comp(sc, o, cmd);
4746         return rc;
4747 }
4748
4749 /**
4750  * ecore_func_chk_transition - perform function state machine transition
4751  *
4752  * @sc:         device handle
4753  * @o:          function state object
4754  * @params:     function state parameters
4755  *
4756  * It both checks if the requested command is legal in the current
4757  * state and, if it's legal, sets a `next_state' in the object
4758  * that will be used in the completion flow to set the `state'
4759  * of the object.
4760  *
4761  * returns 0 if a requested command is a legal transition,
4762  *         ECORE_INVAL otherwise.
4763  */
4764 static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
4765                                      struct ecore_func_sp_obj *o,
4766                                      struct ecore_func_state_params *params)
4767 {
4768         enum ecore_func_state state = o->state, next_state = ECORE_F_STATE_MAX;
4769         enum ecore_func_cmd cmd = params->cmd;
4770
4771         /* Forget all commands pending for completion if a driver-only
4772          * state transition has been requested.
4773          */
4774         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4775                 o->pending = 0;
4776                 o->next_state = ECORE_F_STATE_MAX;
4777         }
4778
4779         /* Don't allow a next state transition if we are in the middle of
4780          * the previous one.
4781          */
4782         if (o->pending)
4783                 return ECORE_BUSY;
4784
4785         switch (state) {
4786         case ECORE_F_STATE_RESET:
4787                 if (cmd == ECORE_F_CMD_HW_INIT)
4788                         next_state = ECORE_F_STATE_INITIALIZED;
4789
4790                 break;
4791         case ECORE_F_STATE_INITIALIZED:
4792                 if (cmd == ECORE_F_CMD_START)
4793                         next_state = ECORE_F_STATE_STARTED;
4794
4795                 else if (cmd == ECORE_F_CMD_HW_RESET)
4796                         next_state = ECORE_F_STATE_RESET;
4797
4798                 break;
4799         case ECORE_F_STATE_STARTED:
4800                 if (cmd == ECORE_F_CMD_STOP)
4801                         next_state = ECORE_F_STATE_INITIALIZED;
4802                 /* AFEX ramrods can be sent only in the started state, and
4803                  * only if no function_stop ramrod completion is pending;
4804                  * for these events the next state remains STARTED.
4805                  */
4806                 else if ((cmd == ECORE_F_CMD_AFEX_UPDATE) &&
4807                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4808                         next_state = ECORE_F_STATE_STARTED;
4809
4810                 else if ((cmd == ECORE_F_CMD_AFEX_VIFLISTS) &&
4811                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4812                         next_state = ECORE_F_STATE_STARTED;
4813
4814                 /* Switch_update ramrod can be sent in either started or
4815                  * tx_stopped state, and it doesn't change the state.
4816                  */
4817                 else if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4818                          (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4819                         next_state = ECORE_F_STATE_STARTED;
4820
4821                 else if (cmd == ECORE_F_CMD_TX_STOP)
4822                         next_state = ECORE_F_STATE_TX_STOPPED;
4823
4824                 break;
4825         case ECORE_F_STATE_TX_STOPPED:
4826                 if ((cmd == ECORE_F_CMD_SWITCH_UPDATE) &&
4827                     (!ECORE_TEST_BIT(ECORE_F_CMD_STOP, &o->pending)))
4828                         next_state = ECORE_F_STATE_TX_STOPPED;
4829
4830                 else if (cmd == ECORE_F_CMD_TX_START)
4831                         next_state = ECORE_F_STATE_STARTED;
4832
4833                 break;
4834         default:
4835                 PMD_DRV_LOG(ERR, sc, "Unknown state: %d", state);
4836         }
4837
4838         /* Transition is assured */
4839         if (next_state != ECORE_F_STATE_MAX) {
4840                 ECORE_MSG(sc, "Good function state transition: %d(%d)->%d",
4841                           state, cmd, next_state);
4842                 o->next_state = next_state;
4843                 return ECORE_SUCCESS;
4844         }
4845
4846         ECORE_MSG(sc,
4847                   "Bad function state transition request: %d %d", state, cmd);
4848
4849         return ECORE_INVAL;
4850 }
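/*
 * Summary of the function state machine checked above:
 *
 *   RESET --(HW_INIT)--> INITIALIZED --(START)--> STARTED
 *   STARTED --(TX_STOP)--> TX_STOPPED --(TX_START)--> STARTED
 *   STARTED --(STOP)--> INITIALIZED --(HW_RESET)--> RESET
 *
 * AFEX_UPDATE and AFEX_VIFLISTS are legal only in STARTED;
 * SWITCH_UPDATE is legal in STARTED and TX_STOPPED. All three require
 * that no STOP is pending and leave the state unchanged.
 */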
4851
4852 /**
4853  * ecore_func_init_func - performs HW init at function stage
4854  *
4855  * @sc:         device handle
4856  * @drv:        driver-specific HW operations
4857  *
4858  * Init HW when the current phase is
4859  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize the FUNCTION-only
4860  * HW blocks.
4861  */
4862 static int ecore_func_init_func(struct bnx2x_softc *sc,
4863                                 const struct ecore_func_sp_drv_ops *drv)
4864 {
4865         return drv->init_hw_func(sc);
4866 }
4867
4868 /**
4869  * ecore_func_init_port - performs HW init at port stage
4870  *
4871  * @sc:         device handle
4872  * @drv:        driver-specific HW operations
4873  *
4874  * Init HW when the current phase is
4875  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
4876  * FUNCTION-only HW blocks.
4877  *
4878  */
4879 static int ecore_func_init_port(struct bnx2x_softc *sc,
4880                                 const struct ecore_func_sp_drv_ops *drv)
4881 {
4882         int rc = drv->init_hw_port(sc);
4883         if (rc)
4884                 return rc;
4885
4886         return ecore_func_init_func(sc, drv);
4887 }
4888
4889 /**
4890  * ecore_func_init_cmn_chip - performs HW init at chip-common stage
4891  *
4892  * @sc:         device handle
4893  * @drv:        driver-specific HW operations
4894  *
4895  * Init HW when the current phase is
4896  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
4897  * PORT-only and FUNCTION-only HW blocks.
4898  */
4899 static int ecore_func_init_cmn_chip(struct bnx2x_softc *sc,
4900                                     const struct ecore_func_sp_drv_ops *drv)
4901 {
4902         int rc = drv->init_hw_cmn_chip(sc);
4903         if (rc)
4904                 return rc;
4905
4906         return ecore_func_init_port(sc, drv);
4907 }
4908
4909 /**
4910  * ecore_func_init_cmn - performs HW init at common stage
4911  *
4912  * @sc:         device handle
4913  * @drv:        driver-specific HW operations
4914  *
4915  * Init HW when the current phase is
4916  * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
4917  * PORT-only and FUNCTION-only HW blocks.
4918  */
4919 static int ecore_func_init_cmn(struct bnx2x_softc *sc,
4920                                const struct ecore_func_sp_drv_ops *drv)
4921 {
4922         int rc = drv->init_hw_cmn(sc);
4923         if (rc)
4924                 return rc;
4925
4926         return ecore_func_init_port(sc, drv);
4927 }
4928
4929 static int ecore_func_hw_init(struct bnx2x_softc *sc,
4930                               struct ecore_func_state_params *params)
4931 {
4932         uint32_t load_code = params->params.hw_init.load_phase;
4933         struct ecore_func_sp_obj *o = params->f_obj;
4934         const struct ecore_func_sp_drv_ops *drv = o->drv;
4935         int rc = 0;
4936
4937         ECORE_MSG(sc, "function %d  load_code %x",
4938                   ECORE_ABS_FUNC_ID(sc), load_code);
4939
4940         /* Prepare FW */
4941         rc = drv->init_fw(sc);
4942         if (rc) {
4943                 PMD_DRV_LOG(ERR, sc, "Error loading firmware");
4944                 goto init_err;
4945         }
4946
4947         /* Handle the beginning of COMMON_XXX phases separately... */
4948         switch (load_code) {
4949         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
4950                 rc = ecore_func_init_cmn_chip(sc, drv);
4951                 if (rc)
4952                         goto init_err;
4953
4954                 break;
4955         case FW_MSG_CODE_DRV_LOAD_COMMON:
4956                 rc = ecore_func_init_cmn(sc, drv);
4957                 if (rc)
4958                         goto init_err;
4959
4960                 break;
4961         case FW_MSG_CODE_DRV_LOAD_PORT:
4962                 rc = ecore_func_init_port(sc, drv);
4963                 if (rc)
4964                         goto init_err;
4965
4966                 break;
4967         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4968                 rc = ecore_func_init_func(sc, drv);
4969                 if (rc)
4970                         goto init_err;
4971
4972                 break;
4973         default:
4974                 PMD_DRV_LOG(ERR, sc, "Unknown load_code (0x%x) from MCP",
4975                             load_code);
4976                 rc = ECORE_INVAL;
4977         }
4978
4979 init_err:
4980         /* In case of success, complete the command immediately: no ramrods
4981          * have been sent.
4982          */
4983         if (!rc)
4984                 o->complete_cmd(sc, o, ECORE_F_CMD_HW_INIT);
4985
4986         return rc;
4987 }
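/*
 * Illustrative caller sketch, not driver code: the load phase handed back
 * by the MCP selects how much of the chip this function must initialize,
 * and the stages nest (COMMON_CHIP or COMMON -> PORT -> FUNCTION, as the
 * call chains above show). Assuming the softc exposes its function object
 * as `sc->func_obj', a bring-up path might look like:
 *
 *     struct ecore_func_state_params func_params = { 0 };
 *
 *     func_params.f_obj = &sc->func_obj;
 *     func_params.cmd = ECORE_F_CMD_HW_INIT;
 *     func_params.params.hw_init.load_phase = load_code;
 *     ECORE_SET_BIT(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *     rc = ecore_func_state_change(sc, &func_params);
 *
 * HW_INIT sends no ramrod, so the command completes immediately (see the
 * init_err handling above).
 */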
4988
4989 /**
4990  * ecore_func_reset_func - reset HW at function stage
4991  *
4992  * @sc:         device handle
4993  * @drv:        driver-specific HW operations
4994  *
4995  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
4996  * FUNCTION-only HW blocks.
4997  */
4998 static void ecore_func_reset_func(struct bnx2x_softc *sc,
4999                                   const struct ecore_func_sp_drv_ops *drv)
5000 {
5001         drv->reset_hw_func(sc);
5002 }
5003
5004 /**
5005  * ecore_func_reset_port - reset HW at port stage
5006  *
5007  * @sc:         device handle
5008  * @drv:        driver-specific HW operations
5009  *
5010  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5011  * FUNCTION-only and PORT-only HW blocks.
5012  *
5013  *                 !!!IMPORTANT!!!
5014  *
5015  * reset_port must be called before reset_func(): the last thing
5016  * reset_func() does is pf_disable(), which shuts down PGLUE_B and
5017  * thereby makes any further DMAE transactions impossible.
5018  */
5019 static void ecore_func_reset_port(struct bnx2x_softc *sc,
5020                                   const struct ecore_func_sp_drv_ops *drv)
5021 {
5022         drv->reset_hw_port(sc);
5023         ecore_func_reset_func(sc, drv);
5024 }
5025
5026 /**
5027  * ecore_func_reset_cmn - reset HW at common stage
5028  *
5029  * @sc:         device handle
5030  * @drv:        driver-specific HW operations
5031  *
5032  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5033  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5034  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5035  */
5036 static void ecore_func_reset_cmn(struct bnx2x_softc *sc,
5037                                  const struct ecore_func_sp_drv_ops *drv)
5038 {
5039         ecore_func_reset_port(sc, drv);
5040         drv->reset_hw_cmn(sc);
5041 }
5042
5043 static int ecore_func_hw_reset(struct bnx2x_softc *sc,
5044                                struct ecore_func_state_params *params)
5045 {
5046         uint32_t reset_phase = params->params.hw_reset.reset_phase;
5047         struct ecore_func_sp_obj *o = params->f_obj;
5048         const struct ecore_func_sp_drv_ops *drv = o->drv;
5049
5050         ECORE_MSG(sc, "function %d  reset_phase %x", ECORE_ABS_FUNC_ID(sc),
5051                   reset_phase);
5052
5053         switch (reset_phase) {
5054         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5055                 ecore_func_reset_cmn(sc, drv);
5056                 break;
5057         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5058                 ecore_func_reset_port(sc, drv);
5059                 break;
5060         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5061                 ecore_func_reset_func(sc, drv);
5062                 break;
5063         default:
5064                 PMD_DRV_LOG(ERR, sc, "Unknown reset_phase (0x%x) from MCP",
5065                             reset_phase);
5066                 break;
5067         }
5068
5069         /* Complete the command immediately: no ramrods have been sent. */
5070         o->complete_cmd(sc, o, ECORE_F_CMD_HW_RESET);
5071
5072         return ECORE_SUCCESS;
5073 }
5074
5075 static int ecore_func_send_start(struct bnx2x_softc *sc,
5076                                  struct ecore_func_state_params *params)
5077 {
5078         struct ecore_func_sp_obj *o = params->f_obj;
5079         struct function_start_data *rdata =
5080             (struct function_start_data *)o->rdata;
5081         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5082         struct ecore_func_start_params *start_params = &params->params.start;
5083
5084         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5085
5086         /* Fill the ramrod data with provided parameters */
5087         rdata->function_mode = (uint8_t) start_params->mf_mode;
5088         rdata->sd_vlan_tag = ECORE_CPU_TO_LE16(start_params->sd_vlan_tag);
5089         rdata->path_id = ECORE_PATH_ID(sc);
5090         rdata->network_cos_mode = start_params->network_cos_mode;
5091         rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5092         rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5093
5094         /*
5095          * No need for an explicit memory barrier here: although the
5096          * write to the SPQ element must be ordered before the update
5097          * of the SPQ producer, that update involves a memory read, and
5098          * a full memory barrier is already placed there
5099          * (inside ecore_sp_post()).
5100          */
5101
5102         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5103                              data_mapping, NONE_CONNECTION_TYPE);
5104 }
5105
5106 static int ecore_func_send_switch_update(struct bnx2x_softc *sc,
5107                                          struct ecore_func_state_params *params)
5108 {
5109         struct ecore_func_sp_obj *o = params->f_obj;
5110         struct function_update_data *rdata =
5111             (struct function_update_data *)o->rdata;
5112         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5113         struct ecore_func_switch_update_params *switch_update_params =
5114             &params->params.switch_update;
5115
5116         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5117
5118         /* Fill the ramrod data with provided parameters */
5119         rdata->tx_switch_suspend_change_flg = 1;
5120         rdata->tx_switch_suspend = switch_update_params->suspend;
5121         rdata->echo = SWITCH_UPDATE;
5122
5123         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5124                              data_mapping, NONE_CONNECTION_TYPE);
5125 }
5126
5127 static int ecore_func_send_afex_update(struct bnx2x_softc *sc,
5128                                        struct ecore_func_state_params *params)
5129 {
5130         struct ecore_func_sp_obj *o = params->f_obj;
5131         struct function_update_data *rdata =
5132             (struct function_update_data *)o->afex_rdata;
5133         ecore_dma_addr_t data_mapping = o->afex_rdata_mapping;
5134         struct ecore_func_afex_update_params *afex_update_params =
5135             &params->params.afex_update;
5136
5137         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5138
5139         /* Fill the ramrod data with provided parameters */
5140         rdata->vif_id_change_flg = 1;
5141         rdata->vif_id = ECORE_CPU_TO_LE16(afex_update_params->vif_id);
5142         rdata->afex_default_vlan_change_flg = 1;
5143         rdata->afex_default_vlan =
5144             ECORE_CPU_TO_LE16(afex_update_params->afex_default_vlan);
5145         rdata->allowed_priorities_change_flg = 1;
5146         rdata->allowed_priorities = afex_update_params->allowed_priorities;
5147         rdata->echo = AFEX_UPDATE;
5148
5149         /* No need for an explicit memory barrier here: although the
5150          * write to the SPQ element must be ordered before the update
5151          * of the SPQ producer, that update involves a memory read, and
5152          * a full memory barrier is already placed there
5153          * (inside ecore_sp_post()).
5154          */
5155         ECORE_MSG(sc, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
5156                   rdata->vif_id,
5157                   rdata->afex_default_vlan, rdata->allowed_priorities);
5158
5159         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5160                              data_mapping, NONE_CONNECTION_TYPE);
5161 }
5162
5163 static inline int
5164 ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
5165                               struct ecore_func_state_params *params)
5166 {
5167         struct ecore_func_sp_obj *o = params->f_obj;
5168         struct afex_vif_list_ramrod_data *rdata =
5169             (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5170         struct ecore_func_afex_viflists_params *afex_vif_params =
5171             &params->params.afex_viflists;
5172         uint64_t *p_rdata = (uint64_t *) rdata;
5173
5174         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5175
5176         /* Fill the ramrod data with provided parameters */
5177         rdata->vif_list_index =
5178             ECORE_CPU_TO_LE16(afex_vif_params->vif_list_index);
5179         rdata->func_bit_map = afex_vif_params->func_bit_map;
5180         rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
5181         rdata->func_to_clear = afex_vif_params->func_to_clear;
5182
5183         /* Send the sub-command type in the echo field */
5184         rdata->echo = afex_vif_params->afex_vif_list_command;
5185
5186         /* No need for an explicit memory barrier here: although the
5187          * write to the SPQ element must be ordered before the update
5188          * of the SPQ producer, that update involves a memory read, and
5189          * a full memory barrier is already placed there
5190          * (inside ecore_sp_post()).
5191          */
5192
5193         ECORE_MSG(sc,
5194                   "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
5195                   rdata->afex_vif_list_command, rdata->vif_list_index,
5196                   rdata->func_bit_map, rdata->func_to_clear);
5197
5198         /* this ramrod sends data directly and not through DMA mapping */
5199         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5200                              *p_rdata, NONE_CONNECTION_TYPE);
5201 }
5202
5203 static int ecore_func_send_stop(struct bnx2x_softc *sc,
5204                                 __rte_unused struct ecore_func_state_params *params)
5205 {
5206         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0,
5207                              NONE_CONNECTION_TYPE);
5208 }
5209
5210 static int ecore_func_send_tx_stop(struct bnx2x_softc *sc,
5211                                    __rte_unused struct ecore_func_state_params *params)
5212 {
5213         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0,
5214                              NONE_CONNECTION_TYPE);
5215 }
5216
5217 static int ecore_func_send_tx_start(struct bnx2x_softc *sc,
5218                                     struct ecore_func_state_params *params)
5219 {
5220         struct ecore_func_sp_obj *o = params->f_obj;
5221         struct flow_control_configuration *rdata =
5222             (struct flow_control_configuration *)o->rdata;
5223         ecore_dma_addr_t data_mapping = o->rdata_mapping;
5224         struct ecore_func_tx_start_params *tx_start_params =
5225             &params->params.tx_start;
5226         uint32_t i;
5227
5228         ECORE_MEMSET(rdata, 0, sizeof(*rdata));
5229
5230         rdata->dcb_enabled = tx_start_params->dcb_enabled;
5231         rdata->dcb_version = tx_start_params->dcb_version;
5232         rdata->dont_add_pri_0 = tx_start_params->dont_add_pri_0;
5233
5234         for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
5235                 rdata->traffic_type_to_priority_cos[i] =
5236                     tx_start_params->traffic_type_to_priority_cos[i];
5237
5238         return ecore_sp_post(sc, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5239                              data_mapping, NONE_CONNECTION_TYPE);
5240 }
5241
5242 static int ecore_func_send_cmd(struct bnx2x_softc *sc,
5243                                struct ecore_func_state_params *params)
5244 {
5245         switch (params->cmd) {
5246         case ECORE_F_CMD_HW_INIT:
5247                 return ecore_func_hw_init(sc, params);
5248         case ECORE_F_CMD_START:
5249                 return ecore_func_send_start(sc, params);
5250         case ECORE_F_CMD_STOP:
5251                 return ecore_func_send_stop(sc, params);
5252         case ECORE_F_CMD_HW_RESET:
5253                 return ecore_func_hw_reset(sc, params);
5254         case ECORE_F_CMD_AFEX_UPDATE:
5255                 return ecore_func_send_afex_update(sc, params);
5256         case ECORE_F_CMD_AFEX_VIFLISTS:
5257                 return ecore_func_send_afex_viflists(sc, params);
5258         case ECORE_F_CMD_TX_STOP:
5259                 return ecore_func_send_tx_stop(sc, params);
5260         case ECORE_F_CMD_TX_START:
5261                 return ecore_func_send_tx_start(sc, params);
5262         case ECORE_F_CMD_SWITCH_UPDATE:
5263                 return ecore_func_send_switch_update(sc, params);
5264         default:
5265                 PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
5266                 return ECORE_INVAL;
5267         }
5268 }
5269
5270 void ecore_init_func_obj(__rte_unused struct bnx2x_softc *sc,
5271                          struct ecore_func_sp_obj *obj,
5272                          void *rdata, ecore_dma_addr_t rdata_mapping,
5273                          void *afex_rdata, ecore_dma_addr_t afex_rdata_mapping,
5274                          struct ecore_func_sp_drv_ops *drv_iface)
5275 {
5276         ECORE_MEMSET(obj, 0, sizeof(*obj));
5277
5278         ECORE_MUTEX_INIT(&obj->one_pending_mutex);
5279
5280         obj->rdata = rdata;
5281         obj->rdata_mapping = rdata_mapping;
5282         obj->afex_rdata = afex_rdata;
5283         obj->afex_rdata_mapping = afex_rdata_mapping;
5284         obj->send_cmd = ecore_func_send_cmd;
5285         obj->check_transition = ecore_func_chk_transition;
5286         obj->complete_cmd = ecore_func_comp_cmd;
5287         obj->wait_comp = ecore_func_wait_comp;
5288         obj->drv = drv_iface;
5289 }
5290
5291 /**
5292  * ecore_func_state_change - perform Function state change transition
5293  *
5294  * @sc:         device handle
5295  * @params:     parameters to perform the transaction
5296  *
5297  * returns 0 in case of successfully completed transition,
5298  *         negative error code in case of failure, positive
5299  *         (EBUSY) value if there is a completion that is
5300  *         still pending (possible only if RAMROD_COMP_WAIT is
5301  *         not set in params->ramrod_flags for asynchronous
5302  *         commands).
5303  */
5304 int ecore_func_state_change(struct bnx2x_softc *sc,
5305                             struct ecore_func_state_params *params)
5306 {
5307         struct ecore_func_sp_obj *o = params->f_obj;
5308         int rc, cnt = 300;
5309         enum ecore_func_cmd cmd = params->cmd;
5310         unsigned long *pending = &o->pending;
5311
5312         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5313
5314         /* Check that the requested transition is legal */
5315         rc = o->check_transition(sc, o, params);
5316         if ((rc == ECORE_BUSY) &&
5317             (ECORE_TEST_BIT(RAMROD_RETRY, &params->ramrod_flags))) {
5318                 while ((rc == ECORE_BUSY) && (--cnt > 0)) {
5319                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5320                         ECORE_MSLEEP(10);
5321                         ECORE_MUTEX_LOCK(&o->one_pending_mutex);
5322                         rc = o->check_transition(sc, o, params);
5323                 }
5324                 if (rc == ECORE_BUSY) {
5325                         ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5326                         PMD_DRV_LOG(ERR, sc,
5327                                     "timeout waiting for previous ramrod completion");
5328                         return rc;
5329                 }
5330         } else if (rc) {
5331                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5332                 return rc;
5333         }
5334
5335         /* Set "pending" bit */
5336         ECORE_SET_BIT(cmd, pending);
5337
5338         /* Don't send a command if only driver cleanup was requested */
5339         if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5340                 ecore_func_state_change_comp(sc, o, cmd);
5341                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5342         } else {
5343                 /* Send a ramrod */
5344                 rc = o->send_cmd(sc, params);
5345
5346                 ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
5347
5348                 if (rc) {
5349                         o->next_state = ECORE_F_STATE_MAX;
5350                         ECORE_CLEAR_BIT(cmd, pending);
5351                         ECORE_SMP_MB_AFTER_CLEAR_BIT();
5352                         return rc;
5353                 }
5354
5355                 if (ECORE_TEST_BIT(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5356                         rc = o->wait_comp(sc, o, cmd);
5357                         if (rc)
5358                                 return rc;
5359
5360                         return ECORE_SUCCESS;
5361                 }
5362         }
5363
5364         return ECORE_RET_PENDING(cmd, pending);
5365 }
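/*
 * Note on the retry path above: with RAMROD_RETRY set, a transition that
 * finds a previous command still pending is re-checked every 10 ms, up to
 * 300 times (roughly 3 seconds), before giving up with ECORE_BUSY. Without
 * RAMROD_COMP_WAIT, the function returns through ECORE_RET_PENDING(),
 * which reports whether the command is still pending instead of waiting
 * for its completion.
 */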
5366
5367 /******************************************************************************
5368  * Description:
5369  *         Calculates CRC-8 on a 32-bit word: polynomial 0-1-2-8
5370  *         (x^8 + x^2 + x^1 + x^0). Code was translated from Verilog.
5371  * Return: the resulting CRC-8 value.
5372  *****************************************************************************/
5373 uint8_t ecore_calc_crc8(uint32_t data, uint8_t crc)
5374 {
5375         uint8_t D[32];
5376         uint8_t NewCRC[8];
5377         uint8_t C[8];
5378         uint8_t crc_res;
5379         uint8_t i;
5380
5381         /* split the data into 32 bits */
5382         for (i = 0; i < 32; i++) {
5383                 D[i] = (uint8_t) (data & 1);
5384                 data = data >> 1;
5385         }
5386
5387         /* split the crc into 8 bits */
5388         for (i = 0; i < 8; i++) {
5389                 C[i] = crc & 1;
5390                 crc = crc >> 1;
5391         }
5392
5393         NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
5394             D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
5395             C[6] ^ C[7];
5396         NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
5397             D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
5398             D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^ C[6];
5399         NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
5400             D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
5401             C[0] ^ C[1] ^ C[4] ^ C[5];
5402         NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
5403             D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
5404             C[1] ^ C[2] ^ C[5] ^ C[6];
5405         NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
5406             D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
5407             C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
5408         NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
5409             D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
5410             C[3] ^ C[4] ^ C[7];
5411         NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
5412             D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^ C[5];
5413         NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
5414             D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^ C[6];
5415
5416         crc_res = 0;
5417         for (i = 0; i < 8; i++) {
5418                 crc_res |= (NewCRC[i] << i);
5419         }
5420
5421         return crc_res;
5422 }
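/*
 * For reference: the unrolled XOR network above is the 32-bit-parallel
 * form of a bit-serial CRC-8 LFSR over the 0-1-2-8 polynomial, consuming
 * the word LSB-first. An illustrative (untested, not driver code)
 * bit-serial sketch of that recurrence would be:
 *
 *     static uint8_t crc8_serial(uint32_t data, uint8_t crc)
 *     {
 *             uint8_t i;
 *
 *             for (i = 0; i < 32; i++, data >>= 1) {
 *                     uint8_t bit = (crc ^ (uint8_t)data) & 1;
 *
 *                     crc >>= 1;
 *                     if (bit)
 *                             crc ^= 0xe0;   (0xe0 = reflected x^8+x^2+x+1)
 *             }
 *             return crc;
 *     }
 *
 * The machine-generated network above remains the authoritative version.
 */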
5423
5424 uint32_t
5425 ecore_calc_crc32(uint32_t crc, uint8_t const *p, uint32_t len, uint32_t magic)
5426 {
5427         int i;
5428         while (len--) {
5429                 crc ^= *p++;
5430                 for (i = 0; i < 8; i++)
5431                         crc = (crc >> 1) ^ ((crc & 1) ? magic : 0);
5432         }
5433         return crc;
5434 }
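/*
 * ecore_calc_crc32() is the standard reflected (LSB-first) bit-serial CRC
 * update loop with a caller-supplied polynomial. For example, with the
 * conventional reflected CRC-32 polynomial it yields a standard CRC-32
 * (illustrative call, not from the driver):
 *
 *     uint32_t crc = ecore_calc_crc32(0xffffffff, buf, len, 0xedb88320)
 *                    ^ 0xffffffff;
 */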