New upstream version 18.11-rc1
[deb_dpdk.git] / drivers / net / qede / base / ecore_l2_api.h
index b41dd7f..004fb61 100644 (file)
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #ifndef __ECORE_L2_API_H__
 
 #include "ecore_status.h"
 #include "ecore_sp_api.h"
+#include "ecore_int_api.h"
 
 #ifndef __EXTRACT__LINUX__
 enum ecore_rss_caps {
-       ECORE_RSS_IPV4 = 0x1,
-       ECORE_RSS_IPV6 = 0x2,
-       ECORE_RSS_IPV4_TCP = 0x4,
-       ECORE_RSS_IPV6_TCP = 0x8,
-       ECORE_RSS_IPV4_UDP = 0x10,
-       ECORE_RSS_IPV6_UDP = 0x20,
+       ECORE_RSS_IPV4          = 0x1,
+       ECORE_RSS_IPV6          = 0x2,
+       ECORE_RSS_IPV4_TCP      = 0x4,
+       ECORE_RSS_IPV6_TCP      = 0x8,
+       ECORE_RSS_IPV4_UDP      = 0x10,
+       ECORE_RSS_IPV6_UDP      = 0x20,
 };
 
 /* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
 #define ECORE_RSS_IND_TABLE_SIZE 128
-#define ECORE_RSS_KEY_SIZE 10  /* size in 32b chunks */
+#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
 #endif
 
+struct ecore_queue_start_common_params {
+       /* Should always be relative to entity sending this. */
+       u8 vport_id;
+       u16 queue_id;
+
+       /* Relative, but relevant only for PFs */
+       u8 stats_id;
+
+       struct ecore_sb_info *p_sb;
+       u8 sb_idx;
+};
+
+struct ecore_rxq_start_ret_params {
+       void OSAL_IOMEM *p_prod;
+       void *p_handle;
+};
+
+struct ecore_txq_start_ret_params {
+       void OSAL_IOMEM *p_doorbell;
+       void *p_handle;
+};
+
 struct ecore_rss_params {
        u8 update_rss_config;
        u8 rss_enable;
@@ -35,8 +56,10 @@ struct ecore_rss_params {
        u8 update_rss_ind_table;
        u8 update_rss_key;
        u8 rss_caps;
-       u8 rss_table_size_log;  /* The table size is 2 ^ rss_table_size_log */
-       u16 rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+       u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+
+       /* Indirection table consist of rx queue handles */
+       void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
        u32 rss_key[ECORE_RSS_KEY_SIZE];
 };
 
@@ -63,8 +86,8 @@ enum ecore_filter_opcode {
        ECORE_FILTER_ADD,
        ECORE_FILTER_REMOVE,
        ECORE_FILTER_MOVE,
-       ECORE_FILTER_REPLACE,   /* Delete all MACs and add new one instead */
-       ECORE_FILTER_FLUSH,     /* Removes all filters */
+       ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
+       ECORE_FILTER_FLUSH, /* Removes all filters */
 };
 
 enum ecore_filter_ucast_type {
@@ -77,6 +100,7 @@ enum ecore_filter_ucast_type {
        ECORE_FILTER_INNER_MAC_VNI_PAIR,
        ECORE_FILTER_MAC_VNI_PAIR,
        ECORE_FILTER_VNI,
+       ECORE_FILTER_UNUSED, /* @DPDK */
 };
 
 struct ecore_filter_ucast {
@@ -97,7 +121,7 @@ struct ecore_filter_mcast {
        enum ecore_filter_opcode opcode;
        u8 vport_to_add_to;
        u8 vport_to_remove_from;
-       u8 num_mc_addrs;
+       u8      num_mc_addrs;
 #define ECORE_MAX_MC_ADDRS     64
        unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
 };
@@ -113,6 +137,24 @@ struct ecore_filter_accept_flags {
 #define ECORE_ACCEPT_MCAST_MATCHED     0x08
 #define ECORE_ACCEPT_MCAST_UNMATCHED   0x10
 #define ECORE_ACCEPT_BCAST             0x20
+#define ECORE_ACCEPT_ANY_VNI           0x40
+};
+
+enum ecore_filter_config_mode {
+       ECORE_FILTER_CONFIG_MODE_DISABLE,
+       ECORE_FILTER_CONFIG_MODE_5_TUPLE,
+       ECORE_FILTER_CONFIG_MODE_L4_PORT,
+       ECORE_FILTER_CONFIG_MODE_IP_DEST,
+       ECORE_FILTER_CONFIG_MODE_TUNN_TYPE,
+       ECORE_FILTER_CONFIG_MODE_IP_SRC,
+};
+
+struct ecore_arfs_config_params {
+       bool tcp;
+       bool udp;
+       bool ipv4;
+       bool ipv6;
+       enum ecore_filter_config_mode mode;
 };
 
 /* Add / remove / move / remove-all unicast MAC-VLAN filters.
@@ -137,61 +179,47 @@ ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
 
 /* Set "accept" filters */
 enum _ecore_status_t
-ecore_filter_accept_cmd(struct ecore_dev *p_dev,
-                       u8 vport,
-                       struct ecore_filter_accept_flags accept_flags,
-                       u8 update_accept_any_vlan,
-                       u8 accept_any_vlan,
-                       enum spq_mode comp_mode,
-                       struct ecore_spq_comp_cb *p_comp_data);
+ecore_filter_accept_cmd(
+       struct ecore_dev                 *p_dev,
+       u8                               vport,
+       struct ecore_filter_accept_flags accept_flags,
+       u8                               update_accept_any_vlan,
+       u8                               accept_any_vlan,
+       enum spq_mode                    comp_mode,
+       struct ecore_spq_comp_cb         *p_comp_data);
 
 /**
- * @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod
+ * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
  *
  * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
  * the VPort ID is not currently initialized.
  *
  * @param p_hwfn
  * @param opaque_fid
- * @param rx_queue_id          RX Queue ID: Zero based, per VPort, allocated
- *                             by assignment (=rssId)
- * @param vport_id             VPort ID
- * @param u8 stats_id           VPort ID which the queue stats
- *                             will be added to
- * @param sb                   Status Block of the Function Event Ring
- * @param sb_index             Index into the status block of the
- *                     Function Event Ring
+ * @param p_params             Inputs; Relative for PF [SB being an exception]
  * @param bd_max_bytes         Maximum bytes that can be placed on a BD
  * @param bd_chain_phys_addr   Physical address of BDs for receive.
  * @param cqe_pbl_addr         Physical address of the CQE PBL Table.
  * @param cqe_pbl_size         Size of the CQE PBL Table
- * @param pp_prod              Pointer to place producer's
- *                              address for the Rx Q (May be
- *                             NULL).
+ * @param p_ret_params         Pointed struct to be filled with outputs.
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
-                                                u16 opaque_fid,
-                                                u8 rx_queue_id,
-                                                u8 vport_id,
-                                                u8 stats_id,
-                                                u16 sb,
-                                                u8 sb_index,
-                                                u16 bd_max_bytes,
-                                                dma_addr_t bd_chain_phys_addr,
-                                                dma_addr_t cqe_pbl_addr,
-                                                u16 cqe_pbl_size,
-                                                void OSAL_IOMEM * *pp_prod);
+enum _ecore_status_t
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+                        u16 opaque_fid,
+                        struct ecore_queue_start_common_params *p_params,
+                        u16 bd_max_bytes,
+                        dma_addr_t bd_chain_phys_addr,
+                        dma_addr_t cqe_pbl_addr,
+                        u16 cqe_pbl_size,
+                        struct ecore_rxq_start_ret_params *p_ret_params);
 
 /**
- * @brief ecore_sp_eth_rx_queue_stop -
- *
- * This ramrod closes an RX queue. It sends RX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
  *
  * @param p_hwfn
- * @param rx_queue_id          RX Queue ID
+ * @param p_rxq                        Handler of queue to close
  * @param eq_completion_only   If True completion will be on
  *                             EQe, if False completion will be
  *                             on EQe if p_hwfn opaque
@@ -202,60 +230,48 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
-                          u16 rx_queue_id,
-                          bool eq_completion_only, bool cqe_completion);
+ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+                       void *p_rxq,
+                       bool eq_completion_only,
+                       bool cqe_completion);
 
 /**
- * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
+ * @brief - TX Queue Start Ramrod
  *
  * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
  * the VPort is not currently initialized.
  *
  * @param p_hwfn
  * @param opaque_fid
- * @param tx_queue_id          TX Queue ID
- * @param vport_id             VPort ID
- * @param stats_id              VPort ID which the queue stats
- *                             will be added to
- * @param sb                   Status Block of the Function Event Ring
- * @param sb_index             Index into the status block of the Function
- *                             Event Ring
+ * @param p_params
+ * @param tc                   traffic class to use with this L2 txq
  * @param pbl_addr             address of the pbl array
  * @param pbl_size             number of entries in pbl
- * @param pp_doorbell          Pointer to place doorbell pointer (May be NULL).
- *                     This address should be used with the
- *                             DIRECT_REG_WR macro.
+ * @param p_ret_params         Pointer to fill the return parameters in.
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
-                                                u16 opaque_fid,
-                                                u16 tx_queue_id,
-                                                u8 vport_id,
-                                                u8 stats_id,
-                                                u16 sb,
-                                                u8 sb_index,
-                                                dma_addr_t pbl_addr,
-                                                u16 pbl_size,
-                                                void OSAL_IOMEM * *
-                                                pp_doorbell);
+enum _ecore_status_t
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+                        u16 opaque_fid,
+                        struct ecore_queue_start_common_params *p_params,
+                        u8 tc,
+                        dma_addr_t pbl_addr,
+                        u16 pbl_size,
+                        struct ecore_txq_start_ret_params *p_ret_params);
 
 /**
- * @brief ecore_sp_eth_tx_queue_stop -
- *
- * This ramrod closes a TX queue. It sends TX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_tx_queue_stop - closes a Tx queue
  *
  * @param p_hwfn
- * @param tx_queue_id          TX Queue ID
+ * @param p_txq - handle to Tx queue needed to be closed
  *
  * @return enum _ecore_status_t
  */
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
-                                               u16 tx_queue_id);
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+                                            void *p_txq);
 
-enum ecore_tpa_mode {
+enum ecore_tpa_mode    {
        ECORE_TPA_MODE_NONE,
        ECORE_TPA_MODE_RSC,
        ECORE_TPA_MODE_GRO,
@@ -275,6 +291,17 @@ struct ecore_sp_vport_start_params {
        u8 vport_id;            /* VPORT ID */
        u16 mtu;                /* VPORT MTU */
        bool zero_placement_offset;
+       bool check_mac;
+       bool check_ethtype;
+
+       /* Strict behavior on transmission errors */
+       bool b_err_illegal_vlan_mode;
+       bool b_err_illegal_inband_mode;
+       bool b_err_vlan_insert_with_inband;
+       bool b_err_small_pkt;
+       bool b_err_big_pkt;
+       bool b_err_anti_spoof;
+       bool b_err_ctrl_frame;
 };
 
 /**
@@ -293,30 +320,37 @@ ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
                     struct ecore_sp_vport_start_params *p_params);
 
 struct ecore_sp_vport_update_params {
-       u16 opaque_fid;
-       u8 vport_id;
-       u8 update_vport_active_rx_flg;
-       u8 vport_active_rx_flg;
-       u8 update_vport_active_tx_flg;
-       u8 vport_active_tx_flg;
-       u8 update_inner_vlan_removal_flg;
-       u8 inner_vlan_removal_flg;
-       u8 silent_vlan_removal_flg;
-       u8 update_default_vlan_enable_flg;
-       u8 default_vlan_enable_flg;
-       u8 update_default_vlan_flg;
-       u16 default_vlan;
-       u8 update_tx_switching_flg;
-       u8 tx_switching_flg;
-       u8 update_approx_mcast_flg;
-       u8 update_anti_spoofing_en_flg;
-       u8 anti_spoofing_en;
-       u8 update_accept_any_vlan_flg;
-       u8 accept_any_vlan;
-       unsigned long bins[8];
-       struct ecore_rss_params *rss_params;
+       u16                     opaque_fid;
+       u8                      vport_id;
+       u8                      update_vport_active_rx_flg;
+       u8                      vport_active_rx_flg;
+       u8                      update_vport_active_tx_flg;
+       u8                      vport_active_tx_flg;
+       u8                      update_inner_vlan_removal_flg;
+       u8                      inner_vlan_removal_flg;
+       u8                      silent_vlan_removal_flg;
+       u8                      update_default_vlan_enable_flg;
+       u8                      default_vlan_enable_flg;
+       u8                      update_default_vlan_flg;
+       u16                     default_vlan;
+       u8                      update_tx_switching_flg;
+       u8                      tx_switching_flg;
+       u8                      update_approx_mcast_flg;
+       u8                      update_anti_spoofing_en_flg;
+       u8                      anti_spoofing_en;
+       u8                      update_accept_any_vlan_flg;
+       u8                      accept_any_vlan;
+       u32                     bins[8];
+       struct ecore_rss_params *rss_params;
        struct ecore_filter_accept_flags accept_flags;
        struct ecore_sge_tpa_params *sge_tpa_params;
+       /* MTU change - notice this requires the vport to be disabled.
+        * If non-zero, value would be used.
+        */
+       u16                     mtu;
+       u8                      update_ctl_frame_check;
+       u8                      mac_chk_en;
+       u8                      ethtype_chk_en;
 };
 
 /**
@@ -351,7 +385,8 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
  * @return enum _ecore_status_t
  */
 enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
-                                        u16 opaque_fid, u8 vport_id);
+                                        u16 opaque_fid,
+                                        u8 vport_id);
 
 enum _ecore_status_t
 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
@@ -369,19 +404,19 @@ ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
  * @note Final phase API.
  *
  * @param p_hwfn
- * @param rx_queue_id          RX Queue ID
- * @param num_rxqs              Allow to update multiple rx
- *                             queues, from rx_queue_id to
- *                             (rx_queue_id + num_rxqs)
+ * @param pp_rxq_handlers      An array of queue handlers to be updated.
+ * @param num_rxqs              number of queues to update.
  * @param complete_cqe_flg     Post completion to the CQE Ring if set
  * @param complete_event_flg   Post completion to the Event Ring if set
+ * @param comp_mode
+ * @param p_comp_data
  *
  * @return enum _ecore_status_t
  */
 
 enum _ecore_status_t
 ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
-                             u16 rx_queue_id,
+                             void **pp_rxq_handlers,
                              u8 num_rxqs,
                              u8 complete_cqe_flg,
                              u8 complete_event_flg,
@@ -398,4 +433,68 @@ void ecore_get_vport_stats(struct ecore_dev *p_dev,
 
 void ecore_reset_vport_stats(struct ecore_dev *p_dev);
 
+/**
+ * @brief ecore_arfs_mode_configure -
+ *
+ * Enable or disable RFS mode. It must accept at least one of tcp or udp true
+ * and at least one of ipv4 or ipv6 true to enable RFS mode.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cfg_params         arfs mode configuration parameters.
+ *
+ */
+void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt,
+                              struct ecore_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - ecore_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove arfs hw filter
+ *
+ * @param p_hwfn
+ * @param p_cb         Used for ECORE_SPQ_MODE_CB, where client would initialize
+ *                     it with cookie and callback function address, if not
+ *                     using this mode then client must pass NULL.
+ * @param p_addr       p_addr is an actual packet header that needs to be
+ *                     filtered. It has to be mapped with IO to read prior to
+ *                     calling this, [contains 4 tuples- src ip, dest ip,
+ *                     src port, dest port].
+ * @param length       length of p_addr header up to past the transport header.
+ * @param qid          receive packet will be directed to this queue.
+ * @param vport_id
+ * @param b_is_add     flag to add or remove filter.
+ *
+ */
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_spq_comp_cb *p_cb,
+                                 dma_addr_t p_addr, u16 length,
+                                 u16 qid, u8 vport_id,
+                                 bool b_is_add);
+
+/**
+ * @brief - ecore_update_eth_rss_ind_table_entry
+ *
+ * This function being used to update RSS indirection table entry to FW RAM
+ * instead of using the SP vport update ramrod with rss params.
+ *
+ * Notice:
+ * This function supports only one outstanding command per engine. Ecore
+ * clients which use this function should call ecore_mcp_ind_table_lock() prior
+ * to it and ecore_mcp_ind_table_unlock() after it.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param ind_table_index
+ * @param ind_table_value
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
+                                    u8 vport_id,
+                                    u8 ind_table_index,
+                                    u16 ind_table_value);
 #endif