X-Git-Url: https://gerrit.fd.io/r/gitweb?p=deb_dpdk.git;a=blobdiff_plain;f=drivers%2Fnet%2Fmlx4%2Fmlx4.h;h=3aeef87e96068291926e3cb97afe26b0445b1462;hp=c0ade4f1ae3596d0ec647e223802c2b099b9c140;hb=055c52583a2794da8ba1e85a48cce3832372b12f;hpb=f239aed5e674965691846e8ce3f187dd47523689

diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index c0ade4f1..3aeef87e 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -1,8 +1,8 @@
 /*-
  * BSD LICENSE
  *
- * Copyright 2012-2017 6WIND S.A.
- * Copyright 2012-2017 Mellanox.
+ * Copyright 2012 6WIND S.A.
+ * Copyright 2012 Mellanox
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -34,29 +34,11 @@
 #ifndef RTE_PMD_MLX4_H_
 #define RTE_PMD_MLX4_H_
 
-#include <stddef.h>
+#include <net/if.h>
 #include <stdint.h>
-#include <limits.h>
+#include <sys/queue.h>
 
-/*
- * Runtime logging through RTE_LOG() is enabled when not in debugging mode.
- * Intermediate LOG_*() macros add the required end-of-line characters.
- */
-#ifndef NDEBUG
-#define INFO(...) DEBUG(__VA_ARGS__)
-#define WARN(...) DEBUG(__VA_ARGS__)
-#define ERROR(...) DEBUG(__VA_ARGS__)
-#else
-#define LOG__(level, m, ...) \
-	RTE_LOG(level, PMD, MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__)
-#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
-#define INFO(...) LOG_(INFO, __VA_ARGS__)
-#define WARN(...) LOG_(WARNING, __VA_ARGS__)
-#define ERROR(...) LOG_(ERR, __VA_ARGS__)
-#endif
-
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+/* Verbs headers do not support -pedantic. */
 #ifdef PEDANTIC
 #pragma GCC diagnostic ignored "-Wpedantic"
 #endif
@@ -65,36 +47,25 @@
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
 
-/*
- * Maximum number of simultaneous MAC addresses supported.
- *
- * According to ConnectX's Programmer Reference Manual:
- *   The L2 Address Match is implemented by comparing a MAC/VLAN combination
- *   of 128 MAC addresses and 127 VLAN values, comprising 128x127 possible
- *   L2 addresses.
- */
-#define MLX4_MAX_MAC_ADDRESSES 128
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_interrupts.h>
+#include <rte_mempool.h>
+#include <rte_spinlock.h>
 
-/* Maximum number of simultaneous VLAN filters supported. See above. */
-#define MLX4_MAX_VLAN_IDS 127
+/** Maximum number of simultaneous MAC addresses. This value is arbitrary. */
+#define MLX4_MAX_MAC_ADDRESSES 128
 
-/* Request send completion once in every 64 sends, might be less. */
+/** Request send completion once in every 64 sends, might be less. */
 #define MLX4_PMD_TX_PER_COMP_REQ 64
 
-/* Maximum number of physical ports. */
-#define MLX4_PMD_MAX_PHYS_PORTS 2
-
-/* Maximum number of Scatter/Gather Elements per Work Request. */
-#ifndef MLX4_PMD_SGE_WR_N
-#define MLX4_PMD_SGE_WR_N 4
-#endif
-
-/* Maximum size for inline data. */
-#ifndef MLX4_PMD_MAX_INLINE
+/** Maximum size for inline data. */
 #define MLX4_PMD_MAX_INLINE 0
-#endif
 
-/*
+/** Fixed RSS hash key size in bytes. Cannot be modified. */
+#define MLX4_RSS_HASH_KEY_SIZE 40
+
+/**
  * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
  * from which buffers are to be transmitted will have to be mapped by this
  * driver to their own Memory Region (MR). This is a slow operation.
@@ -105,18 +76,10 @@
 #define MLX4_PMD_TX_MP_CACHE 8
 #endif
 
-/*
- * If defined, only use software counters. The PMD will never ask the hardware
- * for these, and many of them won't be available.
- */
-#ifndef MLX4_PMD_SOFT_COUNTERS
-#define MLX4_PMD_SOFT_COUNTERS 1
-#endif
-
-/* Alarm timeout. */
-#define MLX4_ALARM_TIMEOUT_US 100000
+/** Interrupt alarm timeout value in microseconds. */
+#define MLX4_INTR_ALARM_TIMEOUT 100000
 
-/* Port parameter. */
+/** Port parameter. */
 #define MLX4_PMD_PORT_KVARG "port"
 
 enum {
@@ -129,258 +92,92 @@ enum {
 	PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO = 0x1007,
 };
 
+/** Driver name reported to lower layers and used in log output. */
 #define MLX4_DRIVER_NAME "net_mlx4"
 
-/* Bit-field manipulation. */
-#define BITFIELD_DECLARE(bf, type, size) \
-	type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
-		 !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
-#define BITFIELD_DEFINE(bf, type, size) \
-	BITFIELD_DECLARE((bf), type, (size)) = { 0 }
-#define BITFIELD_SET(bf, b) \
-	(assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-	 (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
-		((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
-#define BITFIELD_RESET(bf, b) \
-	(assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-	 (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
-		~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
-#define BITFIELD_ISSET(bf, b) \
-	(assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-	 !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
-	     ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
-
-/* Number of elements in array. */
-#define elemof(a) (sizeof(a) / sizeof((a)[0]))
-
-/* Cast pointer p to structure member m to its parent structure of type t. */
-#define containerof(p, t, m) ((t *)((uint8_t *)(p) - offsetof(t, m)))
-
-/* Branch prediction helpers. */
-#ifndef likely
-#define likely(c) __builtin_expect(!!(c), 1)
-#endif
-#ifndef unlikely
-#define unlikely(c) __builtin_expect(!!(c), 0)
-#endif
-
-/* Debugging */
-#ifndef NDEBUG
-#include <stdio.h>
-#define DEBUG__(m, ...) \
-	(fprintf(stderr, "%s:%d: %s(): " m "%c", \
-		 __FILE__, __LINE__, __func__, __VA_ARGS__), \
-	 fflush(stderr), \
-	 (void)0)
-/*
- * Save/restore errno around DEBUG__().
- * XXX somewhat undefined behavior, but works.
- */
-#define DEBUG_(...) \
-	(errno = ((int []){ \
-		*(volatile int *)&errno, \
-		(DEBUG__(__VA_ARGS__), 0) \
-	})[0])
-#define DEBUG(...) DEBUG_(__VA_ARGS__, '\n')
-#ifndef MLX4_PMD_DEBUG_BROKEN_VERBS
-#define claim_zero(...) assert((__VA_ARGS__) == 0)
-#else /* MLX4_PMD_DEBUG_BROKEN_VERBS */
-#define claim_zero(...) \
-	(void)(((__VA_ARGS__) == 0) || \
-	       DEBUG("Assertion `(" # __VA_ARGS__ ") == 0' failed (IGNORED)."))
-#endif /* MLX4_PMD_DEBUG_BROKEN_VERBS */
-#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
-#define claim_positive(...) assert((__VA_ARGS__) >= 0)
-#else /* NDEBUG */
-/* No-ops. */
-#define DEBUG(...) (void)0
-#define claim_zero(...) (__VA_ARGS__)
-#define claim_nonzero(...) (__VA_ARGS__)
-#define claim_positive(...) (__VA_ARGS__)
-#endif /* NDEBUG */
-
-struct mlx4_rxq_stats {
-	unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
-	uint64_t ipackets; /**< Total of successfully received packets. */
-	uint64_t ibytes; /**< Total of successfully received bytes. */
-#endif
-	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
-	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
-};
-
-/* RX element (scattered packets). */
-struct rxq_elt_sp {
-	struct ibv_recv_wr wr; /* Work Request. */
-	struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
-	struct rte_mbuf *bufs[MLX4_PMD_SGE_WR_N]; /* SGEs buffers. */
-};
-
-/* RX element. */
-struct rxq_elt {
-	struct ibv_recv_wr wr; /* Work Request. */
-	struct ibv_sge sge; /* Scatter/Gather Element. */
-	/* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
-};
-
-/* RX queue descriptor. */
-struct rxq {
-	LIST_ENTRY(rxq) next; /* Used by parent queue only */
-	struct priv *priv; /* Back pointer to private data. */
-	struct rte_mempool *mp; /* Memory Pool for allocations. */
-	struct ibv_mr *mr; /* Memory Region (for mp). */
-	struct ibv_cq *cq; /* Completion Queue. */
-	struct ibv_qp *qp; /* Queue Pair. */
-	struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
-	struct ibv_exp_cq_family *if_cq; /* CQ interface. */
-	struct ibv_comp_channel *channel;
-	/*
-	 * Each VLAN ID requires a separate flow steering rule.
-	 */
-	BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
-	struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS];
-	struct ibv_flow *promisc_flow; /* Promiscuous flow. */
-	struct ibv_flow *allmulti_flow; /* Multicast flow. */
-	unsigned int port_id; /* Port ID for incoming packets. */
-	unsigned int elts_n; /* (*elts)[] length. */
-	unsigned int elts_head; /* Current index in (*elts)[]. */
-	union {
-		struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
-		struct rxq_elt (*no_sp)[]; /* RX elements. */
-	} elts;
-	unsigned int sp:1; /* Use scattered RX elements. */
-	unsigned int csum:1; /* Enable checksum offloading. */
-	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
-	struct mlx4_rxq_stats stats; /* RX queue counters. */
-	unsigned int socket; /* CPU socket ID for allocations. */
-	struct ibv_exp_res_domain *rd; /* Resource Domain. */
-	struct {
-		uint16_t queues_n;
-		uint16_t queues[RTE_MAX_QUEUES_PER_PORT];
-	} rss;
-};
-
-/* TX element. */
-struct txq_elt {
-	struct rte_mbuf *buf;
-};
-
-struct mlx4_txq_stats {
-	unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
-	uint64_t opackets; /**< Total of successfully sent packets. */
-	uint64_t obytes; /**< Total of successfully sent bytes. */
-#endif
-	uint64_t odropped; /**< Total of packets not sent when TX ring full. */
-};
-
-/*
- * Linear buffer type. It is used when transmitting buffers with too many
- * segments that do not fit the hardware queue (see max_send_sge).
- * Extra segments are copied (linearized) in such buffers, replacing the
- * last SGE during TX.
- * The size is arbitrary but large enough to hold a jumbo frame with
- * 8 segments considering mbuf.buf_len is about 2048 bytes.
- */
-typedef uint8_t linear_t[16384];
+struct mlx4_drop;
+struct mlx4_rss;
+struct rxq;
+struct txq;
+struct rte_flow;
 
-/* TX queue descriptor. */
-struct txq {
-	struct priv *priv; /* Back pointer to private data. */
-	struct {
-		const struct rte_mempool *mp; /* Cached Memory Pool. */
-		struct ibv_mr *mr; /* Memory Region (for mp). */
-		uint32_t lkey; /* mr->lkey */
-	} mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
-	struct ibv_cq *cq; /* Completion Queue. */
-	struct ibv_qp *qp; /* Queue Pair. */
-	struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
-	struct ibv_exp_cq_family *if_cq; /* CQ interface. */
-#if MLX4_PMD_MAX_INLINE > 0
-	uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
-#endif
-	unsigned int elts_n; /* (*elts)[] length. */
-	struct txq_elt (*elts)[]; /* TX elements. */
-	unsigned int elts_head; /* Current index in (*elts)[]. */
-	unsigned int elts_tail; /* First element awaiting completion. */
-	unsigned int elts_comp; /* Number of completion requests. */
-	unsigned int elts_comp_cd; /* Countdown for next completion request. */
-	unsigned int elts_comp_cd_init; /* Initial value for countdown. */
-	struct mlx4_txq_stats stats; /* TX queue counters. */
-	linear_t (*elts_linear)[]; /* Linearized buffers. */
-	struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
-	unsigned int socket; /* CPU socket ID for allocations. */
-	struct ibv_exp_res_domain *rd; /* Resource Domain. */
+/** Memory region descriptor. */
+struct mlx4_mr {
+	LIST_ENTRY(mlx4_mr) next; /**< Next entry in list. */
+	uintptr_t start; /**< Base address for memory region. */
+	uintptr_t end; /**< End address for memory region. */
+	uint32_t lkey; /**< L_Key extracted from @p mr. */
+	uint32_t refcnt; /**< Reference count for this object. */
+	struct priv *priv; /**< Back pointer to private data. */
+	struct ibv_mr *mr; /**< Memory region associated with @p mp. */
+	struct rte_mempool *mp; /**< Target memory pool (mempool). */
 };
 
-struct rte_flow;
-
+/** Private data structure. */
 struct priv {
-	struct rte_eth_dev *dev; /* Ethernet device. */
-	struct ibv_context *ctx; /* Verbs context. */
-	struct ibv_device_attr device_attr; /* Device properties. */
-	struct ibv_pd *pd; /* Protection Domain. */
-	/*
-	 * MAC addresses array and configuration bit-field.
-	 * An extra entry that cannot be modified by the DPDK is reserved
-	 * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
-	 */
-	struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
-	BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
-	/* VLAN filters. */
-	struct {
-		unsigned int enabled:1; /* If enabled. */
-		unsigned int id:12; /* VLAN ID (0-4095). */
-	} vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */
+	struct rte_eth_dev *dev; /**< Ethernet device. */
+	struct ibv_context *ctx; /**< Verbs context. */
+	struct ibv_device_attr device_attr; /**< Device properties. */
+	struct ibv_pd *pd; /**< Protection Domain. */
 	/* Device properties. */
-	uint16_t mtu; /* Configured MTU. */
-	uint8_t port; /* Physical port number. */
-	unsigned int started:1; /* Device started, flows enabled. */
-	unsigned int promisc:1; /* Device in promiscuous mode. */
-	unsigned int allmulti:1; /* Device receives all multicast packets. */
-	unsigned int hw_qpg:1; /* QP groups are supported. */
-	unsigned int hw_tss:1; /* TSS is supported. */
-	unsigned int hw_rss:1; /* RSS is supported. */
-	unsigned int hw_csum:1; /* Checksum offload is supported. */
-	unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
-	unsigned int rss:1; /* RSS is enabled. */
-	unsigned int vf:1; /* This is a VF device. */
-	unsigned int pending_alarm:1; /* An alarm is pending. */
-	unsigned int isolated:1; /* Toggle isolated mode. */
-#ifdef INLINE_RECV
-	unsigned int inl_recv_size; /* Inline recv size */
-#endif
-	unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
-	/* RX/TX queues. */
-	unsigned int rxqs_n; /* RX queues array size. */
-	unsigned int txqs_n; /* TX queues array size. */
-	struct rxq *(*rxqs)[]; /* RX queues. */
-	struct txq *(*txqs)[]; /* TX queues. */
-	struct rte_intr_handle intr_handle_dev; /* Device interrupt handler. */
-	struct rte_intr_handle intr_handle; /* Interrupt handler. */
-	struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
-	LIST_HEAD(mlx4_flows, rte_flow) flows;
-	struct rte_intr_conf intr_conf; /* Active interrupt configuration. */
-	LIST_HEAD(mlx4_parents, rxq) parents;
-	rte_spinlock_t lock; /* Lock for control functions. */
+	uint16_t mtu; /**< Configured MTU. */
+	uint8_t port; /**< Physical port number. */
+	uint32_t started:1; /**< Device started, flows enabled. */
+	uint32_t vf:1; /**< This is a VF device. */
+	uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */
+	uint32_t isolated:1; /**< Toggle isolated mode. */
+	uint32_t hw_csum:1; /* Checksum offload is supported. */
+	uint32_t hw_csum_l2tun:1; /* Checksum support for L2 tunnels. */
+	struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
+	struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
+	LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */
+	LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
+	LIST_HEAD(, mlx4_mr) mr; /**< Registered memory regions. */
+	rte_spinlock_t mr_lock; /**< Lock for @p mr access. */
+	struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
+	/**< Configured MAC addresses. Unused entries are zeroed. */
 };
 
-void priv_lock(struct priv *priv);
-void priv_unlock(struct priv *priv);
-
-int
-rxq_create_qp(struct rxq *rxq,
-	      uint16_t desc,
-	      int inactive,
-	      int children_n,
-	      struct rxq *rxq_parent);
-
-void
-rxq_parent_cleanup(struct rxq *parent);
-
-struct rxq *
-priv_parent_create(struct priv *priv,
-		   uint16_t queues[],
-		   uint16_t children_n);
+/* mlx4_ethdev.c */
+
+int mlx4_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]);
+int mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]);
+int mlx4_mtu_get(struct priv *priv, uint16_t *mtu);
+int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+int mlx4_dev_set_link_down(struct rte_eth_dev *dev);
+int mlx4_dev_set_link_up(struct rte_eth_dev *dev);
+void mlx4_promiscuous_enable(struct rte_eth_dev *dev);
+void mlx4_promiscuous_disable(struct rte_eth_dev *dev);
+void mlx4_allmulticast_enable(struct rte_eth_dev *dev);
+void mlx4_allmulticast_disable(struct rte_eth_dev *dev);
+void mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
+int mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+		      uint32_t index, uint32_t vmdq);
+void mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
+int mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+void mlx4_stats_reset(struct rte_eth_dev *dev);
+void mlx4_dev_infos_get(struct rte_eth_dev *dev,
+			struct rte_eth_dev_info *info);
+int mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+int mlx4_flow_ctrl_get(struct rte_eth_dev *dev,
+		       struct rte_eth_fc_conf *fc_conf);
+int mlx4_flow_ctrl_set(struct rte_eth_dev *dev,
+		       struct rte_eth_fc_conf *fc_conf);
+const uint32_t *mlx4_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
+/* mlx4_intr.c */
+
+int mlx4_intr_uninstall(struct priv *priv);
+int mlx4_intr_install(struct priv *priv);
+int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
+
+/* mlx4_mr.c */
+
+struct mlx4_mr *mlx4_mr_get(struct priv *priv, struct rte_mempool *mp);
+void mlx4_mr_put(struct mlx4_mr *mr);
+uint32_t mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp,
+			 uint32_t i);
 
 #endif /* RTE_PMD_MLX4_H_ */
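
For reference, the BITFIELD_* helpers removed above are self-contained and can
be exercised outside the driver. Below is a minimal standalone sketch: the
macro bodies are copied from the removed code, while main() and its MAC-slot
scenario are illustrative assumptions only (the old driver used such a
bit-field as the mac_configured member of struct priv and struct rxq).

#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bit-field manipulation macros, as removed from mlx4.h. */
#define BITFIELD_DECLARE(bf, type, size) \
	type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
		 !!((size_t)(size) % (sizeof(type) * CHAR_BIT)))]
#define BITFIELD_DEFINE(bf, type, size) \
	BITFIELD_DECLARE((bf), type, (size)) = { 0 }
#define BITFIELD_SET(bf, b) \
	(assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
	 (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
		((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
#define BITFIELD_RESET(bf, b) \
	(assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
	 (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
		~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
#define BITFIELD_ISSET(bf, b) \
	(assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
	 !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
	     ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))

#define MLX4_MAX_MAC_ADDRESSES 128

int main(void)
{
	/* 128 bits rounded up to four 32-bit words, zero-initialized. */
	BITFIELD_DEFINE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);

	BITFIELD_SET(mac_configured, 0);   /* mark slot 0 as configured */
	BITFIELD_SET(mac_configured, 127); /* highest valid slot */
	BITFIELD_RESET(mac_configured, 0); /* release slot 0 again */
	printf("slot 0: %d, slot 127: %d\n",
	       BITFIELD_ISSET(mac_configured, 0),
	       BITFIELD_ISSET(mac_configured, 127));
	return 0;
}

The rounding in BITFIELD_DECLARE allocates one extra word whenever the bit
count is not a multiple of the word width; with 128 bits and uint32_t words
it comes out to exactly four words.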
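
Likewise, the removed struct txq fields elts_comp_cd and elts_comp_cd_init
carry the MLX4_PMD_TX_PER_COMP_REQ policy: rather than asking the NIC to
signal a completion for every send, the driver counts posted sends down from
an initial value (at most 64) and requests a completion only when the
countdown expires, amortizing completion handling across bursts. A minimal
sketch of that countdown follows; the toy_* names are hypothetical stand-ins
for the driver's actual Tx path.

#include <stdint.h>
#include <stdio.h>

#define MLX4_PMD_TX_PER_COMP_REQ 64

/* Toy stand-in for the relevant removed struct txq fields. */
struct toy_txq {
	unsigned int elts_comp;         /* completion requests outstanding */
	unsigned int elts_comp_cd;      /* countdown for next request */
	unsigned int elts_comp_cd_init; /* initial value for countdown */
};

/* Returns nonzero when the send being posted should request a completion. */
static int toy_tx_request_completion(struct toy_txq *txq)
{
	if (--txq->elts_comp_cd != 0)
		return 0;
	txq->elts_comp_cd = txq->elts_comp_cd_init;
	++txq->elts_comp;
	return 1;
}

int main(void)
{
	struct toy_txq txq = {
		.elts_comp_cd = MLX4_PMD_TX_PER_COMP_REQ,
		.elts_comp_cd_init = MLX4_PMD_TX_PER_COMP_REQ,
	};
	unsigned int signaled = 0;
	int i;

	for (i = 0; i < 200; ++i)
		signaled += toy_tx_request_completion(&txq);
	/* Completions are requested on sends 64, 128 and 192 only. */
	printf("%u completion requests over 200 sends\n", signaled);
	return 0;
}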