X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx4%2Fmlx4.h;h=9a3bae90c66cdf15f01674c328a09e2effbb7ad9;hb=a2ddb5e56bbd6a991eb8256004c0d3aec900b5b3;hp=d0c7bc290ecc72f5a0a4f1613981af06b437724b;hpb=97f17497d162afdb82c8704bf097f0fee3724b2e;p=deb_dpdk.git

diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index d0c7bc29..9a3bae90 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -1,8 +1,8 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright 2012-2015 6WIND S.A.
- *   Copyright 2012 Mellanox.
+ *   Copyright 2012-2017 6WIND S.A.
+ *   Copyright 2012-2017 Mellanox.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -38,6 +38,33 @@
 #include
 #include
 
+/*
+ * Runtime logging through RTE_LOG() is enabled when not in debugging mode.
+ * Intermediate LOG_*() macros add the required end-of-line characters.
+ */
+#ifndef NDEBUG
+#define INFO(...) DEBUG(__VA_ARGS__)
+#define WARN(...) DEBUG(__VA_ARGS__)
+#define ERROR(...) DEBUG(__VA_ARGS__)
+#else
+#define LOG__(level, m, ...) \
+	RTE_LOG(level, PMD, MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__)
+#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
+#define INFO(...) LOG_(INFO, __VA_ARGS__)
+#define WARN(...) LOG_(WARNING, __VA_ARGS__)
+#define ERROR(...) LOG_(ERR, __VA_ARGS__)
+#endif
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
 /*
  * Maximum number of simultaneous MAC addresses supported.
  *
@@ -54,6 +81,9 @@
 /* Request send completion once in every 64 sends, might be less. */
 #define MLX4_PMD_TX_PER_COMP_REQ 64
 
+/* Maximum number of physical ports. */
+#define MLX4_PMD_MAX_PHYS_PORTS 2
+
 /* Maximum number of Scatter/Gather Elements per Work Request. */
 #ifndef MLX4_PMD_SGE_WR_N
 #define MLX4_PMD_SGE_WR_N 4
@@ -86,6 +116,9 @@
 /* Alarm timeout. */
 #define MLX4_ALARM_TIMEOUT_US 100000
 
+/* Port parameter. */
+#define MLX4_PMD_PORT_KVARG "port"
+
 enum {
 	PCI_VENDOR_ID_MELLANOX = 0x15b3,
 };
@@ -96,7 +129,7 @@ enum {
 	PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO = 0x1007,
 };
 
-#define MLX4_DRIVER_NAME "librte_pmd_mlx4"
+#define MLX4_DRIVER_NAME "net_mlx4"
 
 /* Bit-field manipulation. */
 #define BITFIELD_DECLARE(bf, type, size) \
@@ -160,4 +193,165 @@ enum {
 #define claim_positive(...) (__VA_ARGS__)
 #endif /* NDEBUG */
 
+struct mlx4_rxq_stats {
+	unsigned int idx; /**< Mapping index. */
+#ifdef MLX4_PMD_SOFT_COUNTERS
+	uint64_t ipackets; /**< Total of successfully received packets. */
+	uint64_t ibytes; /**< Total of successfully received bytes. */
+#endif
+	uint64_t idropped; /**< Total of packets dropped when RX ring full. */
+	uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
+};
+
+/* RX element (scattered packets). */
+struct rxq_elt_sp {
+	struct ibv_recv_wr wr; /* Work Request. */
+	struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
+	struct rte_mbuf *bufs[MLX4_PMD_SGE_WR_N]; /* SGEs buffers. */
+};
+
+/* RX element. */
+struct rxq_elt {
+	struct ibv_recv_wr wr; /* Work Request. */
+	struct ibv_sge sge; /* Scatter/Gather Element. */
+	/* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
+};
+
+/* RX queue descriptor. */
+struct rxq {
+	struct priv *priv; /* Back pointer to private data. */
+	struct rte_mempool *mp; /* Memory Pool for allocations. */
+	struct ibv_mr *mr; /* Memory Region (for mp). */
+	struct ibv_cq *cq; /* Completion Queue. */
+	struct ibv_qp *qp; /* Queue Pair. */
+	struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+	struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+	/*
+	 * Each VLAN ID requires a separate flow steering rule.
+	 */
+	BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
+	struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS];
+	struct ibv_flow *promisc_flow; /* Promiscuous flow. */
+	struct ibv_flow *allmulti_flow; /* Multicast flow. */
+	unsigned int port_id; /* Port ID for incoming packets. */
+	unsigned int elts_n; /* (*elts)[] length. */
+	unsigned int elts_head; /* Current index in (*elts)[]. */
+	union {
+		struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
+		struct rxq_elt (*no_sp)[]; /* RX elements. */
+	} elts;
+	unsigned int sp:1; /* Use scattered RX elements. */
+	unsigned int csum:1; /* Enable checksum offloading. */
+	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
+	struct mlx4_rxq_stats stats; /* RX queue counters. */
+	unsigned int socket; /* CPU socket ID for allocations. */
+	struct ibv_exp_res_domain *rd; /* Resource Domain. */
+};
+
+/* TX element. */
+struct txq_elt {
+	struct rte_mbuf *buf;
+};
+
+struct mlx4_txq_stats {
+	unsigned int idx; /**< Mapping index. */
+#ifdef MLX4_PMD_SOFT_COUNTERS
+	uint64_t opackets; /**< Total of successfully sent packets. */
+	uint64_t obytes; /**< Total of successfully sent bytes. */
+#endif
+	uint64_t odropped; /**< Total of packets not sent when TX ring full. */
+};
+
+/*
+ * Linear buffer type. It is used when transmitting buffers with too many
+ * segments that do not fit the hardware queue (see max_send_sge).
+ * Extra segments are copied (linearized) in such buffers, replacing the
+ * last SGE during TX.
+ * The size is arbitrary but large enough to hold a jumbo frame with
+ * 8 segments considering mbuf.buf_len is about 2048 bytes.
+ */
+typedef uint8_t linear_t[16384];
+
+/* TX queue descriptor. */
+struct txq {
+	struct priv *priv; /* Back pointer to private data. */
+	struct {
+		const struct rte_mempool *mp; /* Cached Memory Pool. */
+		struct ibv_mr *mr; /* Memory Region (for mp). */
+		uint32_t lkey; /* mr->lkey */
+	} mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
+	struct ibv_cq *cq; /* Completion Queue. */
+	struct ibv_qp *qp; /* Queue Pair. */
+	struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+	struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+#if MLX4_PMD_MAX_INLINE > 0
+	uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
+#endif
+	unsigned int elts_n; /* (*elts)[] length. */
+	struct txq_elt (*elts)[]; /* TX elements. */
+	unsigned int elts_head; /* Current index in (*elts)[]. */
+	unsigned int elts_tail; /* First element awaiting completion. */
+	unsigned int elts_comp; /* Number of completion requests. */
+	unsigned int elts_comp_cd; /* Countdown for next completion request. */
+	unsigned int elts_comp_cd_init; /* Initial value for countdown. */
+	struct mlx4_txq_stats stats; /* TX queue counters. */
+	linear_t (*elts_linear)[]; /* Linearized buffers. */
+	struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
+	unsigned int socket; /* CPU socket ID for allocations. */
+	struct ibv_exp_res_domain *rd; /* Resource Domain. */
+};
+
+struct rte_flow;
+
+struct priv {
+	struct rte_eth_dev *dev; /* Ethernet device. */
+	struct ibv_context *ctx; /* Verbs context. */
+	struct ibv_device_attr device_attr; /* Device properties. */
+	struct ibv_pd *pd; /* Protection Domain. */
+	/*
+	 * MAC addresses array and configuration bit-field.
+	 * An extra entry that cannot be modified by the DPDK is reserved
+	 * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
+	 */
+	struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
+	BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
+	/* VLAN filters. */
+	struct {
+		unsigned int enabled:1; /* If enabled. */
+		unsigned int id:12; /* VLAN ID (0-4095). */
+	} vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */
+	/* Device properties. */
+	uint16_t mtu; /* Configured MTU. */
+	uint8_t port; /* Physical port number. */
+	unsigned int started:1; /* Device started, flows enabled. */
+	unsigned int promisc:1; /* Device in promiscuous mode. */
+	unsigned int allmulti:1; /* Device receives all multicast packets. */
+	unsigned int hw_qpg:1; /* QP groups are supported. */
+	unsigned int hw_tss:1; /* TSS is supported. */
+	unsigned int hw_rss:1; /* RSS is supported. */
+	unsigned int hw_csum:1; /* Checksum offload is supported. */
+	unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
+	unsigned int rss:1; /* RSS is enabled. */
+	unsigned int vf:1; /* This is a VF device. */
+	unsigned int pending_alarm:1; /* An alarm is pending. */
+#ifdef INLINE_RECV
+	unsigned int inl_recv_size; /* Inline recv size */
+#endif
+	unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
+	/* RX/TX queues. */
+	struct rxq rxq_parent; /* Parent queue when RSS is enabled. */
+	unsigned int rxqs_n; /* RX queues array size. */
+	unsigned int txqs_n; /* TX queues array size. */
+	struct rxq *(*rxqs)[]; /* RX queues. */
+	struct txq *(*txqs)[]; /* TX queues. */
+	struct rte_intr_handle intr_handle; /* Interrupt handler. */
+	struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
+	LIST_HEAD(mlx4_flows, rte_flow) flows;
+	struct rte_intr_conf intr_conf; /* Active interrupt configuration. */
+	rte_spinlock_t lock; /* Lock for control functions. */
+};
+
+void priv_lock(struct priv *priv);
+void priv_unlock(struct priv *priv);
+
 #endif /* RTE_PMD_MLX4_H_ */
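
Reviewer note on the LOG__()/LOG_() pair added by the hunk at line 38: LOG_() appends '\n' as an extra argument and LOG__() consumes it through the trailing "%c" conversion, which both guarantees a terminating newline and ensures __VA_ARGS__ is never empty, so the GNU-only ##__VA_ARGS__ trailing-comma workaround is not needed. The sketch below is only a standalone approximation for illustration: it substitutes fprintf() for RTE_LOG() and drops the log-level argument, so only the macro structure matches the patch.

#include <stdio.h>

#define MLX4_DRIVER_NAME "net_mlx4"

/* Stand-in for RTE_LOG(level, PMD, ...); prints to stderr instead. */
#define RTE_LOG_STUB(m, ...) fprintf(stderr, m, __VA_ARGS__)

/* Same shape as the patch: "%c" plus a '\n' argument appends the newline. */
#define LOG__(m, ...) RTE_LOG_STUB(MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__)
#define LOG_(...) LOG__(__VA_ARGS__, '\n')
#define ERROR(...) LOG_(__VA_ARGS__)

int main(void)
{
	/* Prints: net_mlx4: cannot attach port 1: No such device */
	ERROR("cannot attach port %u: %s", 1u, "No such device");
	return 0;
}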