/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of 6WIND S.A. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
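
/*
 * Device start/stop and traffic-control helpers for the mlx5 PMD: bringing
 * Rx/Tx queues up and down, installing the control-plane flow rules
 * (promiscuous, all-multicast, broadcast and unicast MAC flows) and
 * selecting the Rx/Tx burst functions.
 */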

#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	/* Add memory regions to Tx queues. */
	for (i = 0; i != priv->txqs_n; ++i) {
		unsigned int idx = 0;
		struct mlx5_mr *mr;
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
		/* Pre-register the memory regions cached so far. */
		LIST_FOREACH(mr, &priv->mr, next) {
			mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mr->mp, idx++);
			if (idx == MLX5_PMD_TX_MP_CACHE)
				break;
		}
		txq_alloc_elts(txq_ctrl);
		txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i);
		if (!txq_ctrl->ibv) {
			rte_errno = ENOMEM;
			goto error;
		}
	}
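	/*
	 * Map the Tx UAR (doorbell) pages into this process so the Tx burst
	 * routines can ring doorbells; any failure unwinds every queue set
	 * up above through mlx5_txq_stop().
	 */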
	ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
	if (ret)
		goto error;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_txq_stop(dev);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;

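	/*
	 * Allocate ring elements (mbufs) and create the Verbs resources for
	 * each configured Rx queue; a failure on any queue releases whatever
	 * was set up so far through mlx5_rxq_stop().
	 */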
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);

		if (!rxq_ctrl)
			continue;
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			goto error;
		rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i);
		if (!rxq_ctrl->ibv)
			goto error;
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_rxq_stop(dev);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr = NULL;
	int ret;

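	/*
	 * Bring-up order: drop queue, memory region registration, Tx and Rx
	 * queues, Rx interrupts, extended stats, control-plane flows, user
	 * flows, then the burst functions and the interrupt handler. Any
	 * failure rolls everything back under the error label below.
	 */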
	dev->data->dev_started = 1;
	ret = mlx5_flow_create_drop_queue(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	DRV_LOG(DEBUG, "port %u allocating and configuring hash Rx queues",
		dev->data->port_id);
	rte_mempool_walk(mlx5_mp2mr_iter, priv);
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_xstats_init(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to set default flows",
			dev->data->port_id);
		goto error;
	}
	ret = mlx5_flow_start(dev, &priv->flows);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to set flows",
			dev->data->port_id);
		goto error;
	}
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	mlx5_dev_interrupt_handler_install(dev);
	return 0;
error:
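	/*
	 * Roll back whatever the steps above may have created. The cleanup
	 * helpers can overwrite rte_errno, so the value reported by the
	 * failing step is saved first and restored once cleanup is done.
	 */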
	ret = rte_errno; /* Save rte_errno before cleanup. */
	dev->data->dev_started = 0;
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		mlx5_mr_release(mr);
	mlx5_flow_stop(dev, &priv->flows);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	mlx5_flow_delete_drop_queue(dev);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct mlx5_mr *mr;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
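	/*
	 * Make the burst function swap visible to all cores, then give
	 * in-flight Rx/Tx bursts time to drain before destroying the
	 * resources they might still be using.
	 */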
	rte_wmb();
	usleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u cleaning up and destroying hash Rx queues",
		dev->data->port_id);
	mlx5_flow_stop(dev, &priv->flows);
	mlx5_traffic_disable(dev);
	mlx5_rx_intr_vec_disable(dev);
	mlx5_dev_interrupt_handler_uninstall(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
		mlx5_mr_release(mr);
	mlx5_flow_delete_drop_queue(dev);
}

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/* No control flows in isolated mode: the application manages them. */
	if (priv->isolated)
		return 0;
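	/*
	 * Control flows installed below, depending on port configuration:
	 * a catch-all rule in promiscuous mode, an all-multicast rule when
	 * requested, broadcast and IPv6 multicast rules (per VLAN filter if
	 * any are configured), and one unicast rule per assigned MAC address.
	 */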
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret)
				goto error;
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask = {
				.tci = 0xffff,
			};

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_list_flush(dev, &priv->ctrl_flows);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	mlx5_flow_list_flush(dev, &priv->ctrl_flows);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
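	/*
	 * Control flows only exist while the port is started, so a stopped
	 * port has nothing to refresh; mlx5_dev_start() will install them.
	 */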
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}