/*-
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <unistd.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->txqs_n; ++i)
                mlx5_txq_release(dev, i);
}

/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret;

        for (i = 0; i != priv->txqs_n; ++i) {
                struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

                if (!txq_ctrl)
                        continue;
                txq_alloc_elts(txq_ctrl);
                txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i);
                if (!txq_ctrl->ibv)
                        goto error;
        }
        ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
        if (ret) {
                /* Adjust index for rollback. */
                i = priv->txqs_n - 1;
                goto error;
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        do {
                mlx5_txq_release(dev, i);
        } while (i-- != 0);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}
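
/*
 * Both queue-start helpers in this file follow the same error convention:
 * return -rte_errno and leave rte_errno set, with the value saved and
 * restored around the rollback loop so the cleanup calls cannot overwrite
 * the original cause. A minimal sketch of a caller consuming that
 * convention (hypothetical caller code, not part of this driver):
 *
 *      ret = mlx5_txq_start(dev);
 *      if (ret < 0)
 *              DRV_LOG(ERR, "port %u Tx queue start failed: %s",
 *                      dev->data->port_id, strerror(rte_errno));
 */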

/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i)
                mlx5_rxq_release(dev, i);
}

/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret;

        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);

                if (!rxq_ctrl)
                        continue;
                ret = rxq_alloc_elts(rxq_ctrl);
                if (ret)
                        goto error;
                rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i);
                if (!rxq_ctrl->ibv)
                        goto error;
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        do {
                mlx5_rxq_release(dev, i);
        } while (i-- != 0);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        int ret;

        DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
        ret = mlx5_flow_create_drop_queue(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
        ret = mlx5_txq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
        ret = mlx5_rxq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
        dev->data->dev_started = 1;
        ret = mlx5_rx_intr_vec_enable(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
                        dev->data->port_id);
                goto error;
        }
        mlx5_xstats_init(dev);
        ret = mlx5_traffic_enable(dev);
        if (ret) {
                DRV_LOG(DEBUG, "port %u failed to set default flows",
                        dev->data->port_id);
                goto error;
        }
        ret = mlx5_flow_start(dev, &priv->flows);
        if (ret) {
                DRV_LOG(DEBUG, "port %u failed to set flows",
                        dev->data->port_id);
                goto error;
        }
        dev->tx_pkt_burst = mlx5_select_tx_function(dev);
        dev->rx_pkt_burst = mlx5_select_rx_function(dev);
        mlx5_dev_interrupt_handler_install(dev);
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        dev->data->dev_started = 0;
        mlx5_flow_stop(dev, &priv->flows);
        mlx5_traffic_disable(dev);
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
        mlx5_flow_delete_drop_queue(dev);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}
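
/*
 * The start sequence above is strictly ordered: drop queue, Tx queues,
 * Rx queues, Rx interrupts, control flows, user flows, and only then the
 * burst callbacks and the interrupt handler. Applications reach it through
 * the generic ethdev API; a minimal application-side sketch (port_id is
 * hypothetical, queues assumed already configured):
 *
 *      int ret = rte_eth_dev_start(port_id);
 *
 *      if (ret < 0)
 *              rte_exit(EXIT_FAILURE, "port %u start failed: %s\n",
 *                       port_id, rte_strerror(-ret));
 *
 * rte_eth_dev_start() forwards the negative errno value returned here.
 */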

/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;

        dev->data->dev_started = 0;
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
        usleep(1000 * priv->rxqs_n);
        DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
        mlx5_flow_stop(dev, &priv->flows);
        mlx5_traffic_disable(dev);
        mlx5_rx_intr_vec_disable(dev);
        mlx5_dev_interrupt_handler_uninstall(dev);
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
        mlx5_flow_delete_drop_queue(dev);
}
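
/*
 * Swapping in removed_rx_burst()/removed_tx_burst() before tearing anything
 * down is what keeps the stop path safe while datapath lcores may still be
 * polling: new burst calls no longer touch queue resources, and the
 * usleep(1000 * priv->rxqs_n) grants roughly one millisecond per configured
 * Rx queue for in-flight calls to drain before flows and queues are
 * destroyed. Application side this is simply (port_id hypothetical):
 *
 *      rte_eth_dev_stop(port_id);
 */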

/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        struct rte_flow_item_eth bcast = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        struct rte_flow_item_eth ipv6_multi_spec = {
                .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth ipv6_multi_mask = {
                .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast = {
                .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        struct rte_flow_item_eth unicast_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        const unsigned int vlan_filter_n = priv->vlan_filter_n;
        const struct ether_addr cmp = {
                .addr_bytes = "\x00\x00\x00\x00\x00\x00",
        };
        unsigned int i;
        unsigned int j;
        int ret;

        if (dev->data->promiscuous) {
                struct rte_flow_item_eth promisc = {
                        .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                };

                ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
                if (ret)
                        goto error;
        }
        if (dev->data->all_multicast) {
                struct rte_flow_item_eth multicast = {
                        .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                        .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
                };

                ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
                if (ret)
                        goto error;
        }
        /* Add broadcast/multicast flows. */
        for (i = 0; i != vlan_filter_n; ++i) {
                uint16_t vlan = priv->vlan_filter[i];
                struct rte_flow_item_vlan vlan_spec = {
                        .tci = rte_cpu_to_be_16(vlan),
                };
                struct rte_flow_item_vlan vlan_mask =
                        rte_flow_item_vlan_mask;

                ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
                                          &vlan_spec, &vlan_mask);
                if (ret)
                        goto error;
                ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
                                          &ipv6_multi_mask,
                                          &vlan_spec, &vlan_mask);
                if (ret)
                        goto error;
        }
        if (!vlan_filter_n) {
                ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
                if (ret)
                        goto error;
                ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
                                     &ipv6_multi_mask);
                if (ret)
                        goto error;
        }
        /* Add MAC address flows. */
        for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
                struct ether_addr *mac = &dev->data->mac_addrs[i];

                if (!memcmp(mac, &cmp, sizeof(*mac)))
                        continue;
                memcpy(&unicast.dst.addr_bytes,
                       mac->addr_bytes,
                       ETHER_ADDR_LEN);
                for (j = 0; j != vlan_filter_n; ++j) {
                        uint16_t vlan = priv->vlan_filter[j];
                        struct rte_flow_item_vlan vlan_spec = {
                                .tci = rte_cpu_to_be_16(vlan),
                        };
                        struct rte_flow_item_vlan vlan_mask =
                                rte_flow_item_vlan_mask;

                        ret = mlx5_ctrl_flow_vlan(dev, &unicast,
                                                  &unicast_mask,
                                                  &vlan_spec, &vlan_mask);
                        if (ret)
                                goto error;
                }
                if (!vlan_filter_n) {
                        ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
                        if (ret)
                                goto error;
                }
        }
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        mlx5_flow_list_flush(dev, &priv->ctrl_flows);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}
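
/*
 * Each mlx5_ctrl_flow()/mlx5_ctrl_flow_vlan() call above installs an
 * internal rule equivalent to what an application could express through
 * rte_flow. As a rough, illustrative equivalent (queue fan-out and
 * priorities are handled inside the helpers and not spelled out here),
 * the broadcast rule created when no VLAN filter is set matches:
 *
 *      pattern: eth dst is ff:ff:ff:ff:ff:ff / end
 *      actions: deliver to the configured Rx queues / end
 *
 * The spec/mask pairs defined at the top of the function fill exactly that
 * Ethernet item; the VLAN variants add one matching VLAN item per filter.
 */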

/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;

        mlx5_flow_list_flush(dev, &priv->ctrl_flows);
}

/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
        if (dev->data->dev_started) {
                mlx5_traffic_disable(dev);
                return mlx5_traffic_enable(dev);
        }
        return 0;
}
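
/*
 * mlx5_traffic_restart() is intended for control-path updates that
 * invalidate the control flows while the port stays started, e.g. MAC
 * address or VLAN filter changes. A hedged caller sketch (names and
 * context illustrative only):
 *
 *      priv->vlan_filter[priv->vlan_filter_n++] = vlan_id;
 *      if (mlx5_traffic_restart(dev))
 *              DRV_LOG(ERR, "port %u cannot restart control flows",
 *                      dev->data->port_id);
 *
 * When the port is not started the function is a no-op returning 0, so
 * callers do not need to check dev_started themselves.
 */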