New upstream version 18.02
[deb_dpdk.git] drivers/net/mlx4/mlx4_intr.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 6WIND S.A.
 * Copyright 2017 Mellanox
 */

/**
 * @file
 * Interrupts handling for mlx4 driver.
 */

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_alarm.h>
#include <rte_errno.h>
#include <rte_ethdev_driver.h>
#include <rte_io.h>
#include <rte_interrupts.h>

#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

static int mlx4_link_status_check(struct priv *priv);

/**
 * Clean up Rx interrupts handler.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_rx_intr_vec_disable(struct priv *priv)
{
        struct rte_intr_handle *intr_handle = &priv->intr_handle;

        rte_intr_free_epoll_fd(intr_handle);
        free(intr_handle->intr_vec);
        intr_handle->nb_efd = 0;
        intr_handle->intr_vec = NULL;
}

/**
 * Allocate queue vector and fill epoll fd list for Rx interrupts.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_rx_intr_vec_enable(struct priv *priv)
{
        unsigned int i;
        unsigned int rxqs_n = priv->dev->data->nb_rx_queues;
        unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
        unsigned int count = 0;
        struct rte_intr_handle *intr_handle = &priv->intr_handle;

        mlx4_rx_intr_vec_disable(priv);
        intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
        if (intr_handle->intr_vec == NULL) {
                rte_errno = ENOMEM;
                ERROR("failed to allocate memory for interrupt vector,"
                      " Rx interrupts will not be supported");
                return -rte_errno;
        }
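        /*
         * Map each interrupt-capable Rx queue to an event fd slot; queues
         * without a completion channel get an out-of-range vector index so
         * that their entry stays disabled.
         */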
        for (i = 0; i != n; ++i) {
                struct rxq *rxq = priv->dev->data->rx_queues[i];

                /* Skip queues that cannot request interrupts. */
                if (!rxq || !rxq->channel) {
                        /* Use invalid intr_vec[] index to disable entry. */
                        intr_handle->intr_vec[i] =
                                RTE_INTR_VEC_RXTX_OFFSET +
                                RTE_MAX_RXTX_INTR_VEC_ID;
                        continue;
                }
                if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
                        rte_errno = E2BIG;
                        ERROR("too many Rx queues for interrupt vector size"
                              " (%d), Rx interrupts cannot be enabled",
                              RTE_MAX_RXTX_INTR_VEC_ID);
                        mlx4_rx_intr_vec_disable(priv);
                        return -rte_errno;
                }
                intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
                intr_handle->efds[count] = rxq->channel->fd;
                count++;
        }
        if (!count)
                mlx4_rx_intr_vec_disable(priv);
        else
                intr_handle->nb_efd = count;
        return 0;
}

/**
 * Process scheduled link status check.
 *
 * If LSC interrupts are requested, process related callback.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_link_status_alarm(struct priv *priv)
{
        const struct rte_intr_conf *const intr_conf =
                &priv->dev->data->dev_conf.intr_conf;

        assert(priv->intr_alarm == 1);
        priv->intr_alarm = 0;
        if (intr_conf->lsc && !mlx4_link_status_check(priv))
                _rte_eth_dev_callback_process(priv->dev,
                                              RTE_ETH_EVENT_INTR_LSC,
                                              NULL);
}

/**
 * Check link status.
 *
 * In case of inconsistency, another check is scheduled.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success (link status is consistent), negative errno value
 *   otherwise and rte_errno is set.
 */
static int
mlx4_link_status_check(struct priv *priv)
{
        struct rte_eth_link *link = &priv->dev->data->dev_link;
        int ret = mlx4_link_update(priv->dev, 0);

        if (ret)
                return ret;
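        /*
         * A link reported up with zero speed, or down with a nonzero
         * speed, is treated as an inconsistent reading.
         */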
        if ((!link->link_speed && link->link_status) ||
            (link->link_speed && !link->link_status)) {
                if (!priv->intr_alarm) {
                        /* Inconsistent status, check again later. */
                        ret = rte_eal_alarm_set(MLX4_INTR_ALARM_TIMEOUT,
                                                (void (*)(void *))
                                                mlx4_link_status_alarm,
                                                priv);
                        if (ret)
                                return ret;
                        priv->intr_alarm = 1;
                }
                rte_errno = EINPROGRESS;
                return -rte_errno;
        }
        return 0;
}

/**
 * Handle interrupts from the NIC.
 *
 * @param priv
 *   Pointer to private structure.
 */
static void
mlx4_interrupt_handler(struct priv *priv)
{
        enum { LSC, RMV, };
        static const enum rte_eth_event_type type[] = {
                [LSC] = RTE_ETH_EVENT_INTR_LSC,
                [RMV] = RTE_ETH_EVENT_INTR_RMV,
        };
        uint32_t caught[RTE_DIM(type)] = { 0 };
        struct ibv_async_event event;
        const struct rte_intr_conf *const intr_conf =
                &priv->dev->data->dev_conf.intr_conf;
        unsigned int i;

        /* Read all messages and acknowledge them. */
        while (!mlx4_glue->get_async_event(priv->ctx, &event)) {
                switch (event.event_type) {
                case IBV_EVENT_PORT_ACTIVE:
                case IBV_EVENT_PORT_ERR:
                        if (intr_conf->lsc && !mlx4_link_status_check(priv))
                                ++caught[LSC];
                        break;
                case IBV_EVENT_DEVICE_FATAL:
                        if (intr_conf->rmv)
                                ++caught[RMV];
                        break;
                default:
                        DEBUG("event type %d on physical port %d not handled",
                              event.event_type, event.element.port_num);
                }
                mlx4_glue->ack_async_event(&event);
        }
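        /*
         * Events were only counted above; invoke each registered callback
         * type at most once, no matter how many matching events were read.
         */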
        for (i = 0; i != RTE_DIM(caught); ++i)
                if (caught[i])
                        _rte_eth_dev_callback_process(priv->dev, type[i],
                                                      NULL);
}

/**
 * MLX4 CQ notification.
 *
 * @param rxq
 *   Pointer to receive queue structure.
 * @param solicited
 *   Whether the request is solicited.
 */
static void
mlx4_arm_cq(struct rxq *rxq, int solicited)
{
        struct mlx4_cq *cq = &rxq->mcq;
        uint64_t doorbell;
        uint32_t sn = cq->arm_sn & MLX4_CQ_DB_GEQ_N_MASK;
        uint32_t ci = cq->cons_index & MLX4_CQ_DB_CI_MASK;
        uint32_t cmd = solicited ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT;

        *cq->arm_db = rte_cpu_to_be_32(sn << 28 | cmd | ci);
        /*
         * Make sure that the doorbell record in host memory is
         * written before ringing the doorbell via PCI MMIO.
         */
        rte_wmb();
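        /*
         * The 64-bit doorbell value carries the arm sequence number,
         * command and CQ number in its upper 32 bits and the consumer
         * index in its lower 32 bits.
         */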
        doorbell = sn << 28 | cmd | cq->cqn;
        doorbell <<= 32;
        doorbell |= ci;
        rte_write64(rte_cpu_to_be_64(doorbell), cq->cq_db_reg);
}

/**
 * Uninstall interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_intr_uninstall(struct priv *priv)
{
        int err = rte_errno; /* Make sure rte_errno remains unchanged. */

        if (priv->intr_handle.fd != -1) {
                rte_intr_callback_unregister(&priv->intr_handle,
                                             (void (*)(void *))
                                             mlx4_interrupt_handler,
                                             priv);
                priv->intr_handle.fd = -1;
        }
        rte_eal_alarm_cancel((void (*)(void *))mlx4_link_status_alarm, priv);
        priv->intr_alarm = 0;
        mlx4_rxq_intr_disable(priv);
        rte_errno = err;
        return 0;
}

/**
 * Install interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_intr_install(struct priv *priv)
{
        const struct rte_intr_conf *const intr_conf =
                &priv->dev->data->dev_conf.intr_conf;
        int rc;

        mlx4_intr_uninstall(priv);
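        /*
         * LSC and RMV notifications both arrive on the Verbs asynchronous
         * event file descriptor, so a single registered callback serves
         * either configuration.
         */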
        if (intr_conf->lsc | intr_conf->rmv) {
                priv->intr_handle.fd = priv->ctx->async_fd;
                rc = rte_intr_callback_register(&priv->intr_handle,
                                                (void (*)(void *))
                                                mlx4_interrupt_handler,
                                                priv);
                if (rc < 0) {
                        rte_errno = -rc;
                        goto error;
                }
        }
        return 0;
error:
        mlx4_intr_uninstall(priv);
        return -rte_errno;
}

/**
 * DPDK callback for Rx queue interrupt disable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
{
        struct rxq *rxq = dev->data->rx_queues[idx];
        struct ibv_cq *ev_cq;
        void *ev_ctx;
        int ret;

        if (!rxq || !rxq->channel) {
                ret = EINVAL;
        } else {
                ret = mlx4_glue->get_cq_event(rxq->cq->channel, &ev_cq,
                                              &ev_ctx);
                if (ret || ev_cq != rxq->cq)
                        ret = EINVAL;
        }
        if (ret) {
                rte_errno = ret;
                WARN("unable to disable interrupt on rx queue %d",
                     idx);
        } else {
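                /*
                 * Bump the arm sequence number used by mlx4_arm_cq() and
                 * acknowledge the CQ event read above.
                 */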
                rxq->mcq.arm_sn++;
                mlx4_glue->ack_cq_events(rxq->cq, 1);
        }
        return -ret;
}

/**
 * DPDK callback for Rx queue interrupt enable.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Rx queue index.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
{
        struct rxq *rxq = dev->data->rx_queues[idx];
        int ret = 0;

        if (!rxq || !rxq->channel) {
                ret = EINVAL;
                rte_errno = ret;
                WARN("unable to arm interrupt on rx queue %d", idx);
        } else {
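                /* Arm the CQ for the next completion, unsolicited. */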
                mlx4_arm_cq(rxq, 0);
        }
        return -ret;
}

/**
 * Enable datapath interrupts.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, negative errno value otherwise and rte_errno is set.
 */
int
mlx4_rxq_intr_enable(struct priv *priv)
{
        const struct rte_intr_conf *const intr_conf =
                &priv->dev->data->dev_conf.intr_conf;

        if (intr_conf->rxq && mlx4_rx_intr_vec_enable(priv) < 0)
                goto error;
        return 0;
error:
        return -rte_errno;
}

/**
 * Disable datapath interrupts, keeping other interrupts intact.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_rxq_intr_disable(struct priv *priv)
{
        int err = rte_errno; /* Make sure rte_errno remains unchanged. */

        mlx4_rx_intr_vec_disable(priv);
        rte_errno = err;
}