/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2018 Microsoft Corp.
 * All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/uio.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_driver.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_bus_vmbus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_log.h>
#include <rte_string_fns.h>

#include "hn_logs.h"
#include "hn_var.h"
#include "hn_nvs.h"

/* Search for VF with matching MAC address, return port id */
static int hn_vf_match(const struct rte_eth_dev *dev)
{
        const struct ether_addr *mac = dev->data->mac_addrs;
        char buf[32];
        int i;

        ether_format_addr(buf, sizeof(buf), mac);
        RTE_ETH_FOREACH_DEV(i) {
                const struct rte_eth_dev *vf_dev = &rte_eth_devices[i];
                const struct ether_addr *vf_mac = vf_dev->data->mac_addrs;

                if (vf_dev == dev)
                        continue;

                ether_format_addr(buf, sizeof(buf), vf_mac);
                if (is_same_ether_addr(mac, vf_mac))
                        return i;
        }
        return -ENOENT;
}

/*
 * Attach new PCI VF device and return the port_id
 */
static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
{
        struct rte_eth_dev_owner owner = { .id = RTE_ETH_DEV_NO_OWNER };
        int ret;

        if (hn_vf_attached(hv)) {
                PMD_DRV_LOG(ERR, "VF already attached");
                return -EEXIST;
        }

        ret = rte_eth_dev_owner_get(port_id, &owner);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Can not find owner for port %d", port_id);
                return ret;
        }

        if (owner.id != RTE_ETH_DEV_NO_OWNER) {
                PMD_DRV_LOG(ERR, "Port %u already owned by other device %s",
                            port_id, owner.name);
                return -EBUSY;
        }

        ret = rte_eth_dev_owner_set(port_id, &hv->owner);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, "Can not set owner for port %d", port_id);
                return ret;
        }

        PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
        hv->vf_port = port_id;
        rte_smp_wmb();

        return 0;
}

/* Add new VF device to synthetic device */
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
{
        int port, err;

        port = hn_vf_match(dev);
        if (port < 0) {
                PMD_DRV_LOG(NOTICE, "No matching MAC found");
                return port;
        }

        rte_spinlock_lock(&hv->vf_lock);
        err = hn_vf_attach(hv, port);

        if (err == 0) {
                dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
                hv->vf_intr = (struct rte_intr_handle) {
                        .fd = -1,
                        .type = RTE_INTR_HANDLE_EXT,
                };
                dev->intr_handle = &hv->vf_intr;
                hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
        }
        rte_spinlock_unlock(&hv->vf_lock);

        return err;
}

/* Remove VF device from synthetic device */
static void hn_vf_remove(struct hn_data *hv)
{
        rte_spinlock_lock(&hv->vf_lock);

        if (!hn_vf_attached(hv)) {
                PMD_DRV_LOG(ERR, "VF path not active");
        } else {
                uint16_t port = hv->vf_port;

                /* Stop incoming packets from arriving on VF */
                hn_nvs_set_datapath(hv, NVS_DATAPATH_SYNTHETIC);

                /* Stop transmission over VF */
                hv->vf_port = HN_INVALID_PORT;
                rte_smp_wmb();

                /* Give back ownership of the saved port id,
                 * since vf_port has already been invalidated above.
                 */
                rte_eth_dev_owner_unset(port, hv->owner.id);
        }
        rte_spinlock_unlock(&hv->vf_lock);
}

/* Handle VF association message from host */
void
hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
                      const struct vmbus_chanpkt_hdr *hdr,
                      const void *data)
{
        struct hn_data *hv = dev->data->dev_private;
        const struct hn_nvs_vf_association *vf_assoc = data;

        if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*vf_assoc))) {
                PMD_DRV_LOG(ERR, "invalid vf association NVS");
                return;
        }

        PMD_DRV_LOG(DEBUG, "VF serial %u %s port %u",
                    vf_assoc->serial,
                    vf_assoc->allocated ? "add to" : "remove from",
                    dev->data->port_id);

        hv->vf_present = vf_assoc->allocated;

        if (dev->state != RTE_ETH_DEV_ATTACHED)
                return;

        if (vf_assoc->allocated)
                hn_vf_add(dev, hv);
        else
                hn_vf_remove(hv);
}

/*
 * Merge the info from the VF and synthetic path.
 * Use the default config of the VF
 * and the minimum number of queues and buffer sizes.
 */
static void hn_vf_info_merge(struct rte_eth_dev *vf_dev,
                             struct rte_eth_dev_info *info)
{
        struct rte_eth_dev_info vf_info;

        rte_eth_dev_info_get(vf_dev->data->port_id, &vf_info);

        info->speed_capa = vf_info.speed_capa;
        info->default_rxportconf = vf_info.default_rxportconf;
        info->default_txportconf = vf_info.default_txportconf;

        info->max_rx_queues = RTE_MIN(vf_info.max_rx_queues,
                                      info->max_rx_queues);
        info->rx_offload_capa &= vf_info.rx_offload_capa;
        info->rx_queue_offload_capa &= vf_info.rx_queue_offload_capa;
        info->flow_type_rss_offloads &= vf_info.flow_type_rss_offloads;

        info->max_tx_queues = RTE_MIN(vf_info.max_tx_queues,
                                      info->max_tx_queues);
        info->tx_offload_capa &= vf_info.tx_offload_capa;
        info->tx_queue_offload_capa &= vf_info.tx_queue_offload_capa;

        info->min_rx_bufsize = RTE_MAX(vf_info.min_rx_bufsize,
                                       info->min_rx_bufsize);
        info->max_rx_pktlen  = RTE_MAX(vf_info.max_rx_pktlen,
                                       info->max_rx_pktlen);
}

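/* Merge VF device capabilities into the reported device info */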
void hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
{
        struct rte_eth_dev *vf_dev;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                hn_vf_info_merge(vf_dev, info);
        rte_spinlock_unlock(&hv->vf_lock);
}

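/* Query link state from the VF device, if one is attached */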
int hn_vf_link_update(struct rte_eth_dev *dev,
                      int wait_to_complete)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->link_update)
                ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
        rte_spinlock_unlock(&hv->vf_lock);

        return ret;
}

/* called when VF has link state interrupts enabled */
static int hn_vf_lsc_event(uint16_t port_id __rte_unused,
                           enum rte_eth_event_type event,
                           void *cb_arg, void *out __rte_unused)
{
        struct rte_eth_dev *dev = cb_arg;

        if (event != RTE_ETH_EVENT_INTR_LSC)
                return 0;

        /* if link state has changed pass on */
        if (hn_dev_link_update(dev, 0) == 0)
                return 0; /* no change */

        return _rte_eth_dev_callback_process(dev,
                                             RTE_ETH_EVENT_INTR_LSC,
                                             NULL);
}

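/*
 * Configure the VF with the same queue counts as the synthetic device,
 * enabling link state interrupts on the VF only when both the
 * application and the VF support them.
 */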
static int _hn_vf_configure(struct rte_eth_dev *dev,
                            uint16_t vf_port,
                            const struct rte_eth_conf *dev_conf)
{
        struct rte_eth_conf vf_conf = *dev_conf;
        struct rte_eth_dev *vf_dev;
        int ret;

        vf_dev = &rte_eth_devices[vf_port];
        if (dev_conf->intr_conf.lsc &&
            (vf_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
                PMD_DRV_LOG(DEBUG, "enabling LSC for VF %u",
                            vf_port);
                vf_conf.intr_conf.lsc = 1;
        } else {
                PMD_DRV_LOG(DEBUG, "disabling LSC for VF %u",
                            vf_port);
                vf_conf.intr_conf.lsc = 0;
        }

        ret = rte_eth_dev_configure(vf_port,
                                    dev->data->nb_rx_queues,
                                    dev->data->nb_tx_queues,
                                    &vf_conf);
        if (ret) {
                PMD_DRV_LOG(ERR,
                            "VF configuration failed: %d", ret);
        } else if (vf_conf.intr_conf.lsc) {
                ret = rte_eth_dev_callback_register(vf_port,
                                                    RTE_ETH_EVENT_INTR_LSC,
                                                    hn_vf_lsc_event, dev);
                if (ret)
                        PMD_DRV_LOG(ERR,
                                    "Failed to register LSC callback for VF %u",
                                    vf_port);
        }
        return ret;
}

/*
 * Configure VF if present.
 * Force VF to have same number of queues as synthetic device.
 */
int hn_vf_configure(struct rte_eth_dev *dev,
                    const struct rte_eth_conf *dev_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        int ret = 0;

        rte_spinlock_lock(&hv->vf_lock);
        if (hv->vf_port != HN_INVALID_PORT)
                ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
        rte_spinlock_unlock(&hv->vf_lock);
        return ret;
}

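/* Report supported packet types from the VF device, if present */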
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        const uint32_t *ptypes = NULL;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
                ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
        rte_spinlock_unlock(&hv->vf_lock);

        return ptypes;
}

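/* Start the VF device, if one is attached */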
int hn_vf_start(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_start(vf_dev->data->port_id);
        rte_spinlock_unlock(&hv->vf_lock);
        return ret;
}

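/* Stop the VF device, if one is attached */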
void hn_vf_stop(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                rte_eth_dev_stop(vf_dev->data->port_id);
        rte_spinlock_unlock(&hv->vf_lock);
}

/* If VF is present, then cascade configuration down */
#define VF_ETHDEV_FUNC(dev, func)                               \
        {                                                       \
                struct hn_data *hv = (dev)->data->dev_private;  \
                struct rte_eth_dev *vf_dev;                     \
                rte_spinlock_lock(&hv->vf_lock);                \
                vf_dev = hn_get_vf_dev(hv);                     \
                if (vf_dev)                                     \
                        func(vf_dev->data->port_id);            \
                rte_spinlock_unlock(&hv->vf_lock);              \
        }

void hn_vf_reset(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC(dev, rte_eth_dev_reset);
}

void hn_vf_close(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC(dev, rte_eth_dev_close);
}

void hn_vf_stats_reset(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC(dev, rte_eth_stats_reset);
}

void hn_vf_allmulticast_enable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_enable);
}

void hn_vf_allmulticast_disable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC(dev, rte_eth_allmulticast_disable);
}

void hn_vf_promiscuous_enable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_enable);
}

void hn_vf_promiscuous_disable(struct rte_eth_dev *dev)
{
        VF_ETHDEV_FUNC(dev, rte_eth_promiscuous_disable);
}

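/* Forward the multicast address list to the VF device, if present */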
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
                       struct ether_addr *mc_addr_set,
                       uint32_t nb_mc_addr)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
                                                   mc_addr_set, nb_mc_addr);
        rte_spinlock_unlock(&hv->vf_lock);
        return ret;
}

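/* Set up the corresponding transmit queue on the VF device, if present */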
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx, uint16_t nb_desc,
                         unsigned int socket_id,
                         const struct rte_eth_txconf *tx_conf)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, tx_conf);
        rte_spinlock_unlock(&hv->vf_lock);
        return ret;
}

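/* Release the VF transmit queue matching the given queue id */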
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
        struct rte_eth_dev *vf_dev;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
                void *subq = vf_dev->data->tx_queues[queue_id];

                (*vf_dev->dev_ops->tx_queue_release)(subq);
        }

        rte_spinlock_unlock(&hv->vf_lock);
}

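/* Set up the corresponding receive queue on the VF device, if present */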
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
                         uint16_t queue_idx, uint16_t nb_desc,
                         unsigned int socket_id,
                         const struct rte_eth_rxconf *rx_conf,
                         struct rte_mempool *mp)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
                                             queue_idx, nb_desc,
                                             socket_id, rx_conf, mp);
        rte_spinlock_unlock(&hv->vf_lock);
        return ret;
}

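/* Release the VF receive queue matching the given queue id */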
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
{
        struct rte_eth_dev *vf_dev;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
                void *subq = vf_dev->data->rx_queues[queue_id];

                (*vf_dev->dev_ops->rx_queue_release)(subq);
        }
        rte_spinlock_unlock(&hv->vf_lock);
}

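/* Retrieve basic statistics from the VF device, if present */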
int hn_vf_stats_get(struct rte_eth_dev *dev,
                    struct rte_eth_stats *stats)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int ret = 0;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev)
                ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
        rte_spinlock_unlock(&hv->vf_lock);
        return ret;
}

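/* Get extended stat names from the VF, prefixed with "vf_" */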
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
                           struct rte_eth_xstat_name *names,
                           unsigned int n)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int i, count = 0;
        char tmp[RTE_ETH_XSTATS_NAME_SIZE];

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->xstats_get_names)
                count = vf_dev->dev_ops->xstats_get_names(vf_dev, names, n);
        rte_spinlock_unlock(&hv->vf_lock);

        /* add vf_ prefix to xstat names */
        if (names) {
                for (i = 0; i < count; i++) {
                        snprintf(tmp, sizeof(tmp), "vf_%s", names[i].name);
                        strlcpy(names[i].name, tmp, sizeof(names[i].name));
                }
        }

        return count;
}

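/* Retrieve extended statistics from the VF device, if present */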
int hn_vf_xstats_get(struct rte_eth_dev *dev,
                     struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;
        int count = 0;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->xstats_get)
                count = vf_dev->dev_ops->xstats_get(vf_dev, xstats, n);
        rte_spinlock_unlock(&hv->vf_lock);

        return count;
}

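/* Reset extended statistics on the VF device, if present */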
void hn_vf_xstats_reset(struct rte_eth_dev *dev)
{
        struct hn_data *hv = dev->data->dev_private;
        struct rte_eth_dev *vf_dev;

        rte_spinlock_lock(&hv->vf_lock);
        vf_dev = hn_get_vf_dev(hv);
        if (vf_dev && vf_dev->dev_ops->xstats_reset)
                vf_dev->dev_ops->xstats_reset(vf_dev);
        rte_spinlock_unlock(&hv->vf_lock);
}