/*-
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 * Intel Corporation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/uio_driver.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/version.h>
#include <linux/slab.h>

#include <rte_pci_dev_features.h>

#include "compat.h"
/**
 * A structure describing the private information for a uio device.
 */
struct rte_uio_pci_dev {
	struct uio_info info;
	struct pci_dev *pdev;
	enum rte_intr_mode mode;
	struct mutex lock;
	int refcnt;
};
static char *intr_mode;
static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
/* sysfs show handler: report the number of VFs currently enabled */
static ssize_t
show_max_vfs(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	return snprintf(buf, 10, "%u\n", dev_num_vf(dev));
}
/* sysfs store handler: enable or disable SR-IOV VFs on the device */
static ssize_t
store_max_vfs(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	int err = 0;
	unsigned long max_vfs;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (0 != kstrtoul(buf, 0, &max_vfs))
		return -EINVAL;

	if (0 == max_vfs)
		pci_disable_sriov(pdev);
	else if (0 == pci_num_vf(pdev))
		err = pci_enable_sriov(pdev, max_vfs);
	else /* do not change max_vfs while VFs are already enabled */
		err = -EINVAL;

	return err ? err : count;
}
static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);
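/*
 * Usage sketch for the attribute above (the PCI address below is
 * hypothetical): sysfs_create_group() in the probe routine places
 * max_vfs in the device's sysfs directory, so SR-IOV can be driven with:
 *
 *   echo 2 > /sys/bus/pci/devices/0000:02:00.0/max_vfs   # enable 2 VFs
 *   cat /sys/bus/pci/devices/0000:02:00.0/max_vfs        # current VF count
 *   echo 0 > /sys/bus/pci/devices/0000:02:00.0/max_vfs   # disable SR-IOV
 */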
static struct attribute *dev_attrs[] = {
	&dev_attr_max_vfs.attr,
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
#ifndef HAVE_PCI_MSI_MASK_IRQ
/*
 * Enable or disable the generation of MSI-X messages by toggling the
 * per-vector mask bit in the MSI-X table.
 */
static void
igbuio_msix_mask_irq(struct msi_desc *desc, s32 state)
{
	u32 mask_bits = desc->masked;
	unsigned int offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;

	if (state != 0)
		mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	else
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;

	if (mask_bits != desc->masked) {
		writel(mask_bits, desc->mask_base + offset);
		readl(desc->mask_base);
		desc->masked = mask_bits;
	}
}
/*
 * Enable or disable the generation of MSI messages by toggling the
 * per-vector bit in the MSI mask register, if the device supports one.
 */
static void
igbuio_msi_mask_irq(struct pci_dev *pdev, struct msi_desc *desc, int32_t state)
{
	u32 mask_bits = desc->masked;
	u32 offset = desc->irq - pdev->irq;
	u32 mask = 1 << offset;

	if (!desc->msi_attrib.maskbit)
		return;

	if (state != 0)
		mask_bits &= ~mask;
	else
		mask_bits |= mask;

	if (mask_bits != desc->masked) {
		pci_write_config_dword(pdev, desc->mask_pos, mask_bits);
		desc->masked = mask_bits;
	}
}
static void
igbuio_mask_irq(struct pci_dev *pdev, enum rte_intr_mode mode, s32 irq_state)
{
	struct msi_desc *desc;
	struct list_head *msi_list;

#ifdef HAVE_MSI_LIST_IN_GENERIC_DEVICE
	msi_list = &pdev->dev.msi_list;
#else
	msi_list = &pdev->msi_list;
#endif

	if (mode == RTE_INTR_MODE_MSIX) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msix_mask_irq(desc, irq_state);
	} else if (mode == RTE_INTR_MODE_MSI) {
		list_for_each_entry(desc, msi_list, list)
			igbuio_msi_mask_irq(pdev, desc, irq_state);
	}
}
#endif /* !HAVE_PCI_MSI_MASK_IRQ */
/**
 * This is the irqcontrol callback to be registered to uio_info.
 * It can be used to enable/disable interrupts from user space processes.
 *
 * @param info
 *  pointer to uio_info.
 * @param irq_state
 *  state value. 1 to enable interrupt, 0 to disable interrupt.
 *
 * @return
 *  - On success, 0.
 *  - On failure, a negative value.
 */
static int
igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *pdev = udev->pdev;

#ifdef HAVE_PCI_MSI_MASK_IRQ
	struct irq_data *irq = irq_get_irq_data(udev->info.irq);
#endif

	pci_cfg_access_lock(pdev);

	if (udev->mode == RTE_INTR_MODE_MSIX || udev->mode == RTE_INTR_MODE_MSI) {
#ifdef HAVE_PCI_MSI_MASK_IRQ
		if (irq_state == 1)
			pci_msi_unmask_irq(irq);
		else
			pci_msi_mask_irq(irq);
#else
		igbuio_mask_irq(pdev, udev->mode, irq_state);
#endif
	}

	if (udev->mode == RTE_INTR_MODE_LEGACY)
		pci_intx(pdev, !!irq_state);

	pci_cfg_access_unlock(pdev);

	return 0;
}
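/*
 * User-space sketch (hypothetical uio_fd): the UIO core invokes the
 * irqcontrol callback above whenever a 4-byte integer is written to the
 * /dev/uioX node, so an application re-enables the interrupt with:
 *
 *   int32_t enable = 1;
 *   write(uio_fd, &enable, sizeof(enable));  // irqcontrol(info, 1)
 */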
/**
 * Interrupt handler. It checks whether the interrupt was raised by the
 * right device; if so, the interrupt is masked here and re-enabled later
 * from user space via the irqcontrol callback.
 */
static irqreturn_t
igbuio_pci_irqhandler(int irq, void *dev_id)
{
	struct rte_uio_pci_dev *udev = (struct rte_uio_pci_dev *)dev_id;
	struct uio_info *info = &udev->info;

	/* Legacy mode needs to mask in hardware */
	if (udev->mode == RTE_INTR_MODE_LEGACY &&
	    !pci_check_and_mask_intx(udev->pdev))
		return IRQ_NONE;

	uio_event_notify(info);

	/* Message signalled mode: no shared IRQ and automasked */
	return IRQ_HANDLED;
}
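/*
 * Event-delivery sketch (hypothetical uio_fd): uio_event_notify() wakes
 * any process blocked in a read() on /dev/uioX, which returns a 4-byte
 * event counter:
 *
 *   uint32_t nevents;
 *   read(uio_fd, &nevents, sizeof(nevents));  // blocks until an interrupt
 */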
static int
igbuio_pci_enable_interrupts(struct rte_uio_pci_dev *udev)
{
	int err = 0;
#ifndef HAVE_ALLOC_IRQ_VECTORS
	struct msix_entry msix_entry;
#endif

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
#ifndef HAVE_ALLOC_IRQ_VECTORS
		msix_entry.entry = 0;
		if (pci_enable_msix(udev->pdev, &msix_entry, 1) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSIX) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#endif

	/* fall back to MSI */
	case RTE_INTR_MODE_MSI:
#ifndef HAVE_ALLOC_IRQ_VECTORS
		if (pci_enable_msi(udev->pdev) == 0) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#else
		if (pci_alloc_irq_vectors(udev->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
			dev_dbg(&udev->pdev->dev, "using MSI");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = pci_irq_vector(udev->pdev, 0);
			udev->mode = RTE_INTR_MODE_MSI;
			break;
		}
#endif
	/* fall back to INTX */
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(udev->pdev)) {
			dev_dbg(&udev->pdev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
			udev->info.irq = udev->pdev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&udev->pdev->dev, "PCI INTX mask not supported\n");
	/* fall back to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = UIO_IRQ_NONE;
		break;

	default:
		dev_err(&udev->pdev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		udev->info.irq = UIO_IRQ_NONE;
		err = -EINVAL;
	}

	if (udev->info.irq != UIO_IRQ_NONE)
		err = request_irq(udev->info.irq, igbuio_pci_irqhandler,
				  udev->info.irq_flags, udev->info.name,
				  udev);
	dev_info(&udev->pdev->dev, "uio device registered with irq %lx\n",
		 udev->info.irq);

	return err;
}
static void
igbuio_pci_disable_interrupts(struct rte_uio_pci_dev *udev)
{
	if (udev->info.irq) {
		free_irq(udev->info.irq, udev);
		udev->info.irq = 0;
	}

#ifndef HAVE_ALLOC_IRQ_VECTORS
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	if (udev->mode == RTE_INTR_MODE_MSI)
		pci_disable_msi(udev->pdev);
#else
	if (udev->mode == RTE_INTR_MODE_MSIX ||
	    udev->mode == RTE_INTR_MODE_MSI)
		pci_free_irq_vectors(udev->pdev);
#endif
}
/**
 * This gets called while opening uio device file.
 */
static int
igbuio_pci_open(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;
	int err;

	mutex_lock(&udev->lock);
	if (++udev->refcnt > 1) {
		mutex_unlock(&udev->lock);
		return 0;
	}

	/* set bus master, which was cleared by the reset function */
	pci_set_master(dev);

	/* enable interrupts */
	err = igbuio_pci_enable_interrupts(udev);
	mutex_unlock(&udev->lock);
	if (err) {
		dev_err(&dev->dev, "Failed to enable interrupts\n");
		return err;
	}

	return 0;
}
static int
igbuio_pci_release(struct uio_info *info, struct inode *inode)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *dev = udev->pdev;

	mutex_lock(&udev->lock);
	if (--udev->refcnt > 0) {
		mutex_unlock(&udev->lock);
		return 0;
	}

	/* disable interrupts */
	igbuio_pci_disable_interrupts(udev);

	/* stop the device from further DMA */
	pci_clear_master(dev);

	mutex_unlock(&udev->lock);
	return 0;
}
/* Remap pci resources described by bar #pci_bar in uio resource n. */
static int
igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
		       int n, int pci_bar, const char *name)
{
	unsigned long addr, len;
	void *internal_addr;

	if (n >= ARRAY_SIZE(info->mem))
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -1;
	internal_addr = ioremap(addr, len);
	if (internal_addr == NULL)
		return -1;
	info->mem[n].name = name;
	info->mem[n].addr = addr;
	info->mem[n].internal_addr = internal_addr;
	info->mem[n].size = len;
	info->mem[n].memtype = UIO_MEM_PHYS;
	return 0;
}
/* Get pci port io resources described by bar #pci_bar in uio resource n. */
static int
igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
			int n, int pci_bar, const char *name)
{
	unsigned long addr, len;

	if (n >= ARRAY_SIZE(info->port))
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -EINVAL;

	info->port[n].name = name;
	info->port[n].start = addr;
	info->port[n].size = len;
	info->port[n].porttype = UIO_PORT_X86;

	return 0;
}
/* Unmap previously ioremap'd resources */
static void
igbuio_pci_release_iomem(struct uio_info *info)
{
	int i;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		if (info->mem[i].internal_addr)
			iounmap(info->mem[i].internal_addr);
	}
}
static int
igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
{
	int i, iom, iop, ret;
	unsigned long flags;
	static const char *bar_names[PCI_STD_RESOURCE_END + 1] = {
		"BAR0",
		"BAR1",
		"BAR2",
		"BAR3",
		"BAR4",
		"BAR5",
	};

	iom = 0;
	iop = 0;
	ret = 0;

	for (i = 0; i < ARRAY_SIZE(bar_names); i++) {
		if (pci_resource_len(dev, i) != 0 &&
				pci_resource_start(dev, i) != 0) {
			flags = pci_resource_flags(dev, i);
			if (flags & IORESOURCE_MEM) {
				ret = igbuio_pci_setup_iomem(dev, info, iom,
							     i, bar_names[i]);
				if (ret != 0)
					return ret;
				iom++;
			} else if (flags & IORESOURCE_IO) {
				ret = igbuio_pci_setup_ioport(dev, info, iop,
							      i, bar_names[i]);
				if (ret != 0)
					return ret;
				iop++;
			}
		}
	}

	return (iom != 0 || iop != 0) ? ret : -ENOENT;
}
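/*
 * Mapping sketch (hypothetical uio_fd; map 0 assumed to be BAR0): the UIO
 * core exposes each info->mem[] entry as /sys/class/uio/uioX/maps/mapN and
 * lets user space mmap() it through /dev/uioX at offset N * page size:
 *
 *   void *bar0 = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                     uio_fd, 0 * getpagesize());
 */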
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
	dma_addr_t map_dma_addr;
	void *map_addr;
	int err;

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	mutex_init(&udev->lock);
	/*
	 * enable device: ask low-level code to enable I/O and memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}
	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
	udev->info.open = igbuio_pci_open;
	udev->info.release = igbuio_pci_release;
	udev->info.priv = udev;
	udev->pdev = dev;

	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_remove_group;

	pci_set_drvdata(dev, udev);

	/*
	 * Doing a harmless dma mapping for attaching the device to
	 * the iommu identity mapping if kernel boots with iommu=pt.
	 * Note this is not a problem if there is no IOMMU at all.
	 */
	map_addr = dma_alloc_coherent(&dev->dev, 1024, &map_dma_addr,
				      GFP_KERNEL);
	if (map_addr)
		memset(map_addr, 0, 1024);

	if (!map_addr)
		dev_info(&dev->dev, "dma mapping failed\n");
	else {
		dev_info(&dev->dev, "mapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);
		dma_free_coherent(&dev->dev, 1024, map_addr, map_dma_addr);
		dev_info(&dev->dev, "unmapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);
	}

	return 0;

fail_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
fail_free:
	kfree(udev);
	return err;
}
static void
igbuio_pci_remove(struct pci_dev *dev)
{
	struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);

	mutex_destroy(&udev->lock);
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	uio_unregister_device(&udev->info);
	igbuio_pci_release_iomem(&udev->info);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(udev);
}
static int
igbuio_config_intr_mode(char *intr_str)
{
	if (!intr_str) {
		pr_info("Use MSIX interrupt by default\n");
		return 0;
	}

	if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
		pr_info("Use MSIX interrupt\n");
	} else if (!strcmp(intr_str, RTE_INTR_MODE_MSI_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSI;
		pr_info("Use MSI interrupt\n");
	} else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
		pr_info("Use legacy interrupt\n");
	} else {
		pr_info("Error: bad parameter - %s\n", intr_str);
		return -EINVAL;
	}

	return 0;
}
static struct pci_driver igbuio_pci_driver = {
	.name = "igb_uio",
	.id_table = NULL, /* devices are bound from user space (sysfs bind/new_id) */
	.probe = igbuio_pci_probe,
	.remove = igbuio_pci_remove,
};
static int __init
igbuio_pci_init_module(void)
{
	int ret;

	ret = igbuio_config_intr_mode(intr_mode);
	if (ret < 0)
		return ret;

	return pci_register_driver(&igbuio_pci_driver);
}

static void __exit
igbuio_pci_exit_module(void)
{
	pci_unregister_driver(&igbuio_pci_driver);
}

module_init(igbuio_pci_init_module);
module_exit(igbuio_pci_exit_module);
module_param(intr_mode, charp, S_IRUGO);
MODULE_PARM_DESC(intr_mode,
"igb_uio interrupt mode (default=msix):\n"
"    " RTE_INTR_MODE_MSIX_NAME "       Use MSIX interrupt\n"
"    " RTE_INTR_MODE_MSI_NAME "        Use MSI interrupt\n"
"    " RTE_INTR_MODE_LEGACY_NAME "     Use Legacy interrupt\n"
"\n");
MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");