LCOV - code coverage report
Current view: top level - kernel/irq - msi.c (source / functions)
Test: coverage.info
Date: 2023-04-06 08:38:28
                 Hit    Total    Coverage
Lines:             0      521      0.0 %
Functions:         0       60      0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (C) 2014 Intel Corp.
       4             :  * Author: Jiang Liu <jiang.liu@linux.intel.com>
       5             :  *
       6             :  * This file is licensed under GPLv2.
       7             :  *
       8             :  * This file contains common code to support Message Signaled Interrupts for
       9             :  * PCI-compatible and non-PCI-compatible devices.
      10             :  */
      11             : #include <linux/types.h>
      12             : #include <linux/device.h>
      13             : #include <linux/irq.h>
      14             : #include <linux/irqdomain.h>
      15             : #include <linux/msi.h>
      16             : #include <linux/slab.h>
      17             : #include <linux/sysfs.h>
      18             : #include <linux/pci.h>
      19             : 
      20             : #include "internals.h"
      21             : 
      22             : /**
      23             :  * struct msi_ctrl - MSI internal management control structure
      24             :  * @domid:      ID of the domain on which management operations should be done
      25             :  * @first:      First (hardware) slot index to operate on
      26             :  * @last:       Last (hardware) slot index to operate on
      27             :  * @nirqs:      The number of Linux interrupts to allocate. Can be larger
      28             :  *              than the range due to PCI/multi-MSI.
      29             :  */
      30             : struct msi_ctrl {
      31             :         unsigned int                    domid;
      32             :         unsigned int                    first;
      33             :         unsigned int                    last;
      34             :         unsigned int                    nirqs;
      35             : };
      36             : 
      37             : /* Invalid Xarray index which is outside of any searchable range */
      38             : #define MSI_XA_MAX_INDEX        (ULONG_MAX - 1)
      39             : /* The maximum domain size */
      40             : #define MSI_XA_DOMAIN_SIZE      (MSI_MAX_INDEX + 1)
      41             : 
      42             : static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl);
      43             : static unsigned int msi_domain_get_hwsize(struct device *dev, unsigned int domid);
      44             : static inline int msi_sysfs_create_group(struct device *dev);
      45             : 
      46             : 
      47             : /**
      48             :  * msi_alloc_desc - Allocate an initialized msi_desc
      49             :  * @dev:        Pointer to the device for which this is allocated
      50             :  * @nvec:       The number of vectors used in this entry
      51             :  * @affinity:   Optional pointer to an affinity mask array of size @nvec
      52             :  *
      53             :  * If @affinity is not %NULL then an affinity array[@nvec] is allocated
      54             :  * and the affinity masks and flags from @affinity are copied.
      55             :  *
      56             :  * Return: pointer to allocated &msi_desc on success or %NULL on failure
      57             :  */
      58           0 : static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
      59             :                                        const struct irq_affinity_desc *affinity)
      60             : {
      61           0 :         struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
      62             : 
      63           0 :         if (!desc)
      64             :                 return NULL;
      65             : 
      66           0 :         desc->dev = dev;
      67           0 :         desc->nvec_used = nvec;
      68           0 :         if (affinity) {
      69           0 :                 desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
      70           0 :                 if (!desc->affinity) {
      71           0 :                         kfree(desc);
      72           0 :                         return NULL;
      73             :                 }
      74             :         }
      75             :         return desc;
      76             : }
      77             : 
      78             : static void msi_free_desc(struct msi_desc *desc)
      79             : {
      80           0 :         kfree(desc->affinity);
      81           0 :         kfree(desc);
      82             : }
      83             : 
      84           0 : static int msi_insert_desc(struct device *dev, struct msi_desc *desc,
      85             :                            unsigned int domid, unsigned int index)
      86             : {
      87           0 :         struct msi_device_data *md = dev->msi.data;
      88           0 :         struct xarray *xa = &md->__domains[domid].store;
      89             :         unsigned int hwsize;
      90             :         int ret;
      91             : 
      92           0 :         hwsize = msi_domain_get_hwsize(dev, domid);
      93             : 
      94           0 :         if (index == MSI_ANY_INDEX) {
      95           0 :                 struct xa_limit limit = { .min = 0, .max = hwsize - 1 };
      96             :                 unsigned int index;
      97             : 
      98             :                 /* Let the xarray allocate a free index within the limit */
      99           0 :                 ret = xa_alloc(xa, &index, desc, limit, GFP_KERNEL);
     100           0 :                 if (ret)
     101             :                         goto fail;
     102             : 
     103           0 :                 desc->msi_index = index;
     104           0 :                 return 0;
     105             :         } else {
     106           0 :                 if (index >= hwsize) {
     107             :                         ret = -ERANGE;
     108             :                         goto fail;
     109             :                 }
     110             : 
     111           0 :                 desc->msi_index = index;
     112           0 :                 ret = xa_insert(xa, index, desc, GFP_KERNEL);
     113           0 :                 if (ret)
     114             :                         goto fail;
     115             :                 return 0;
     116             :         }
     117             : fail:
     118           0 :         msi_free_desc(desc);
     119           0 :         return ret;
     120             : }
     121             : 
     122             : /**
     123             :  * msi_domain_insert_msi_desc - Allocate and initialize an MSI descriptor and
     124             :  *                              insert it at @init_desc->msi_index
     125             :  *
     126             :  * @dev:        Pointer to the device for which the descriptor is allocated
     127             :  * @domid:      The id of the interrupt domain to which the descriptor is added
     128             :  * @init_desc:  Pointer to an MSI descriptor to initialize the new descriptor
     129             :  *
     130             :  * Return: 0 on success or an appropriate failure code.
     131             :  */
     132           0 : int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
     133             :                                struct msi_desc *init_desc)
     134             : {
     135             :         struct msi_desc *desc;
     136             : 
     137             :         lockdep_assert_held(&dev->msi.data->mutex);
     138             : 
     139           0 :         desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
     140           0 :         if (!desc)
     141             :                 return -ENOMEM;
     142             : 
     143             :         /* Copy type specific data to the new descriptor. */
     144           0 :         desc->pci = init_desc->pci;
     145             : 
     146           0 :         return msi_insert_desc(dev, desc, domid, init_desc->msi_index);
     147             : }
     148             : 
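/*
 * Illustrative sketch, not part of msi.c: pre-inserting a descriptor at a
 * fixed hardware index in the default device domain.  The caller, the index
 * and the helper name are hypothetical; the descriptor mutex must be held
 * around the call, as the lockdep assertion above documents.
 */
static int example_insert_desc(struct device *dev, unsigned int hwidx)
{
	struct msi_desc desc;
	int ret;

	memset(&desc, 0, sizeof(desc));
	desc.msi_index = hwidx;		/* hardware slot to occupy */
	desc.nvec_used = 1;

	msi_lock_descs(dev);
	ret = msi_domain_insert_msi_desc(dev, MSI_DEFAULT_DOMAIN, &desc);
	msi_unlock_descs(dev);
	return ret;
}
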
     149           0 : static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
     150             : {
     151           0 :         switch (filter) {
     152             :         case MSI_DESC_ALL:
     153             :                 return true;
     154             :         case MSI_DESC_NOTASSOCIATED:
     155           0 :                 return !desc->irq;
     156             :         case MSI_DESC_ASSOCIATED:
     157           0 :                 return !!desc->irq;
     158             :         }
     159           0 :         WARN_ON_ONCE(1);
     160             :         return false;
     161             : }
     162             : 
     163           0 : static bool msi_ctrl_valid(struct device *dev, struct msi_ctrl *ctrl)
     164             : {
     165             :         unsigned int hwsize;
     166             : 
     167           0 :         if (WARN_ON_ONCE(ctrl->domid >= MSI_MAX_DEVICE_IRQDOMAINS ||
     168             :                          (dev->msi.domain &&
     169             :                           !dev->msi.data->__domains[ctrl->domid].domain)))
     170             :                 return false;
     171             : 
     172           0 :         hwsize = msi_domain_get_hwsize(dev, ctrl->domid);
     173           0 :         if (WARN_ON_ONCE(ctrl->first > ctrl->last ||
     174             :                          ctrl->first >= hwsize ||
     175             :                          ctrl->last >= hwsize))
     176             :                 return false;
     177           0 :         return true;
     178             : }
     179             : 
     180           0 : static void msi_domain_free_descs(struct device *dev, struct msi_ctrl *ctrl)
     181             : {
     182             :         struct msi_desc *desc;
     183             :         struct xarray *xa;
     184             :         unsigned long idx;
     185             : 
     186             :         lockdep_assert_held(&dev->msi.data->mutex);
     187             : 
     188           0 :         if (!msi_ctrl_valid(dev, ctrl))
     189           0 :                 return;
     190             : 
     191           0 :         xa = &dev->msi.data->__domains[ctrl->domid].store;
     192           0 :         xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
     193           0 :                 xa_erase(xa, idx);
     194             : 
     195             :                 /* Leak the descriptor when it is still referenced */
     196           0 :                 if (WARN_ON_ONCE(msi_desc_match(desc, MSI_DESC_ASSOCIATED)))
     197           0 :                         continue;
     198             :                 msi_free_desc(desc);
     199             :         }
     200             : }
     201             : 
     202             : /**
     203             :  * msi_domain_free_msi_descs_range - Free a range of MSI descriptors of a device in an irqdomain
     204             :  * @dev:        Device for which to free the descriptors
     205             :  * @domid:      Id of the domain to operate on
     206             :  * @first:      Index to start freeing from (inclusive)
     207             :  * @last:       Last index to be freed (inclusive)
     208             :  */
     209           0 : void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
     210             :                                      unsigned int first, unsigned int last)
     211             : {
     212           0 :         struct msi_ctrl ctrl = {
     213             :                 .domid  = domid,
     214             :                 .first  = first,
     215             :                 .last   = last,
     216             :         };
     217             : 
     218           0 :         msi_domain_free_descs(dev, &ctrl);
     219           0 : }
     220             : 
     221             : /**
     222             :  * msi_domain_add_simple_msi_descs - Allocate and initialize MSI descriptors
     223             :  * @dev:        Pointer to the device for which the descriptors are allocated
     224             :  * @ctrl:       Allocation control struct
     225             :  *
     226             :  * Return: 0 on success or an appropriate failure code.
     227             :  */
     228           0 : static int msi_domain_add_simple_msi_descs(struct device *dev, struct msi_ctrl *ctrl)
     229             : {
     230             :         struct msi_desc *desc;
     231             :         unsigned int idx;
     232             :         int ret;
     233             : 
     234             :         lockdep_assert_held(&dev->msi.data->mutex);
     235             : 
     236           0 :         if (!msi_ctrl_valid(dev, ctrl))
     237             :                 return -EINVAL;
     238             : 
     239           0 :         for (idx = ctrl->first; idx <= ctrl->last; idx++) {
     240           0 :                 desc = msi_alloc_desc(dev, 1, NULL);
     241           0 :                 if (!desc)
     242             :                         goto fail_mem;
     243           0 :                 ret = msi_insert_desc(dev, desc, ctrl->domid, idx);
     244           0 :                 if (ret)
     245             :                         goto fail;
     246             :         }
     247             :         return 0;
     248             : 
     249             : fail_mem:
     250             :         ret = -ENOMEM;
     251             : fail:
     252           0 :         msi_domain_free_descs(dev, ctrl);
     253           0 :         return ret;
     254             : }
     255             : 
     256           0 : void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
     257             : {
     258           0 :         *msg = entry->msg;
     259           0 : }
     260             : 
     261           0 : void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
     262             : {
     263           0 :         struct msi_desc *entry = irq_get_msi_desc(irq);
     264             : 
     265           0 :         __get_cached_msi_msg(entry, msg);
     266           0 : }
     267             : EXPORT_SYMBOL_GPL(get_cached_msi_msg);
     268             : 
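/*
 * Illustrative sketch, not part of msi.c: reading back the cached message
 * for a Linux interrupt number, e.g. for debugging.  The irq number is
 * assumed to come from a prior allocation; the dev_dbg() output is purely
 * illustrative.
 */
static void example_dump_msi_msg(struct device *dev, unsigned int irq)
{
	struct msi_msg msg;

	get_cached_msi_msg(irq, &msg);
	dev_dbg(dev, "irq %u: address %#x:%#x data %#x\n",
		irq, msg.address_hi, msg.address_lo, msg.data);
}
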
     269           0 : static void msi_device_data_release(struct device *dev, void *res)
     270             : {
     271           0 :         struct msi_device_data *md = res;
     272             :         int i;
     273             : 
     274           0 :         for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++) {
     275           0 :                 msi_remove_device_irq_domain(dev, i);
     276           0 :                 WARN_ON_ONCE(!xa_empty(&md->__domains[i].store));
     277           0 :                 xa_destroy(&md->__domains[i].store);
     278             :         }
     279           0 :         dev->msi.data = NULL;
     280           0 : }
     281             : 
     282             : /**
     283             :  * msi_setup_device_data - Setup MSI device data
     284             :  * @dev:        Device for which MSI device data should be set up
     285             :  *
     286             :  * Return: 0 on success, appropriate error code otherwise
     287             :  *
     288             :  * This can be called more than once for @dev. If the MSI device data is
     289             :  * already allocated the call succeeds. The allocated memory is
     290             :  * automatically released when the device is destroyed.
     291             :  */
     292           0 : int msi_setup_device_data(struct device *dev)
     293             : {
     294             :         struct msi_device_data *md;
     295             :         int ret, i;
     296             : 
     297           0 :         if (dev->msi.data)
     298             :                 return 0;
     299             : 
     300           0 :         md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
     301           0 :         if (!md)
     302             :                 return -ENOMEM;
     303             : 
     304           0 :         ret = msi_sysfs_create_group(dev);
     305           0 :         if (ret) {
     306           0 :                 devres_free(md);
     307           0 :                 return ret;
     308             :         }
     309             : 
     310           0 :         for (i = 0; i < MSI_MAX_DEVICE_IRQDOMAINS; i++)
     311           0 :                 xa_init_flags(&md->__domains[i].store, XA_FLAGS_ALLOC);
     312             : 
     313             :         /*
     314             :          * If @dev::msi::domain is set and is a global MSI domain, copy the
     315             :          * pointer into the domain array so all code can operate on domain
     316             :          * ids. The NULL pointer check is required to keep the legacy
     317             :          * architecture specific PCI/MSI support working.
     318             :          */
     319           0 :         if (dev->msi.domain && !irq_domain_is_msi_parent(dev->msi.domain))
     320           0 :                 md->__domains[MSI_DEFAULT_DOMAIN].domain = dev->msi.domain;
     321             : 
     322           0 :         mutex_init(&md->mutex);
     323           0 :         dev->msi.data = md;
     324           0 :         devres_add(dev, md);
     325           0 :         return 0;
     326             : }
     327             : 
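/*
 * Illustrative sketch, not part of msi.c: bus or driver core code is
 * expected to set up the MSI device data once, before any descriptor or
 * per device domain operation.  The probe-style function is hypothetical.
 */
static int example_bus_prepare_msi(struct device *dev)
{
	int ret;

	/* Idempotent: returns 0 immediately if dev->msi.data already exists */
	ret = msi_setup_device_data(dev);
	if (ret)
		return ret;

	/* From here on msi_lock_descs()/msi_unlock_descs() can be used */
	return 0;
}
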
     328             : /**
     329             :  * msi_lock_descs - Lock the MSI descriptor storage of a device
     330             :  * @dev:        Device to operate on
     331             :  */
     332           0 : void msi_lock_descs(struct device *dev)
     333             : {
     334           0 :         mutex_lock(&dev->msi.data->mutex);
     335           0 : }
     336             : EXPORT_SYMBOL_GPL(msi_lock_descs);
     337             : 
     338             : /**
     339             :  * msi_unlock_descs - Unlock the MSI descriptor storage of a device
     340             :  * @dev:        Device to operate on
     341             :  */
     342           0 : void msi_unlock_descs(struct device *dev)
     343             : {
     344             :         /* Invalidate the index which was cached by the iterator */
     345           0 :         dev->msi.data->__iter_idx = MSI_XA_MAX_INDEX;
     346           0 :         mutex_unlock(&dev->msi.data->mutex);
     347           0 : }
     348             : EXPORT_SYMBOL_GPL(msi_unlock_descs);
     349             : 
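/*
 * Illustrative sketch, not part of msi.c: typical bracketing of descriptor
 * accesses.  msi_for_each_desc() is the iterator macro from <linux/msi.h>
 * and requires the descriptor mutex to be held; the per descriptor work
 * shown here is only illustrative.
 */
static void example_walk_descs(struct device *dev)
{
	struct msi_desc *desc;

	msi_lock_descs(dev);
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
		dev_dbg(dev, "MSI index %u -> Linux irq %u\n",
			desc->msi_index, desc->irq);
	msi_unlock_descs(dev);
}
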
     350           0 : static struct msi_desc *msi_find_desc(struct msi_device_data *md, unsigned int domid,
     351             :                                       enum msi_desc_filter filter)
     352             : {
     353           0 :         struct xarray *xa = &md->__domains[domid].store;
     354             :         struct msi_desc *desc;
     355             : 
     356           0 :         xa_for_each_start(xa, md->__iter_idx, desc, md->__iter_idx) {
     357           0 :                 if (msi_desc_match(desc, filter))
     358             :                         return desc;
     359             :         }
     360           0 :         md->__iter_idx = MSI_XA_MAX_INDEX;
     361           0 :         return NULL;
     362             : }
     363             : 
     364             : /**
     365             :  * msi_domain_first_desc - Get the first MSI descriptor of an irqdomain associated to a device
     366             :  * @dev:        Device to operate on
     367             :  * @domid:      The id of the interrupt domain which should be walked.
     368             :  * @filter:     Descriptor state filter
     369             :  *
     370             :  * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
     371             :  * must be invoked before the call.
     372             :  *
     373             :  * Return: Pointer to the first MSI descriptor matching the search
     374             :  *         criteria, NULL if none found.
     375             :  */
     376           0 : struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
     377             :                                        enum msi_desc_filter filter)
     378             : {
     379           0 :         struct msi_device_data *md = dev->msi.data;
     380             : 
     381           0 :         if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
     382             :                 return NULL;
     383             : 
     384             :         lockdep_assert_held(&md->mutex);
     385             : 
     386           0 :         md->__iter_idx = 0;
     387           0 :         return msi_find_desc(md, domid, filter);
     388             : }
     389             : EXPORT_SYMBOL_GPL(msi_domain_first_desc);
     390             : 
     391             : /**
     392             :  * msi_next_desc - Get the next MSI descriptor of a device
     393             :  * @dev:        Device to operate on
     394             :  * @domid:      The id of the interrupt domain which should be walked.
     395             :  * @filter:     Descriptor state filter
     396             :  *
     397             :  * The first invocation of msi_next_desc() has to be preceded by a
     398             :  * successful invocation of msi_domain_first_desc(). Consecutive invocations are
     399             :  * only valid if the previous one was successful. All these operations have
     400             :  * to be done within the same MSI mutex held region.
     401             :  *
     402             :  * Return: Pointer to the next MSI descriptor matching the search
     403             :  *         criteria, NULL if none found.
     404             :  */
     405           0 : struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
     406             :                                enum msi_desc_filter filter)
     407             : {
     408           0 :         struct msi_device_data *md = dev->msi.data;
     409             : 
     410           0 :         if (WARN_ON_ONCE(!md || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
     411             :                 return NULL;
     412             : 
     413             :         lockdep_assert_held(&md->mutex);
     414             : 
     415           0 :         if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
     416             :                 return NULL;
     417             : 
     418           0 :         md->__iter_idx++;
     419           0 :         return msi_find_desc(md, domid, filter);
     420             : }
     421             : EXPORT_SYMBOL_GPL(msi_next_desc);
     422             : 
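/*
 * Illustrative sketch, not part of msi.c: the open-coded form of the walk
 * that msi_for_each_desc() wraps, here against an arbitrary secondary
 * domain id.  The domain id and the counting are assumptions made only for
 * illustration.
 */
static unsigned int example_count_unassociated(struct device *dev,
					       unsigned int domid)
{
	struct msi_desc *desc;
	unsigned int cnt = 0;

	msi_lock_descs(dev);
	for (desc = msi_domain_first_desc(dev, domid, MSI_DESC_NOTASSOCIATED);
	     desc;
	     desc = msi_next_desc(dev, domid, MSI_DESC_NOTASSOCIATED))
		cnt++;
	msi_unlock_descs(dev);
	return cnt;
}
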
     423             : /**
     424             :  * msi_domain_get_virq - Look up the Linux interrupt number for an MSI index on an interrupt domain
     425             :  * @dev:        Device to operate on
     426             :  * @domid:      Domain ID of the interrupt domain associated to the device
     427             :  * @index:      MSI interrupt index to look for (0-based)
     428             :  *
     429             :  * Return: The Linux interrupt number on success (> 0), 0 if not found
     430             :  */
     431           0 : unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index)
     432             : {
     433             :         struct msi_desc *desc;
     434           0 :         unsigned int ret = 0;
     435           0 :         bool pcimsi = false;
     436             :         struct xarray *xa;
     437             : 
     438           0 :         if (!dev->msi.data)
     439             :                 return 0;
     440             : 
     441           0 :         if (WARN_ON_ONCE(index > MSI_MAX_INDEX || domid >= MSI_MAX_DEVICE_IRQDOMAINS))
     442             :                 return 0;
     443             : 
     444             :         /* This check is only valid for the PCI default MSI domain */
     445           0 :         if (dev_is_pci(dev) && domid == MSI_DEFAULT_DOMAIN)
     446           0 :                 pcimsi = to_pci_dev(dev)->msi_enabled;
     447             : 
     448           0 :         msi_lock_descs(dev);
     449           0 :         xa = &dev->msi.data->__domains[domid].store;
     450           0 :         desc = xa_load(xa, pcimsi ? 0 : index);
     451           0 :         if (desc && desc->irq) {
     452             :                 /*
     453             :                  * PCI-MSI has only one descriptor for multiple interrupts.
     454             :                  * PCI-MSIX and platform MSI use a descriptor per
     455             :                  * interrupt.
     456             :                  */
     457           0 :                 if (pcimsi) {
     458           0 :                         if (index < desc->nvec_used)
     459           0 :                                 ret = desc->irq + index;
     460             :                 } else {
     461             :                         ret = desc->irq;
     462             :                 }
     463             :         }
     464             : 
     465           0 :         msi_unlock_descs(dev);
     466           0 :         return ret;
     467             : }
     468             : EXPORT_SYMBOL_GPL(msi_domain_get_virq);
     469             : 
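/*
 * Illustrative sketch, not part of msi.c: translating a hardware MSI-X
 * index into a Linux interrupt number and installing a handler (needs
 * <linux/interrupt.h>).  The handler, index and error policy are
 * hypothetical; a return of 0 from msi_domain_get_virq() means "not found".
 */
static int example_request_msix(struct device *dev, unsigned int index,
				irq_handler_t handler, void *data)
{
	unsigned int virq = msi_domain_get_virq(dev, MSI_DEFAULT_DOMAIN, index);

	if (!virq)
		return -ENOENT;

	return request_irq(virq, handler, 0, dev_name(dev), data);
}
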
     470             : #ifdef CONFIG_SYSFS
     471             : static struct attribute *msi_dev_attrs[] = {
     472             :         NULL
     473             : };
     474             : 
     475             : static const struct attribute_group msi_irqs_group = {
     476             :         .name   = "msi_irqs",
     477             :         .attrs  = msi_dev_attrs,
     478             : };
     479             : 
     480             : static inline int msi_sysfs_create_group(struct device *dev)
     481             : {
     482           0 :         return devm_device_add_group(dev, &msi_irqs_group);
     483             : }
     484             : 
     485           0 : static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
     486             :                              char *buf)
     487             : {
     488             :         /* MSI vs. MSIX is per device not per interrupt */
     489           0 :         bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;
     490             : 
     491           0 :         return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
     492             : }
     493             : 
     494           0 : static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
     495             : {
     496           0 :         struct device_attribute *attrs = desc->sysfs_attrs;
     497             :         int i;
     498             : 
     499           0 :         if (!attrs)
     500             :                 return;
     501             : 
     502           0 :         desc->sysfs_attrs = NULL;
     503           0 :         for (i = 0; i < desc->nvec_used; i++) {
     504           0 :                 if (attrs[i].show)
     505           0 :                         sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
     506           0 :                 kfree(attrs[i].attr.name);
     507             :         }
     508           0 :         kfree(attrs);
     509             : }
     510             : 
     511           0 : static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
     512             : {
     513             :         struct device_attribute *attrs;
     514             :         int ret, i;
     515             : 
     516           0 :         attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
     517           0 :         if (!attrs)
     518             :                 return -ENOMEM;
     519             : 
     520           0 :         desc->sysfs_attrs = attrs;
     521           0 :         for (i = 0; i < desc->nvec_used; i++) {
     522             :                 sysfs_attr_init(&attrs[i].attr);
     523           0 :                 attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
     524           0 :                 if (!attrs[i].attr.name) {
     525             :                         ret = -ENOMEM;
     526             :                         goto fail;
     527             :                 }
     528             : 
     529           0 :                 attrs[i].attr.mode = 0444;
     530           0 :                 attrs[i].show = msi_mode_show;
     531             : 
     532           0 :                 ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
     533           0 :                 if (ret) {
     534           0 :                         attrs[i].show = NULL;
     535           0 :                         goto fail;
     536             :                 }
     537             :         }
     538             :         return 0;
     539             : 
     540             : fail:
     541           0 :         msi_sysfs_remove_desc(dev, desc);
     542           0 :         return ret;
     543             : }
     544             : 
     545             : #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
     546             : /**
     547             :  * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
     548             :  * @dev:        The device (PCI, platform etc) which will get sysfs entries
     549             :  */
     550             : int msi_device_populate_sysfs(struct device *dev)
     551             : {
     552             :         struct msi_desc *desc;
     553             :         int ret;
     554             : 
     555             :         msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
     556             :                 if (desc->sysfs_attrs)
     557             :                         continue;
     558             :                 ret = msi_sysfs_populate_desc(dev, desc);
     559             :                 if (ret)
     560             :                         return ret;
     561             :         }
     562             :         return 0;
     563             : }
     564             : 
     565             : /**
     566             :  * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
     567             :  * @dev:                The device (PCI, platform etc) for which to remove
     568             :  *                      sysfs entries
     569             :  */
     570             : void msi_device_destroy_sysfs(struct device *dev)
     571             : {
     572             :         struct msi_desc *desc;
     573             : 
     574             :         msi_for_each_desc(desc, dev, MSI_DESC_ALL)
     575             :                 msi_sysfs_remove_desc(dev, desc);
     576             : }
     577             : #endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
     578             : #else /* CONFIG_SYSFS */
     579             : static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
     580             : static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
     581             : static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
     582             : #endif /* !CONFIG_SYSFS */
     583             : 
     584           0 : static struct irq_domain *msi_get_device_domain(struct device *dev, unsigned int domid)
     585             : {
     586             :         struct irq_domain *domain;
     587             : 
     588             :         lockdep_assert_held(&dev->msi.data->mutex);
     589             : 
     590           0 :         if (WARN_ON_ONCE(domid >= MSI_MAX_DEVICE_IRQDOMAINS))
     591             :                 return NULL;
     592             : 
     593           0 :         domain = dev->msi.data->__domains[domid].domain;
     594           0 :         if (!domain)
     595             :                 return NULL;
     596             : 
     597           0 :         if (WARN_ON_ONCE(irq_domain_is_msi_parent(domain)))
     598             :                 return NULL;
     599             : 
     600             :         return domain;
     601             : }
     602             : 
     603             : static unsigned int msi_domain_get_hwsize(struct device *dev, unsigned int domid)
     604             : {
     605             :         struct msi_domain_info *info;
     606             :         struct irq_domain *domain;
     607             : 
     608           0 :         domain = msi_get_device_domain(dev, domid);
     609           0 :         if (domain) {
     610           0 :                 info = domain->host_data;
     611           0 :                 return info->hwsize;
     612             :         }
     613             :         /* No domain, default to MSI_XA_DOMAIN_SIZE */
     614             :         return MSI_XA_DOMAIN_SIZE;
     615             : }
     616             : 
     617             : static inline void irq_chip_write_msi_msg(struct irq_data *data,
     618             :                                           struct msi_msg *msg)
     619             : {
     620           0 :         data->chip->irq_write_msi_msg(data, msg);
     621             : }
     622             : 
     623           0 : static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
     624             : {
     625           0 :         struct msi_domain_info *info = domain->host_data;
     626             : 
     627             :         /*
     628             :          * If the MSI provider has messed with the second message and
     629             :  * not advertised that it is level-capable, signal the breakage.
     630             :          */
     631           0 :         WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
     632             :                   (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
     633             :                 (msg[1].address_lo || msg[1].address_hi || msg[1].data));
     634           0 : }
     635             : 
     636             : /**
     637             :  * msi_domain_set_affinity - Generic affinity setter function for MSI domains
     638             :  * @irq_data:   The irq data associated to the interrupt
     639             :  * @mask:       The affinity mask to set
     640             :  * @force:      Flag to enforce setting (disable online checks)
     641             :  *
     642             :  * Intended to be used by MSI interrupt controllers which are
     643             :  * implemented with hierarchical domains.
     644             :  *
     645             :  * Return: IRQ_SET_MASK_* result code
     646             :  */
     647           0 : int msi_domain_set_affinity(struct irq_data *irq_data,
     648             :                             const struct cpumask *mask, bool force)
     649             : {
     650           0 :         struct irq_data *parent = irq_data->parent_data;
     651           0 :         struct msi_msg msg[2] = { [1] = { }, };
     652             :         int ret;
     653             : 
     654           0 :         ret = parent->chip->irq_set_affinity(parent, mask, force);
     655           0 :         if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
     656           0 :                 BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
     657           0 :                 msi_check_level(irq_data->domain, msg);
     658             :                 irq_chip_write_msi_msg(irq_data, msg);
     659             :         }
     660             : 
     661           0 :         return ret;
     662             : }
     663             : 
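/*
 * Illustrative sketch, not part of msi.c: an MSI irq_chip can either rely
 * on MSI_FLAG_USE_DEF_CHIP_OPS (see msi_domain_update_chip_ops() below) or
 * plug the generic affinity setter in explicitly.  Both the chip and the
 * message writer below are hypothetical.
 */
static void example_write_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	/* Hypothetical: program the message into device specific registers */
}

static struct irq_chip example_msi_chip = {
	.name			= "example-MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_write_msi_msg	= example_write_msi_msg,
	.irq_set_affinity	= msi_domain_set_affinity,
};
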
     664           0 : static int msi_domain_activate(struct irq_domain *domain,
     665             :                                struct irq_data *irq_data, bool early)
     666             : {
     667           0 :         struct msi_msg msg[2] = { [1] = { }, };
     668             : 
     669           0 :         BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
     670           0 :         msi_check_level(irq_data->domain, msg);
     671           0 :         irq_chip_write_msi_msg(irq_data, msg);
     672           0 :         return 0;
     673             : }
     674             : 
     675           0 : static void msi_domain_deactivate(struct irq_domain *domain,
     676             :                                   struct irq_data *irq_data)
     677             : {
     678             :         struct msi_msg msg[2];
     679             : 
     680           0 :         memset(msg, 0, sizeof(msg));
     681           0 :         irq_chip_write_msi_msg(irq_data, msg);
     682           0 : }
     683             : 
     684           0 : static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
     685             :                             unsigned int nr_irqs, void *arg)
     686             : {
     687           0 :         struct msi_domain_info *info = domain->host_data;
     688           0 :         struct msi_domain_ops *ops = info->ops;
     689           0 :         irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
     690             :         int i, ret;
     691             : 
     692           0 :         if (irq_find_mapping(domain, hwirq) > 0)
     693             :                 return -EEXIST;
     694             : 
     695           0 :         if (domain->parent) {
     696           0 :                 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
     697           0 :                 if (ret < 0)
     698             :                         return ret;
     699             :         }
     700             : 
     701           0 :         for (i = 0; i < nr_irqs; i++) {
     702           0 :                 ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
     703           0 :                 if (ret < 0) {
     704           0 :                         if (ops->msi_free) {
     705           0 :                                 for (i--; i > 0; i--)
     706           0 :                                         ops->msi_free(domain, info, virq + i);
     707             :                         }
     708           0 :                         irq_domain_free_irqs_top(domain, virq, nr_irqs);
     709           0 :                         return ret;
     710             :                 }
     711             :         }
     712             : 
     713             :         return 0;
     714             : }
     715             : 
     716           0 : static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
     717             :                             unsigned int nr_irqs)
     718             : {
     719           0 :         struct msi_domain_info *info = domain->host_data;
     720             :         int i;
     721             : 
     722           0 :         if (info->ops->msi_free) {
     723           0 :                 for (i = 0; i < nr_irqs; i++)
     724           0 :                         info->ops->msi_free(domain, info, virq + i);
     725             :         }
     726           0 :         irq_domain_free_irqs_top(domain, virq, nr_irqs);
     727           0 : }
     728             : 
     729             : static const struct irq_domain_ops msi_domain_ops = {
     730             :         .alloc          = msi_domain_alloc,
     731             :         .free           = msi_domain_free,
     732             :         .activate       = msi_domain_activate,
     733             :         .deactivate     = msi_domain_deactivate,
     734             : };
     735             : 
     736           0 : static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
     737             :                                                 msi_alloc_info_t *arg)
     738             : {
     739           0 :         return arg->hwirq;
     740             : }
     741             : 
     742           0 : static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
     743             :                                   int nvec, msi_alloc_info_t *arg)
     744             : {
     745           0 :         memset(arg, 0, sizeof(*arg));
     746           0 :         return 0;
     747             : }
     748             : 
     749           0 : static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
     750             :                                     struct msi_desc *desc)
     751             : {
     752           0 :         arg->desc = desc;
     753           0 : }
     754             : 
     755           0 : static int msi_domain_ops_init(struct irq_domain *domain,
     756             :                                struct msi_domain_info *info,
     757             :                                unsigned int virq, irq_hw_number_t hwirq,
     758             :                                msi_alloc_info_t *arg)
     759             : {
     760           0 :         irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
     761             :                                       info->chip_data);
     762           0 :         if (info->handler && info->handler_name) {
     763           0 :                 __irq_set_handler(virq, info->handler, 0, info->handler_name);
     764           0 :                 if (info->handler_data)
     765           0 :                         irq_set_handler_data(virq, info->handler_data);
     766             :         }
     767           0 :         return 0;
     768             : }
     769             : 
     770             : static struct msi_domain_ops msi_domain_ops_default = {
     771             :         .get_hwirq              = msi_domain_ops_get_hwirq,
     772             :         .msi_init               = msi_domain_ops_init,
     773             :         .msi_prepare            = msi_domain_ops_prepare,
     774             :         .set_desc               = msi_domain_ops_set_desc,
     775             : };
     776             : 
     777           0 : static void msi_domain_update_dom_ops(struct msi_domain_info *info)
     778             : {
     779           0 :         struct msi_domain_ops *ops = info->ops;
     780             : 
     781           0 :         if (ops == NULL) {
     782           0 :                 info->ops = &msi_domain_ops_default;
     783             :                 return;
     784             :         }
     785             : 
     786           0 :         if (!(info->flags & MSI_FLAG_USE_DEF_DOM_OPS))
     787             :                 return;
     788             : 
     789           0 :         if (ops->get_hwirq == NULL)
     790           0 :                 ops->get_hwirq = msi_domain_ops_default.get_hwirq;
     791           0 :         if (ops->msi_init == NULL)
     792           0 :                 ops->msi_init = msi_domain_ops_default.msi_init;
     793           0 :         if (ops->msi_prepare == NULL)
     794           0 :                 ops->msi_prepare = msi_domain_ops_default.msi_prepare;
     795           0 :         if (ops->set_desc == NULL)
     796           0 :                 ops->set_desc = msi_domain_ops_default.set_desc;
     797             : }
     798             : 
     799           0 : static void msi_domain_update_chip_ops(struct msi_domain_info *info)
     800             : {
     801           0 :         struct irq_chip *chip = info->chip;
     802             : 
     803           0 :         BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
     804           0 :         if (!chip->irq_set_affinity)
     805           0 :                 chip->irq_set_affinity = msi_domain_set_affinity;
     806           0 : }
     807             : 
     808           0 : static struct irq_domain *__msi_create_irq_domain(struct fwnode_handle *fwnode,
     809             :                                                   struct msi_domain_info *info,
     810             :                                                   unsigned int flags,
     811             :                                                   struct irq_domain *parent)
     812             : {
     813             :         struct irq_domain *domain;
     814             : 
     815           0 :         if (info->hwsize > MSI_XA_DOMAIN_SIZE)
     816             :                 return NULL;
     817             : 
     818             :         /*
     819             :          * Hardware size 0 is valid for backwards compatibility and for
     820             :          * domains which are not backed by a hardware table. Grant the
     821             :          * maximum index space.
     822             :          */
     823           0 :         if (!info->hwsize)
     824           0 :                 info->hwsize = MSI_XA_DOMAIN_SIZE;
     825             : 
     826           0 :         msi_domain_update_dom_ops(info);
     827           0 :         if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
     828           0 :                 msi_domain_update_chip_ops(info);
     829             : 
     830           0 :         domain = irq_domain_create_hierarchy(parent, flags | IRQ_DOMAIN_FLAG_MSI, 0,
     831             :                                              fwnode, &msi_domain_ops, info);
     832             : 
     833           0 :         if (domain)
     834           0 :                 irq_domain_update_bus_token(domain, info->bus_token);
     835             : 
     836             :         return domain;
     837             : }
     838             : 
     839             : /**
     840             :  * msi_create_irq_domain - Create an MSI interrupt domain
     841             :  * @fwnode:     Optional fwnode of the interrupt controller
     842             :  * @info:       MSI domain info
     843             :  * @parent:     Parent irq domain
     844             :  *
     845             :  * Return: pointer to the created &struct irq_domain or %NULL on failure
     846             :  */
     847           0 : struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
     848             :                                          struct msi_domain_info *info,
     849             :                                          struct irq_domain *parent)
     850             : {
     851           0 :         return __msi_create_irq_domain(fwnode, info, 0, parent);
     852             : }
     853             : 
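/*
 * Illustrative sketch, not part of msi.c: a minimal global MSI domain on
 * top of a parent domain, using the default domain and chip ops.  The
 * fwnode and parent would come from the interrupt controller driver; the
 * hwsize and the chip (from the hypothetical sketch next to
 * msi_domain_set_affinity() above) are assumptions.
 */
static struct msi_domain_info example_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	.chip	= &example_msi_chip,	/* hypothetical chip from above */
	.hwsize	= 64,			/* assumed hardware table size */
};

static struct irq_domain *example_init_msi(struct fwnode_handle *fwnode,
					   struct irq_domain *parent)
{
	return msi_create_irq_domain(fwnode, &example_msi_domain_info, parent);
}
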
     854             : /**
     855             :  * msi_parent_init_dev_msi_info - Delegate initialization of device MSI info down
     856             :  *                                in the domain hierarchy
     857             :  * @dev:                The device for which the domain should be created
     858             :  * @domain:             The domain in the hierarchy this op is being called on
     859             :  * @msi_parent_domain:  The IRQ_DOMAIN_FLAG_MSI_PARENT domain for the child to
     860             :  *                      be created
     861             :  * @msi_child_info:     The MSI domain info of the IRQ_DOMAIN_FLAG_MSI_DEVICE
     862             :  *                      domain to be created
     863             :  *
     864             :  * Return: true on success, false otherwise
     865             :  *
     866             :  * This is the most complex problem of per device MSI domains and the
     867             :  * underlying interrupt domain hierarchy:
     868             :  *
     869             :  * The device domain to be initialized requests the broadest feature set
     870             :  * possible and the underlying domain hierarchy puts restrictions on it.
     871             :  *
     872             :  * That's trivial for a simple parent->child relationship, but it gets
     873             :  * interesting with an intermediate domain: root->parent->child.  The
     874             :  * intermediate 'parent' can expand the capabilities which the 'root'
     875             :  * domain is providing. So that creates a classic chicken-and-egg problem:
     876             :  * Which entity is doing the restrictions/expansions?
     877             :  *
     878             :  * One solution is to let the root domain handle the initialization; that's
     879             :  * why there are the @domain and the @msi_parent_domain pointers.
     880             :  */
     881           0 : bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
     882             :                                   struct irq_domain *msi_parent_domain,
     883             :                                   struct msi_domain_info *msi_child_info)
     884             : {
     885           0 :         struct irq_domain *parent = domain->parent;
     886             : 
     887           0 :         if (WARN_ON_ONCE(!parent || !parent->msi_parent_ops ||
     888             :                          !parent->msi_parent_ops->init_dev_msi_info))
     889             :                 return false;
     890             : 
     891           0 :         return parent->msi_parent_ops->init_dev_msi_info(dev, parent, msi_parent_domain,
     892             :                                                          msi_child_info);
     893             : }
     894             : 
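/*
 * Illustrative sketch, not part of msi.c: an intermediate parent domain,
 * such as an interrupt remapping unit, can delegate the device MSI info
 * setup to its own parent by using this helper as its init_dev_msi_info()
 * callback.  The supported flags and the prefix are assumptions.
 */
static const struct msi_parent_ops example_intermediate_parent_ops = {
	.supported_flags	= MSI_GENERIC_FLAGS_MASK,
	.prefix			= "EX-",
	.init_dev_msi_info	= msi_parent_init_dev_msi_info,
};
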
     895             : /**
     896             :  * msi_create_device_irq_domain - Create a device MSI interrupt domain
     897             :  * @dev:                Pointer to the device
     898             :  * @domid:              Domain id
     899             :  * @template:           MSI domain info bundle used as template
     900             :  * @hwsize:             Maximum number of MSI table entries (0 if unknown or unlimited)
     901             :  * @domain_data:        Optional pointer to domain specific data which is set in
     902             :  *                      msi_domain_info::data
     903             :  * @chip_data:          Optional pointer to chip specific data which is set in
     904             :  *                      msi_domain_info::chip_data
     905             :  *
     906             :  * Return: True on success, false otherwise
     907             :  *
     908             :  * There is no firmware node required for this interface because the per
     909             :  * device domains are software constructs which are actually closer to the
     910             :  * hardware reality than any firmware can describe them.
     911             :  *
     912             :  * The domain name and the irq chip name for a MSI device domain are
     913             :  * composed by: "$(PREFIX)$(CHIPNAME)-$(DEVNAME)"
     914             :  *
     915             :  * $PREFIX:   Optional prefix provided by the underlying MSI parent domain
     916             :  *            via msi_parent_ops::prefix. If that pointer is NULL the prefix
     917             :  *            is empty.
     918             :  * $CHIPNAME: The name of the irq_chip in @template
     919             :  * $DEVNAME:  The name of the device
     920             :  *
     921             :  * This results in understandable chip names and hardware interrupt numbers
     922             :  * in e.g. /proc/interrupts
     923             :  *
     924             :  * PCI-MSI-0000:00:1c.0     0-edge  Parent domain has no prefix
     925             :  * IR-PCI-MSI-0000:00:1c.4  0-edge  Same with interrupt remapping prefix 'IR-'
     926             :  *
     927             :  * IR-PCI-MSIX-0000:3d:00.0 0-edge  Hardware interrupt numbers reflect
     928             :  * IR-PCI-MSIX-0000:3d:00.0 1-edge  the real MSI-X index on that device
     929             :  * IR-PCI-MSIX-0000:3d:00.0 2-edge
     930             :  *
     931             :  * On IMS domains the hardware interrupt number is either a table entry
     932             :  * index or a purely software managed index but it is guaranteed to be
     933             :  * unique.
     934             :  *
     935             :  * The domain pointer is stored in @dev::msi::data::__irqdomains[]. All
     936             :  * subsequent operations on the domain depend on the domain id.
     937             :  *
     938             :  * The domain is automatically freed when the device is removed via devres
     939             :  * in the context of @dev::msi::data freeing, but it can also be
     940             :  * independently removed via @msi_remove_device_irq_domain().
     941             :  */
     942           0 : bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
     943             :                                   const struct msi_domain_template *template,
     944             :                                   unsigned int hwsize, void *domain_data,
     945             :                                   void *chip_data)
     946             : {
     947           0 :         struct irq_domain *domain, *parent = dev->msi.domain;
     948             :         const struct msi_parent_ops *pops;
     949             :         struct msi_domain_template *bundle;
     950             :         struct fwnode_handle *fwnode;
     951             : 
     952           0 :         if (!irq_domain_is_msi_parent(parent))
     953             :                 return false;
     954             : 
     955           0 :         if (domid >= MSI_MAX_DEVICE_IRQDOMAINS)
     956             :                 return false;
     957             : 
     958           0 :         bundle = kmemdup(template, sizeof(*bundle), GFP_KERNEL);
     959           0 :         if (!bundle)
     960             :                 return false;
     961             : 
     962           0 :         bundle->info.hwsize = hwsize;
     963           0 :         bundle->info.chip = &bundle->chip;
     964           0 :         bundle->info.ops = &bundle->ops;
     965           0 :         bundle->info.data = domain_data;
     966           0 :         bundle->info.chip_data = chip_data;
     967             : 
     968           0 :         pops = parent->msi_parent_ops;
     969           0 :         snprintf(bundle->name, sizeof(bundle->name), "%s%s-%s",
     970           0 :                  pops->prefix ? : "", bundle->chip.name, dev_name(dev));
     971           0 :         bundle->chip.name = bundle->name;
     972             : 
     973           0 :         fwnode = irq_domain_alloc_named_fwnode(bundle->name);
     974           0 :         if (!fwnode)
     975             :                 goto free_bundle;
     976             : 
     977           0 :         if (msi_setup_device_data(dev))
     978             :                 goto free_fwnode;
     979             : 
     980           0 :         msi_lock_descs(dev);
     981             : 
     982           0 :         if (WARN_ON_ONCE(msi_get_device_domain(dev, domid)))
     983             :                 goto fail;
     984             : 
     985           0 :         if (!pops->init_dev_msi_info(dev, parent, parent, &bundle->info))
     986             :                 goto fail;
     987             : 
     988           0 :         domain = __msi_create_irq_domain(fwnode, &bundle->info, IRQ_DOMAIN_FLAG_MSI_DEVICE, parent);
     989           0 :         if (!domain)
     990             :                 goto fail;
     991             : 
     992           0 :         domain->dev = dev;
     993           0 :         dev->msi.data->__domains[domid].domain = domain;
     994           0 :         msi_unlock_descs(dev);
     995           0 :         return true;
     996             : 
     997             : fail:
     998             :         msi_unlock_descs(dev);
     999             : free_fwnode:
    1000           0 :         irq_domain_free_fwnode(fwnode);
    1001             : free_bundle:
    1002           0 :         kfree(bundle);
    1003           0 :         return false;
    1004             : }
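
A minimal, hypothetical driver-side sketch of how this interface might be used: a compile-time msi_domain_template plus one call at probe time. The my_* callbacks, MY_HW_VECTORS and the chosen flags are illustrative assumptions, not taken from this file.

#include <linux/device.h>
#include <linux/msi.h>

/* Assumed driver callbacks - declarations only, bodies are device specific. */
static void my_msi_mask(struct irq_data *d);
static void my_msi_unmask(struct irq_data *d);
static void my_msi_write_msg(struct irq_data *d, struct msi_msg *msg);
static void my_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc);

static const struct msi_domain_template my_msi_template = {
        .chip = {
                .name              = "my-dev-msi",
                .irq_mask          = my_msi_mask,
                .irq_unmask        = my_msi_unmask,
                .irq_write_msi_msg = my_msi_write_msg,
        },
        .ops = {
                .set_desc          = my_msi_set_desc,
        },
        .info = {
                .flags             = MSI_FLAG_USE_DEF_DOM_OPS |
                                     MSI_FLAG_USE_DEF_CHIP_OPS |
                                     MSI_FLAG_FREE_MSI_DESCS,
        },
};

#define MY_HW_VECTORS   32      /* assumed hardware slot count */

static int my_probe(struct device *dev)
{
        /* Returns false on any failure; no error code is propagated. */
        if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
                                          &my_msi_template, MY_HW_VECTORS,
                                          NULL, NULL))
                return -ENODEV;
        return 0;
}

Note that the template is copied via kmemdup() in the function above, so it can live in rodata and nothing from the caller needs to stay around after the call.
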
    1005             : 
    1006             : /**
    1007             :  * msi_remove_device_irq_domain - Free a device MSI interrupt domain
    1008             :  * @dev:        Pointer to the device
    1009             :  * @domid:      Domain id
    1010             :  */
    1011           0 : void msi_remove_device_irq_domain(struct device *dev, unsigned int domid)
    1012             : {
    1013           0 :         struct fwnode_handle *fwnode = NULL;
    1014             :         struct msi_domain_info *info;
    1015             :         struct irq_domain *domain;
    1016             : 
    1017           0 :         msi_lock_descs(dev);
    1018             : 
    1019           0 :         domain = msi_get_device_domain(dev, domid);
    1020             : 
    1021           0 :         if (!domain || !irq_domain_is_msi_device(domain))
    1022             :                 goto unlock;
    1023             : 
    1024           0 :         dev->msi.data->__domains[domid].domain = NULL;
    1025           0 :         info = domain->host_data;
    1026           0 :         if (irq_domain_is_msi_device(domain))
    1027           0 :                 fwnode = domain->fwnode;
    1028           0 :         irq_domain_remove(domain);
    1029           0 :         irq_domain_free_fwnode(fwnode);
    1030           0 :         kfree(container_of(info, struct msi_domain_template, info));
    1031             : 
    1032             : unlock:
    1033           0 :         msi_unlock_descs(dev);
    1034           0 : }
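
As the kernel-doc for domain creation notes, the domain normally disappears with the device via devres; an explicit removal is only needed when a driver wants to tear it down earlier. A short sketch under that assumption, with my_remove() as a hypothetical teardown path using only functions shown in this file:

#include <linux/device.h>
#include <linux/msi.h>

static void my_remove(struct device *dev)
{
        /* Free any interrupts still allocated from the domain first ... */
        msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
        /* ... then drop the per-device domain itself. */
        msi_remove_device_irq_domain(dev, MSI_DEFAULT_DOMAIN);
}
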
    1035             : 
    1036             : /**
    1037             :  * msi_match_device_irq_domain - Match a device irq domain against a bus token
    1038             :  * @dev:        Pointer to the device
    1039             :  * @domid:      Domain id
    1040             :  * @bus_token:  Bus token to match against the domain bus token
    1041             :  *
    1042             :  * Return: True if device domain exists and bus tokens match.
    1043             :  */
    1044           0 : bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
    1045             :                                  enum irq_domain_bus_token bus_token)
    1046             : {
    1047             :         struct msi_domain_info *info;
    1048             :         struct irq_domain *domain;
    1049           0 :         bool ret = false;
    1050             : 
    1051           0 :         msi_lock_descs(dev);
    1052           0 :         domain = msi_get_device_domain(dev, domid);
    1053           0 :         if (domain && irq_domain_is_msi_device(domain)) {
    1054           0 :                 info = domain->host_data;
    1055           0 :                 ret = info->bus_token == bus_token;
    1056             :         }
    1057           0 :         msi_unlock_descs(dev);
    1058           0 :         return ret;
    1059             : }
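
A small sketch of how a caller might check the bus token of a device domain before taking a bus-specific code path; the helper name is hypothetical and DOMAIN_BUS_PCI_DEVICE_MSIX is chosen only because it already appears in this file:

#include <linux/msi.h>

static bool my_dev_uses_msix_domain(struct device *dev)
{
        return msi_match_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
                                           DOMAIN_BUS_PCI_DEVICE_MSIX);
}
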
    1060             : 
    1061           0 : int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
    1062             :                             int nvec, msi_alloc_info_t *arg)
    1063             : {
    1064           0 :         struct msi_domain_info *info = domain->host_data;
    1065           0 :         struct msi_domain_ops *ops = info->ops;
    1066             : 
    1067           0 :         return ops->msi_prepare(domain, dev, nvec, arg);
    1068             : }
    1069             : 
    1070           0 : int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
    1071             :                              int virq_base, int nvec, msi_alloc_info_t *arg)
    1072             : {
    1073           0 :         struct msi_domain_info *info = domain->host_data;
    1074           0 :         struct msi_domain_ops *ops = info->ops;
    1075           0 :         struct msi_ctrl ctrl = {
    1076             :                 .domid  = MSI_DEFAULT_DOMAIN,
    1077             :                 .first  = virq_base,
    1078           0 :                 .last   = virq_base + nvec - 1,
    1079             :         };
    1080             :         struct msi_desc *desc;
    1081             :         struct xarray *xa;
    1082             :         int ret, virq;
    1083             : 
    1084           0 :         msi_lock_descs(dev);
    1085             : 
    1086           0 :         if (!msi_ctrl_valid(dev, &ctrl)) {
    1087             :                 ret = -EINVAL;
    1088             :                 goto unlock;
    1089             :         }
    1090             : 
    1091           0 :         ret = msi_domain_add_simple_msi_descs(dev, &ctrl);
    1092           0 :         if (ret)
    1093             :                 goto unlock;
    1094             : 
    1095           0 :         xa = &dev->msi.data->__domains[ctrl.domid].store;
    1096             : 
    1097           0 :         for (virq = virq_base; virq < virq_base + nvec; virq++) {
    1098           0 :                 desc = xa_load(xa, virq);
    1099           0 :                 desc->irq = virq;
    1100             : 
    1101           0 :                 ops->set_desc(arg, desc);
    1102           0 :                 ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
    1103           0 :                 if (ret)
    1104             :                         goto fail;
    1105             : 
    1106           0 :                 irq_set_msi_desc(virq, desc);
    1107             :         }
    1108           0 :         msi_unlock_descs(dev);
    1109           0 :         return 0;
    1110             : 
    1111             : fail:
    1112           0 :         for (--virq; virq >= virq_base; virq--) {
    1113           0 :                 msi_domain_depopulate_descs(dev, virq, 1);
    1114           0 :                 irq_domain_free_irqs_common(domain, virq, 1);
    1115             :         }
    1116           0 :         msi_domain_free_descs(dev, &ctrl);
    1117             : unlock:
    1118           0 :         msi_unlock_descs(dev);
    1119           0 :         return ret;
    1120             : }
    1121             : 
    1122           0 : void msi_domain_depopulate_descs(struct device *dev, int virq_base, int nvec)
    1123             : {
    1124           0 :         struct msi_ctrl ctrl = {
    1125             :                 .domid  = MSI_DEFAULT_DOMAIN,
    1126             :                 .first  = virq_base,
    1127           0 :                 .last   = virq_base + nvec - 1,
    1128             :         };
    1129             :         struct msi_desc *desc;
    1130             :         struct xarray *xa;
    1131             :         unsigned long idx;
    1132             : 
    1133           0 :         if (!msi_ctrl_valid(dev, &ctrl))
    1134           0 :                 return;
    1135             : 
    1136           0 :         xa = &dev->msi.data->__domains[ctrl.domid].store;
    1137           0 :         xa_for_each_range(xa, idx, desc, ctrl.first, ctrl.last)
    1138           0 :                 desc->irq = 0;
    1139             : }
    1140             : 
    1141             : /*
    1142             :  * Carefully check whether the device can use reservation mode. If
    1143             :  * reservation mode is enabled then the early activation will assign a
    1144             :  * dummy vector to the device. If the PCI/MSI device does not support
    1145             :  * masking of the entry then this can result in spurious interrupts when
    1146             :  * the device driver is not absolutely careful. But even then a malfunction
    1147             :  * of the hardware could result in a spurious interrupt on the dummy vector
    1148             :  * and render the device unusable. If the entry can be masked then the core
    1149             :  * logic will prevent the spurious interrupt and reservation mode can be
    1150             :  * used. For now reservation mode is restricted to PCI/MSI.
    1151             :  */
    1152           0 : static bool msi_check_reservation_mode(struct irq_domain *domain,
    1153             :                                        struct msi_domain_info *info,
    1154             :                                        struct device *dev)
    1155             : {
    1156             :         struct msi_desc *desc;
    1157             : 
    1158           0 :         switch(domain->bus_token) {
    1159             :         case DOMAIN_BUS_PCI_MSI:
    1160             :         case DOMAIN_BUS_PCI_DEVICE_MSI:
    1161             :         case DOMAIN_BUS_PCI_DEVICE_MSIX:
    1162             :         case DOMAIN_BUS_VMD_MSI:
    1163             :                 break;
    1164             :         default:
    1165             :                 return false;
    1166             :         }
    1167             : 
    1168           0 :         if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
    1169             :                 return false;
    1170             : 
    1171           0 :         if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
    1172             :                 return false;
    1173             : 
    1174             :         /*
     1175             :          * Checking the first MSI descriptor is sufficient. MSI-X supports
    1176             :          * masking and MSI does so when the can_mask attribute is set.
    1177             :          */
    1178           0 :         desc = msi_first_desc(dev, MSI_DESC_ALL);
    1179           0 :         return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
    1180             : }
    1181             : 
    1182             : static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
    1183             :                                int allocated)
    1184             : {
    1185           0 :         switch(domain->bus_token) {
    1186             :         case DOMAIN_BUS_PCI_MSI:
    1187             :         case DOMAIN_BUS_PCI_DEVICE_MSI:
    1188             :         case DOMAIN_BUS_PCI_DEVICE_MSIX:
    1189             :         case DOMAIN_BUS_VMD_MSI:
    1190             :                 if (IS_ENABLED(CONFIG_PCI_MSI))
    1191             :                         break;
    1192             :                 fallthrough;
    1193             :         default:
    1194             :                 return -ENOSPC;
    1195             :         }
    1196             : 
    1197             :         /* Let a failed PCI multi MSI allocation retry */
    1198           0 :         if (desc->nvec_used > 1)
    1199             :                 return 1;
    1200             : 
    1201             :         /* If there was a successful allocation let the caller know */
    1202           0 :         return allocated ? allocated : -ENOSPC;
    1203             : }
    1204             : 
    1205             : #define VIRQ_CAN_RESERVE        0x01
    1206             : #define VIRQ_ACTIVATE           0x02
    1207             : #define VIRQ_NOMASK_QUIRK       0x04
    1208             : 
    1209           0 : static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
    1210             : {
    1211           0 :         struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
    1212             :         int ret;
    1213             : 
    1214           0 :         if (!(vflags & VIRQ_CAN_RESERVE)) {
    1215           0 :                 irqd_clr_can_reserve(irqd);
    1216           0 :                 if (vflags & VIRQ_NOMASK_QUIRK)
    1217           0 :                         irqd_set_msi_nomask_quirk(irqd);
    1218             : 
    1219             :                 /*
    1220             :                  * If the interrupt is managed but no CPU is available to
    1221             :                  * service it, shut it down until better times. Note that
    1222             :                  * we only do this on the !RESERVE path as x86 (the only
    1223             :                  * architecture using this flag) deals with this in a
    1224             :                  * different way by using a catch-all vector.
    1225             :                  */
    1226           0 :                 if ((vflags & VIRQ_ACTIVATE) &&
    1227           0 :                     irqd_affinity_is_managed(irqd) &&
    1228           0 :                     !cpumask_intersects(irq_data_get_affinity_mask(irqd),
    1229             :                                         cpu_online_mask)) {
     1230           0 :                         irqd_set_managed_shutdown(irqd);
     1231           0 :                         return 0;
     1232             :                 }
    1233             :         }
    1234             : 
    1235           0 :         if (!(vflags & VIRQ_ACTIVATE))
    1236             :                 return 0;
    1237             : 
    1238           0 :         ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
    1239           0 :         if (ret)
    1240             :                 return ret;
    1241             :         /*
    1242             :          * If the interrupt uses reservation mode, clear the activated bit
    1243             :          * so request_irq() will assign the final vector.
    1244             :          */
    1245           0 :         if (vflags & VIRQ_CAN_RESERVE)
    1246           0 :                 irqd_clr_activated(irqd);
    1247             :         return 0;
    1248             : }
    1249             : 
    1250           0 : static int __msi_domain_alloc_irqs(struct device *dev, struct irq_domain *domain,
    1251             :                                    struct msi_ctrl *ctrl)
    1252             : {
    1253           0 :         struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store;
    1254           0 :         struct msi_domain_info *info = domain->host_data;
    1255           0 :         struct msi_domain_ops *ops = info->ops;
    1256           0 :         unsigned int vflags = 0, allocated = 0;
    1257           0 :         msi_alloc_info_t arg = { };
    1258             :         struct msi_desc *desc;
    1259             :         unsigned long idx;
    1260             :         int i, ret, virq;
    1261             : 
    1262           0 :         ret = msi_domain_prepare_irqs(domain, dev, ctrl->nirqs, &arg);
    1263           0 :         if (ret)
    1264             :                 return ret;
    1265             : 
    1266             :         /*
    1267             :          * This flag is set by the PCI layer as we need to activate
    1268             :          * the MSI entries before the PCI layer enables MSI in the
     1269             :          * card. Otherwise the card latches a random MSI message.
    1270             :          */
    1271           0 :         if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
    1272           0 :                 vflags |= VIRQ_ACTIVATE;
    1273             : 
    1274             :         /*
    1275             :          * Interrupt can use a reserved vector and will not occupy
    1276             :          * a real device vector until the interrupt is requested.
    1277             :          */
    1278           0 :         if (msi_check_reservation_mode(domain, info, dev)) {
    1279           0 :                 vflags |= VIRQ_CAN_RESERVE;
    1280             :                 /*
    1281             :                  * MSI affinity setting requires a special quirk (X86) when
    1282             :                  * reservation mode is active.
    1283             :                  */
    1284           0 :                 if (info->flags & MSI_FLAG_NOMASK_QUIRK)
    1285           0 :                         vflags |= VIRQ_NOMASK_QUIRK;
    1286             :         }
    1287             : 
    1288           0 :         xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
    1289           0 :                 if (!msi_desc_match(desc, MSI_DESC_NOTASSOCIATED))
    1290           0 :                         continue;
    1291             : 
    1292             :                 /* This should return -ECONFUSED... */
    1293           0 :                 if (WARN_ON_ONCE(allocated >= ctrl->nirqs))
    1294             :                         return -EINVAL;
    1295             : 
    1296           0 :                 if (ops->prepare_desc)
    1297           0 :                         ops->prepare_desc(domain, &arg, desc);
    1298             : 
    1299           0 :                 ops->set_desc(&arg, desc);
    1300             : 
    1301           0 :                 virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
    1302             :                                                dev_to_node(dev), &arg, false,
    1303           0 :                                                desc->affinity);
    1304           0 :                 if (virq < 0)
    1305           0 :                         return msi_handle_pci_fail(domain, desc, allocated);
    1306             : 
    1307           0 :                 for (i = 0; i < desc->nvec_used; i++) {
    1308           0 :                         irq_set_msi_desc_off(virq, i, desc);
    1309           0 :                         irq_debugfs_copy_devname(virq + i, dev);
    1310           0 :                         ret = msi_init_virq(domain, virq + i, vflags);
    1311           0 :                         if (ret)
    1312             :                                 return ret;
    1313             :                 }
    1314           0 :                 if (info->flags & MSI_FLAG_DEV_SYSFS) {
    1315           0 :                         ret = msi_sysfs_populate_desc(dev, desc);
    1316           0 :                         if (ret)
    1317             :                                 return ret;
    1318             :                 }
    1319           0 :                 allocated++;
    1320             :         }
    1321             :         return 0;
    1322             : }
    1323             : 
    1324             : static int msi_domain_alloc_simple_msi_descs(struct device *dev,
    1325             :                                              struct msi_domain_info *info,
    1326             :                                              struct msi_ctrl *ctrl)
    1327             : {
    1328           0 :         if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
    1329             :                 return 0;
    1330             : 
    1331           0 :         return msi_domain_add_simple_msi_descs(dev, ctrl);
    1332             : }
    1333             : 
    1334           0 : static int __msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
    1335             : {
    1336             :         struct msi_domain_info *info;
    1337             :         struct msi_domain_ops *ops;
    1338             :         struct irq_domain *domain;
    1339             :         int ret;
    1340             : 
    1341           0 :         if (!msi_ctrl_valid(dev, ctrl))
    1342             :                 return -EINVAL;
    1343             : 
    1344           0 :         domain = msi_get_device_domain(dev, ctrl->domid);
    1345           0 :         if (!domain)
    1346             :                 return -ENODEV;
    1347             : 
    1348           0 :         info = domain->host_data;
    1349             : 
    1350           0 :         ret = msi_domain_alloc_simple_msi_descs(dev, info, ctrl);
    1351           0 :         if (ret)
    1352             :                 return ret;
    1353             : 
    1354           0 :         ops = info->ops;
    1355           0 :         if (ops->domain_alloc_irqs)
    1356           0 :                 return ops->domain_alloc_irqs(domain, dev, ctrl->nirqs);
    1357             : 
    1358           0 :         return __msi_domain_alloc_irqs(dev, domain, ctrl);
    1359             : }
    1360             : 
    1361           0 : static int msi_domain_alloc_locked(struct device *dev, struct msi_ctrl *ctrl)
    1362             : {
    1363           0 :         int ret = __msi_domain_alloc_locked(dev, ctrl);
    1364             : 
    1365           0 :         if (ret)
    1366           0 :                 msi_domain_free_locked(dev, ctrl);
    1367           0 :         return ret;
    1368             : }
    1369             : 
    1370             : /**
     1371             :  * msi_domain_alloc_irqs_range_locked - Allocate interrupts from an MSI interrupt domain
    1372             :  * @dev:        Pointer to device struct of the device for which the interrupts
    1373             :  *              are allocated
    1374             :  * @domid:      Id of the interrupt domain to operate on
    1375             :  * @first:      First index to allocate (inclusive)
    1376             :  * @last:       Last index to allocate (inclusive)
    1377             :  *
    1378             :  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
    1379             :  * pair. Use this for MSI irqdomains which implement their own descriptor
    1380             :  * allocation/free.
    1381             :  *
    1382             :  * Return: %0 on success or an error code.
    1383             :  */
    1384           0 : int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
    1385             :                                        unsigned int first, unsigned int last)
    1386             : {
    1387           0 :         struct msi_ctrl ctrl = {
    1388             :                 .domid  = domid,
    1389             :                 .first  = first,
    1390             :                 .last   = last,
    1391           0 :                 .nirqs  = last + 1 - first,
    1392             :         };
    1393             : 
    1394           0 :         return msi_domain_alloc_locked(dev, &ctrl);
    1395             : }
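
A hedged sketch of the locked variant, mainly to show the msi_lock_descs()/msi_unlock_descs() pair the kernel-doc requires; the helper name, domain id and index range are illustrative assumptions:

#include <linux/msi.h>

static int my_alloc_four_vectors_locked(struct device *dev)
{
        int ret;

        msi_lock_descs(dev);
        /* Indices are inclusive, so 0..3 allocates four interrupts. */
        ret = msi_domain_alloc_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, 0, 3);
        msi_unlock_descs(dev);
        return ret;
}
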
    1396             : 
    1397             : /**
     1398             :  * msi_domain_alloc_irqs_range - Allocate interrupts from an MSI interrupt domain
    1399             :  * @dev:        Pointer to device struct of the device for which the interrupts
    1400             :  *              are allocated
    1401             :  * @domid:      Id of the interrupt domain to operate on
    1402             :  * @first:      First index to allocate (inclusive)
    1403             :  * @last:       Last index to allocate (inclusive)
    1404             :  *
    1405             :  * Return: %0 on success or an error code.
    1406             :  */
    1407           0 : int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
    1408             :                                 unsigned int first, unsigned int last)
    1409             : {
    1410             :         int ret;
    1411             : 
    1412           0 :         msi_lock_descs(dev);
    1413           0 :         ret = msi_domain_alloc_irqs_range_locked(dev, domid, first, last);
    1414           0 :         msi_unlock_descs(dev);
    1415           0 :         return ret;
    1416             : }
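
For the common driver case the unlocked wrapper above is enough. A sketch of a plausible setup path, assuming a hypothetical my_handler(); msi_get_virq() is the index-to-Linux-irq lookup provided elsewhere by the MSI core:

#include <linux/interrupt.h>
#include <linux/msi.h>

static irqreturn_t my_handler(int irq, void *data);     /* assumed handler */

static int my_setup_irqs(struct device *dev)
{
        int ret;

        /* Allocate hardware indices 0..3 (inclusive) from the default domain. */
        ret = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, 3);
        if (ret)
                return ret;

        /* Request the Linux interrupt backing index 0. */
        ret = request_irq(msi_get_virq(dev, 0), my_handler, 0, "my-dev", dev);
        if (ret)
                msi_domain_free_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, 3);
        return ret;
}
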
    1417             : 
    1418             : /**
     1419             :  * msi_domain_alloc_irqs_all_locked - Allocate all interrupts from an MSI interrupt domain
    1420             :  *
    1421             :  * @dev:        Pointer to device struct of the device for which the interrupts
    1422             :  *              are allocated
    1423             :  * @domid:      Id of the interrupt domain to operate on
    1424             :  * @nirqs:      The number of interrupts to allocate
    1425             :  *
    1426             :  * This function scans all MSI descriptors of the MSI domain and allocates interrupts
     1427             :  * for all unassigned ones. This function is intended for MSI domain usage where
    1428             :  * the descriptor allocation is handled at the call site, e.g. PCI/MSI[X].
    1429             :  *
    1430             :  * Return: %0 on success or an error code.
    1431             :  */
    1432           0 : int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs)
    1433             : {
    1434           0 :         struct msi_ctrl ctrl = {
    1435             :                 .domid  = domid,
    1436             :                 .first  = 0,
    1437           0 :                 .last   = msi_domain_get_hwsize(dev, domid) - 1,
    1438             :                 .nirqs  = nirqs,
    1439             :         };
    1440             : 
    1441           0 :         return msi_domain_alloc_locked(dev, &ctrl);
    1442             : }
    1443             : 
    1444             : /**
     1445             :  * msi_domain_alloc_irq_at - Allocate an interrupt from an MSI interrupt domain at
    1446             :  *                           a given index - or at the next free index
    1447             :  *
    1448             :  * @dev:        Pointer to device struct of the device for which the interrupts
    1449             :  *              are allocated
    1450             :  * @domid:      Id of the interrupt domain to operate on
    1451             :  * @index:      Index for allocation. If @index == %MSI_ANY_INDEX the allocation
    1452             :  *              uses the next free index.
    1453             :  * @affdesc:    Optional pointer to an interrupt affinity descriptor structure
    1454             :  * @icookie:    Optional pointer to a domain specific per instance cookie. If
    1455             :  *              non-NULL the content of the cookie is stored in msi_desc::data.
    1456             :  *              Must be NULL for MSI-X allocations
    1457             :  *
     1458             :  * This requires an MSI interrupt domain which lets the core code manage the
    1459             :  * MSI descriptors.
    1460             :  *
    1461             :  * Return: struct msi_map
    1462             :  *
    1463             :  *      On success msi_map::index contains the allocated index number and
    1464             :  *      msi_map::virq the corresponding Linux interrupt number
    1465             :  *
    1466             :  *      On failure msi_map::index contains the error code and msi_map::virq
    1467             :  *      is %0.
    1468             :  */
    1469           0 : struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
    1470             :                                        const struct irq_affinity_desc *affdesc,
    1471             :                                        union msi_instance_cookie *icookie)
    1472             : {
    1473           0 :         struct msi_ctrl ctrl = { .domid = domid, .nirqs = 1, };
    1474             :         struct irq_domain *domain;
    1475           0 :         struct msi_map map = { };
    1476             :         struct msi_desc *desc;
    1477             :         int ret;
    1478             : 
    1479           0 :         msi_lock_descs(dev);
    1480           0 :         domain = msi_get_device_domain(dev, domid);
    1481           0 :         if (!domain) {
    1482             :                 map.index = -ENODEV;
    1483             :                 goto unlock;
    1484             :         }
    1485             : 
    1486           0 :         desc = msi_alloc_desc(dev, 1, affdesc);
    1487           0 :         if (!desc) {
    1488             :                 map.index = -ENOMEM;
    1489             :                 goto unlock;
    1490             :         }
    1491             : 
    1492           0 :         if (icookie)
    1493           0 :                 desc->data.icookie = *icookie;
    1494             : 
    1495           0 :         ret = msi_insert_desc(dev, desc, domid, index);
    1496           0 :         if (ret) {
    1497             :                 map.index = ret;
    1498             :                 goto unlock;
    1499             :         }
    1500             : 
    1501           0 :         ctrl.first = ctrl.last = desc->msi_index;
    1502             : 
    1503           0 :         ret = __msi_domain_alloc_irqs(dev, domain, &ctrl);
    1504           0 :         if (ret) {
    1505           0 :                 map.index = ret;
    1506           0 :                 msi_domain_free_locked(dev, &ctrl);
    1507             :         } else {
    1508           0 :                 map.index = desc->msi_index;
    1509           0 :                 map.virq = desc->irq;
    1510             :         }
    1511             : unlock:
    1512           0 :         msi_unlock_descs(dev);
    1513           0 :         return map;
    1514             : }
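
A sketch of the calling convention described in the kernel-doc above: the returned msi_map either carries a negative error code in msi_map::index or a valid index/virq pair. The helper name is hypothetical; no affinity descriptor or instance cookie is passed:

#include <linux/device.h>
#include <linux/msi.h>

static int my_add_one_vector(struct device *dev)
{
        struct msi_map map;

        /* Let the core pick the next free index. */
        map = msi_domain_alloc_irq_at(dev, MSI_DEFAULT_DOMAIN, MSI_ANY_INDEX,
                                      NULL, NULL);
        if (map.index < 0)
                return map.index;       /* error code, map.virq is 0 */

        dev_info(dev, "index %d mapped to Linux irq %d\n", map.index, map.virq);
        return 0;
}
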
    1515             : 
    1516           0 : static void __msi_domain_free_irqs(struct device *dev, struct irq_domain *domain,
    1517             :                                    struct msi_ctrl *ctrl)
    1518             : {
    1519           0 :         struct xarray *xa = &dev->msi.data->__domains[ctrl->domid].store;
    1520           0 :         struct msi_domain_info *info = domain->host_data;
    1521             :         struct irq_data *irqd;
    1522             :         struct msi_desc *desc;
    1523             :         unsigned long idx;
    1524             :         int i;
    1525             : 
    1526           0 :         xa_for_each_range(xa, idx, desc, ctrl->first, ctrl->last) {
    1527             :                 /* Only handle MSI entries which have an interrupt associated */
    1528           0 :                 if (!msi_desc_match(desc, MSI_DESC_ASSOCIATED))
    1529           0 :                         continue;
    1530             : 
    1531             :                 /* Make sure all interrupts are deactivated */
    1532           0 :                 for (i = 0; i < desc->nvec_used; i++) {
    1533           0 :                         irqd = irq_domain_get_irq_data(domain, desc->irq + i);
    1534           0 :                         if (irqd && irqd_is_activated(irqd))
    1535           0 :                                 irq_domain_deactivate_irq(irqd);
    1536             :                 }
    1537             : 
    1538           0 :                 irq_domain_free_irqs(desc->irq, desc->nvec_used);
    1539           0 :                 if (info->flags & MSI_FLAG_DEV_SYSFS)
    1540           0 :                         msi_sysfs_remove_desc(dev, desc);
    1541           0 :                 desc->irq = 0;
    1542             :         }
    1543           0 : }
    1544             : 
    1545           0 : static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl)
    1546             : {
    1547             :         struct msi_domain_info *info;
    1548             :         struct msi_domain_ops *ops;
    1549             :         struct irq_domain *domain;
    1550             : 
    1551           0 :         if (!msi_ctrl_valid(dev, ctrl))
    1552             :                 return;
    1553             : 
    1554           0 :         domain = msi_get_device_domain(dev, ctrl->domid);
    1555           0 :         if (!domain)
    1556             :                 return;
    1557             : 
    1558           0 :         info = domain->host_data;
    1559           0 :         ops = info->ops;
    1560             : 
    1561           0 :         if (ops->domain_free_irqs)
    1562           0 :                 ops->domain_free_irqs(domain, dev);
    1563             :         else
    1564           0 :                 __msi_domain_free_irqs(dev, domain, ctrl);
    1565             : 
    1566           0 :         if (ops->msi_post_free)
    1567           0 :                 ops->msi_post_free(domain, dev);
    1568             : 
    1569           0 :         if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
    1570           0 :                 msi_domain_free_descs(dev, ctrl);
    1571             : }
    1572             : 
    1573             : /**
     1574             :  * msi_domain_free_irqs_range_locked - Free a range of interrupts from an MSI interrupt domain
     1575             :  *                                     associated with @dev, with the MSI descriptor lock held
    1576             :  * @dev:        Pointer to device struct of the device for which the interrupts
    1577             :  *              are freed
    1578             :  * @domid:      Id of the interrupt domain to operate on
    1579             :  * @first:      First index to free (inclusive)
    1580             :  * @last:       Last index to free (inclusive)
    1581             :  */
    1582           0 : void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
    1583             :                                        unsigned int first, unsigned int last)
    1584             : {
    1585           0 :         struct msi_ctrl ctrl = {
    1586             :                 .domid  = domid,
    1587             :                 .first  = first,
    1588             :                 .last   = last,
    1589             :         };
    1590           0 :         msi_domain_free_locked(dev, &ctrl);
    1591           0 : }
    1592             : 
    1593             : /**
     1594             :  * msi_domain_free_irqs_range - Free a range of interrupts from an MSI interrupt domain
     1595             :  *                              associated with @dev
    1596             :  * @dev:        Pointer to device struct of the device for which the interrupts
    1597             :  *              are freed
    1598             :  * @domid:      Id of the interrupt domain to operate on
    1599             :  * @first:      First index to free (inclusive)
    1600             :  * @last:       Last index to free (inclusive)
    1601             :  */
    1602           0 : void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
    1603             :                                 unsigned int first, unsigned int last)
    1604             : {
    1605           0 :         msi_lock_descs(dev);
    1606           0 :         msi_domain_free_irqs_range_locked(dev, domid, first, last);
    1607           0 :         msi_unlock_descs(dev);
    1608           0 : }
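
The matching teardown for a setup path like the alloc-range sketch further up: release the handler first, then hand the range back to the domain. Helper names remain hypothetical:

#include <linux/interrupt.h>
#include <linux/msi.h>

static void my_teardown_irqs(struct device *dev)
{
        /* Release the handler first, then return indices 0..3 to the domain. */
        free_irq(msi_get_virq(dev, 0), dev);
        msi_domain_free_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, 3);
}
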
    1609             : 
    1610             : /**
     1611             :  * msi_domain_free_irqs_all_locked - Free all interrupts from an MSI interrupt domain
     1612             :  *                                   associated with a device
    1613             :  * @dev:        Pointer to device struct of the device for which the interrupts
    1614             :  *              are freed
    1615             :  * @domid:      The id of the domain to operate on
    1616             :  *
    1617             :  * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
    1618             :  * pair. Use this for MSI irqdomains which implement their own vector
    1619             :  * allocation.
    1620             :  */
    1621           0 : void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid)
    1622             : {
    1623           0 :         msi_domain_free_irqs_range_locked(dev, domid, 0,
    1624           0 :                                           msi_domain_get_hwsize(dev, domid) - 1);
    1625           0 : }
    1626             : 
    1627             : /**
     1628             :  * msi_domain_free_irqs_all - Free all interrupts from an MSI interrupt domain
     1629             :  *                            associated with a device
    1630             :  * @dev:        Pointer to device struct of the device for which the interrupts
    1631             :  *              are freed
    1632             :  * @domid:      The id of the domain to operate on
    1633             :  */
    1634           0 : void msi_domain_free_irqs_all(struct device *dev, unsigned int domid)
    1635             : {
    1636           0 :         msi_lock_descs(dev);
    1637           0 :         msi_domain_free_irqs_all_locked(dev, domid);
    1638           0 :         msi_unlock_descs(dev);
    1639           0 : }
    1640             : 
    1641             : /**
    1642             :  * msi_get_domain_info - Get the MSI interrupt domain info for @domain
    1643             :  * @domain:     The interrupt domain to retrieve data from
    1644             :  *
    1645             :  * Return: the pointer to the msi_domain_info stored in @domain->host_data.
    1646             :  */
    1647           0 : struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
    1648             : {
    1649           0 :         return (struct msi_domain_info *)domain->host_data;
    1650             : }
    1651             : 
    1652             : /**
    1653             :  * msi_device_has_isolated_msi - True if the device has isolated MSI
    1654             :  * @dev: The device to check
    1655             :  *
    1656             :  * Isolated MSI means that HW modeled by an irq_domain on the path from the
    1657             :  * initiating device to the CPU will validate that the MSI message specifies an
    1658             :  * interrupt number that the device is authorized to trigger. This must block
    1659             :  * devices from triggering interrupts they are not authorized to trigger.
    1660             :  * Currently authorization means the MSI vector is one assigned to the device.
    1661             :  *
     1662             :  * This is interesting for securing VFIO use cases where a rogue MSI (e.g. created
     1663             :  * by abusing a normal PCI MemWr DMA) must not allow VFIO userspace to have an
     1664             :  * impact outside its security domain, e.g. userspace triggering interrupts on
    1665             :  * kernel drivers, a VM triggering interrupts on the hypervisor, or a VM
    1666             :  * triggering interrupts on another VM.
    1667             :  */
    1668           0 : bool msi_device_has_isolated_msi(struct device *dev)
    1669             : {
    1670           0 :         struct irq_domain *domain = dev_get_msi_domain(dev);
    1671             : 
    1672           0 :         for (; domain; domain = domain->parent)
    1673           0 :                 if (domain->flags & IRQ_DOMAIN_FLAG_ISOLATED_MSI)
    1674             :                         return true;
    1675             :         return arch_is_isolated_msi();
    1676             : }
    1677             : EXPORT_SYMBOL_GPL(msi_device_has_isolated_msi);
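
A sketch of how a VFIO-style consumer might use this check; whether to warn or to refuse the device is policy and purely illustrative here, and the helper name is an assumption:

#include <linux/device.h>
#include <linux/msi.h>

static int my_check_msi_isolation(struct device *dev)
{
        if (!msi_device_has_isolated_msi(dev))
                dev_warn(dev, "MSI messages from this device are not isolated\n");
        return 0;
}
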

Generated by: LCOV version 1.14