LCOV - code coverage report
Current view: top level - kernel/irq - manage.c (source / functions)
Test: coverage.info — Lines: 79 of 706 hit (11.2 %) — Functions: 3 of 57 hit (5.3 %)
Date: 2023-07-19 18:55:55
Note: every instrumented line in the excerpt below recorded 0 hits.

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);

static int __init setup_forced_irqthreads(char *arg)
{
        static_branch_enable(&force_irqthreads_key);
        return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
        struct irq_data *irqd = irq_desc_get_irq_data(desc);
        bool inprogress;

        do {
                unsigned long flags;

                /*
                 * Wait until we're out of the critical section.  This might
                 * give the wrong answer due to the lack of memory barriers.
                 */
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();

                /* Ok, that indicated we're done: double-check carefully. */
                raw_spin_lock_irqsave(&desc->lock, flags);
                inprogress = irqd_irq_inprogress(&desc->irq_data);

                /*
                 * If requested and supported, check at the chip whether it
                 * is in flight at the hardware level, i.e. already pending
                 * in a CPU and waiting for service and acknowledge.
                 */
                if (!inprogress && sync_chip) {
                        /*
                         * Ignore the return code. inprogress is only updated
                         * when the chip supports it.
                         */
                        __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
                                                &inprogress);
                }
                raw_spin_unlock_irqrestore(&desc->lock, flags);

                /* Oops, that failed? */
        } while (inprogress);
}

/**
 *      synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *      @irq: interrupt number to wait for
 *
 *      This function waits for any pending hard IRQ handlers for this
 *      interrupt to complete before returning. If you use this
 *      function while holding a resource the IRQ handler may need you
 *      will deadlock. It does not take associated threaded handlers
 *      into account.
 *
 *      Do not use this for shutdown scenarios where you must be sure
 *      that all parts (hardirq and threaded handler) have completed.
 *
 *      Returns: false if a threaded handler is active.
 *
 *      This function may be called - with care - from IRQ context.
 *
 *      It does not check whether there is an interrupt in flight at the
 *      hardware level, but not serviced yet, as this might deadlock when
 *      called with interrupts disabled and the target CPU of the interrupt
 *      is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                __synchronize_hardirq(desc, false);
                return !atomic_read(&desc->threads_active);
        }

        return true;
}
EXPORT_SYMBOL(synchronize_hardirq);
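
/*
 * Example (not part of manage.c): a minimal, hypothetical sketch of
 * quiescing the hard handler from atomic context. struct my_dev and
 * my_quiesce_atomic are invented names; the struct is reused by the
 * later sketches in this listing.
 */
struct my_dev {
        unsigned int irq;
        bool running;
};

static bool my_quiesce_atomic(struct my_dev *dev)
{
        disable_irq_nosync(dev->irq);   /* no new invocations */

        /*
         * Waits for in-flight hard handlers only; returns false if a
         * threaded handler is still active, which atomic callers must
         * handle rather than block on.
         */
        return synchronize_hardirq(dev->irq);
}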

/**
 *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *      @irq: interrupt number to wait for
 *
 *      This function waits for any pending IRQ handlers for this interrupt
 *      to complete before returning. If you use this function while
 *      holding a resource the IRQ handler may need you will deadlock.
 *
 *      Can only be called from preemptible code as it might sleep when
 *      an interrupt thread is associated to @irq.
 *
 *      It optionally makes sure (when the irq chip supports that method)
 *      that the interrupt is not pending in any CPU and waiting for
 *      service.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                __synchronize_hardirq(desc, true);
                /*
                 * We made sure that no hardirq handler is
                 * running. Now verify that no threaded handlers are
                 * active.
                 */
                wait_event(desc->wait_for_threads,
                           !atomic_read(&desc->threads_active));
        }
}
EXPORT_SYMBOL(synchronize_irq);
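
/*
 * Example (not part of manage.c): the flag-plus-synchronize pattern, a
 * hypothetical sketch reusing struct my_dev from above. After
 * synchronize_irq() returns, no handler invocation can still observe
 * running == true.
 */
static void my_stop(struct my_dev *dev)
{
        WRITE_ONCE(dev->running, false);

        /* Preemptible context only: may sleep on the threaded handler. */
        synchronize_irq(dev->irq);
}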

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
        if (!desc || !irqd_can_balance(&desc->irq_data) ||
            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
                return false;
        return true;
}

/**
 *      irq_can_set_affinity - Check if the affinity of a given irq can be set
 *      @irq:           Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
        return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq:        Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return __irq_can_set_affinity(desc) &&
                !irqd_affinity_is_managed(&desc->irq_data);
}

/**
 *      irq_set_thread_affinity - Notify irq threads to adjust affinity
 *      @desc:          irq descriptor which has affinity changed
 *
 *      We just set IRQTF_AFFINITY and delegate the affinity setting
 *      to the interrupt thread itself. We cannot call
 *      set_cpus_allowed_ptr() here as we hold desc->lock and this
 *      code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
        struct irqaction *action;

        for_each_action_of_desc(desc, action) {
                if (action->thread)
                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
                if (action->secondary && action->secondary->thread)
                        set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
        }
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
        const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
        struct irq_chip *chip = irq_data_get_irq_chip(data);

        if (!cpumask_empty(m))
                return;
        pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
                     chip->name, data->irq);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
                        bool force)
{
        struct irq_desc *desc = irq_data_to_desc(data);
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        const struct cpumask *prog_mask;
        int ret;

        static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
        static struct cpumask tmp_mask;

        if (!chip || !chip->irq_set_affinity)
                return -EINVAL;

        raw_spin_lock(&tmp_mask_lock);
        /*
         * If this is a managed interrupt and housekeeping is enabled on
         * it, check whether the requested affinity mask intersects with
         * a housekeeping CPU. If so, then remove the isolated CPUs from
         * the mask and just keep the housekeeping CPU(s). This prevents
         * the affinity setter from routing the interrupt to an isolated
         * CPU to avoid that I/O submitted from a housekeeping CPU causes
         * interrupts on an isolated one.
         *
         * If the masks do not intersect or include online CPU(s) then
         * keep the requested mask. The isolated target CPUs are only
         * receiving interrupts when the I/O operation was submitted
         * directly from them.
         *
         * If all housekeeping CPUs in the affinity mask are offline, the
         * interrupt will be migrated by the CPU hotplug code once a
         * housekeeping CPU which belongs to the affinity mask comes
         * online.
         */
        if (irqd_affinity_is_managed(data) &&
            housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
                const struct cpumask *hk_mask;

                hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);

                cpumask_and(&tmp_mask, mask, hk_mask);
                if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
                        prog_mask = mask;
                else
                        prog_mask = &tmp_mask;
        } else {
                prog_mask = mask;
        }

        /*
         * Make sure we only provide online CPUs to the irqchip,
         * unless we are being asked to force the affinity (in which
         * case we do as we are told).
         */
        cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
        if (!force && !cpumask_empty(&tmp_mask))
                ret = chip->irq_set_affinity(data, &tmp_mask, force);
        else if (force)
                ret = chip->irq_set_affinity(data, mask, force);
        else
                ret = -EINVAL;

        raw_spin_unlock(&tmp_mask_lock);

        switch (ret) {
        case IRQ_SET_MASK_OK:
        case IRQ_SET_MASK_OK_DONE:
                cpumask_copy(desc->irq_common_data.affinity, mask);
                fallthrough;
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_validate_effective_affinity(data);
                irq_set_thread_affinity(desc);
                ret = 0;
        }

        return ret;
}
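
/*
 * Example (not part of manage.c): the mask-selection policy above,
 * restated as a tiny self-contained helper. Given a requested mask and
 * the housekeeping mask, prefer requested AND housekeeping, but only
 * when that intersection contains an online CPU; otherwise fall back
 * to the requested mask. pick_prog_mask is an invented name.
 */
static const struct cpumask *pick_prog_mask(const struct cpumask *req,
                                            const struct cpumask *hk,
                                            struct cpumask *tmp)
{
        cpumask_and(tmp, req, hk);
        if (cpumask_intersects(tmp, cpu_online_mask))
                return tmp;
        return req;
}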

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
                                           const struct cpumask *dest)
{
        struct irq_desc *desc = irq_data_to_desc(data);

        irqd_set_move_pending(data);
        irq_copy_pending(desc, dest);
        return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
                                           const struct cpumask *dest)
{
        return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
                                const struct cpumask *dest, bool force)
{
        int ret = irq_do_set_affinity(data, dest, force);

        /*
         * If the underlying vector management is busy and the
         * architecture supports the generic pending mechanism, use it
         * to avoid returning an error to user space.
         */
        if (ret == -EBUSY && !force)
                ret = irq_set_affinity_pending(data, dest);
        return ret;
}

static bool irq_set_affinity_deactivated(struct irq_data *data,
                                         const struct cpumask *mask)
{
        struct irq_desc *desc = irq_data_to_desc(data);

        /*
         * Handle irq chips which can handle affinity only in activated
         * state correctly.
         *
         * If the interrupt is not yet activated, just store the affinity
         * mask and do not call the chip driver at all. On activation the
         * driver has to make sure anyway that the interrupt is in a
         * usable state so startup works.
         */
        if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
            irqd_is_activated(data) || !irqd_affinity_on_activate(data))
                return false;

        cpumask_copy(desc->irq_common_data.affinity, mask);
        irq_data_update_effective_affinity(data, mask);
        irqd_set(data, IRQD_AFFINITY_SET);
        return true;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
                            bool force)
{
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        struct irq_desc *desc = irq_data_to_desc(data);
        int ret = 0;

        if (!chip || !chip->irq_set_affinity)
                return -EINVAL;

        if (irq_set_affinity_deactivated(data, mask))
                return 0;

        if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
                ret = irq_try_set_affinity(data, mask, force);
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
        }

        if (desc->affinity_notify) {
                kref_get(&desc->affinity_notify->kref);
                if (!schedule_work(&desc->affinity_notify->work)) {
                        /* Work was already scheduled, drop our extra ref */
                        kref_put(&desc->affinity_notify->kref,
                                 desc->affinity_notify->release);
                }
        }
        irqd_set(data, IRQD_AFFINITY_SET);

        return ret;
}

/**
 * irq_update_affinity_desc - Update affinity management for an interrupt
 * @irq:        The interrupt number to update
 * @affinity:   Pointer to the affinity descriptor
 *
 * This interface can be used to configure the affinity management of
 * interrupts which have been allocated already.
 *
 * There are certain limitations on when it may be used - attempts to use it
 * when the kernel is configured for generic IRQ reservation mode (in
 * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
 * managed/non-managed interrupt accounting. In addition, attempts to use it on
 * an interrupt which is already started or which has already been configured
 * as managed will also fail, as these mean invalid init state or double init.
 */
int irq_update_affinity_desc(unsigned int irq,
                             struct irq_affinity_desc *affinity)
{
        struct irq_desc *desc;
        unsigned long flags;
        bool activated;
        int ret = 0;

        /*
         * Supporting this with the reservation scheme used by x86 needs
         * some more thought. Fail it for now.
         */
        if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
                return -EOPNOTSUPP;

        desc = irq_get_desc_buslock(irq, &flags, 0);
        if (!desc)
                return -EINVAL;

        /* Requires the interrupt to be shut down */
        if (irqd_is_started(&desc->irq_data)) {
                ret = -EBUSY;
                goto out_unlock;
        }

        /* Interrupts which are already managed cannot be modified */
        if (irqd_affinity_is_managed(&desc->irq_data)) {
                ret = -EBUSY;
                goto out_unlock;
        }

        /*
         * Deactivate the interrupt. That's required to undo
         * anything an earlier activation has established.
         */
        activated = irqd_is_activated(&desc->irq_data);
        if (activated)
                irq_domain_deactivate_irq(&desc->irq_data);

        if (affinity->is_managed) {
                irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
                irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
        }

        cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);

        /* Restore the activation state */
        if (activated)
                irq_domain_activate_irq(&desc->irq_data, false);

out_unlock:
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
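
/*
 * Example (not part of manage.c): a hypothetical sketch of switching an
 * already-allocated, not-yet-started interrupt to managed affinity via
 * irq_update_affinity_desc(). The mask choice and error handling are
 * illustrative only; my_make_managed is an invented name.
 */
static int my_make_managed(unsigned int irq, const struct cpumask *mask)
{
        struct irq_affinity_desc ad = { .is_managed = 1 };

        cpumask_copy(&ad.mask, mask);

        /* Fails with -EBUSY if the irq is started or already managed. */
        return irq_update_affinity_desc(irq, &ad);
}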

static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
                              bool force)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        if (!desc)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:        Interrupt to set affinity
 * @cpumask:    cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
        return __irq_set_affinity(irq, cpumask, false);
}
EXPORT_SYMBOL_GPL(irq_set_affinity);
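
/*
 * Example (not part of manage.c): pinning a device interrupt to one CPU
 * with irq_set_affinity(). A hypothetical sketch; real drivers usually
 * spread queues across CPUs rather than hardcoding CPU 0.
 */
static int my_pin_irq(unsigned int irq)
{
        /* Fails with -EINVAL if the mask contains no online CPU. */
        return irq_set_affinity(irq, cpumask_of(0));
}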

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:        Interrupt to set affinity
 * @cpumask:    cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per-cpu
 * interrupts affine before the cpu becomes online.
 */
int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
        return __irq_set_affinity(irq, cpumask, true);
}
EXPORT_SYMBOL_GPL(irq_force_affinity);

int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
                              bool setaffinity)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return -EINVAL;
        desc->affinity_hint = m;
        irq_put_desc_unlock(desc, flags);
        if (m && setaffinity)
                __irq_set_affinity(irq, m, false);
        return 0;
}
EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
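
/*
 * Example (not part of manage.c): drivers normally reach this helper
 * through the irq_set_affinity_hint()/irq_update_affinity_hint() wrappers
 * in <linux/interrupt.h>. A hypothetical per-queue hint; my_hint_queue
 * is an invented name.
 */
static void my_hint_queue(struct my_dev *dev, unsigned int cpu)
{
        /* Publishes /proc/irq/<irq>/affinity_hint and applies the mask. */
        irq_set_affinity_hint(dev->irq, cpumask_of(cpu));
}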

static void irq_affinity_notify(struct work_struct *work)
{
        struct irq_affinity_notify *notify =
                container_of(work, struct irq_affinity_notify, work);
        struct irq_desc *desc = irq_to_desc(notify->irq);
        cpumask_var_t cpumask;
        unsigned long flags;

        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
                goto out;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (irq_move_pending(&desc->irq_data))
                irq_get_pending(cpumask, desc);
        else
                cpumask_copy(cpumask, desc->irq_common_data.affinity);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        notify->notify(notify, cpumask);

        free_cpumask_var(cpumask);
out:
        kref_put(&notify->kref, notify->release);
}

/**
 *      irq_set_affinity_notifier - control notification of IRQ affinity changes
 *      @irq:           Interrupt for which to enable/disable notification
 *      @notify:        Context for notification, or %NULL to disable
 *                      notification.  Function pointers must be initialised;
 *                      the other fields will be initialised by this function.
 *
 *      Must be called in process context.  Notification may only be enabled
 *      after the IRQ is allocated and must be disabled before the IRQ is
 *      freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_affinity_notify *old_notify;
        unsigned long flags;

        /* The release function is promised process context */
        might_sleep();

        if (!desc || desc->istate & IRQS_NMI)
                return -EINVAL;

        /* Complete initialisation of *notify */
        if (notify) {
                notify->irq = irq;
                kref_init(&notify->kref);
                INIT_WORK(&notify->work, irq_affinity_notify);
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        old_notify = desc->affinity_notify;
        desc->affinity_notify = notify;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        if (old_notify) {
                if (cancel_work_sync(&old_notify->work)) {
                        /* Pending work had a ref, put that one too */
                        kref_put(&old_notify->kref, old_notify->release);
                }
                kref_put(&old_notify->kref, old_notify->release);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
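
/*
 * Example (not part of manage.c): registering an affinity-change
 * notifier. A hypothetical sketch; struct my_ctx, my_notify, my_release
 * and my_register are invented. Only the notify/release pointers must be
 * set by the caller, as documented above.
 */
struct my_ctx {
        struct irq_affinity_notify an;
};

static void my_notify(struct irq_affinity_notify *an, const cpumask_t *mask)
{
        pr_info("irq %u now affine to %*pbl\n", an->irq, cpumask_pr_args(mask));
}

static void my_release(struct kref *ref)
{
        struct irq_affinity_notify *an =
                container_of(ref, struct irq_affinity_notify, kref);

        kfree(container_of(an, struct my_ctx, an));
}

static int my_register(unsigned int irq)
{
        struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return -ENOMEM;
        ctx->an.notify = my_notify;
        ctx->an.release = my_release;
        return irq_set_affinity_notifier(irq, &ctx->an);
}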

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
        struct cpumask *set = irq_default_affinity;
        int ret, node = irq_desc_get_node(desc);
        static DEFINE_RAW_SPINLOCK(mask_lock);
        static struct cpumask mask;

        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!__irq_can_set_affinity(desc))
                return 0;

        raw_spin_lock(&mask_lock);
        /*
         * Preserve the managed affinity setting and a userspace affinity
         * setup, but make sure that one of the targets is online.
         */
        if (irqd_affinity_is_managed(&desc->irq_data) ||
            irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
                if (cpumask_intersects(desc->irq_common_data.affinity,
                                       cpu_online_mask))
                        set = desc->irq_common_data.affinity;
                else
                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
        }

        cpumask_and(&mask, cpu_online_mask, set);
        if (cpumask_empty(&mask))
                cpumask_copy(&mask, cpu_online_mask);

        if (node != NUMA_NO_NODE) {
                const struct cpumask *nodemask = cpumask_of_node(node);

                /* make sure at least one of the cpus in nodemask is online */
                if (cpumask_intersects(&mask, nodemask))
                        cpumask_and(&mask, &mask, nodemask);
        }
        ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
        raw_spin_unlock(&mask_lock);
        return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
        return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif /* CONFIG_AUTO_IRQ_AFFINITY */
#endif /* CONFIG_SMP */

/**
 *      irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *      @irq: interrupt number to set affinity
 *      @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *                  specific data for percpu_devid interrupts
 *
 *      This function uses the vCPU specific data to set the vCPU
 *      affinity for an irq. The vCPU specific data is passed from
 *      outside, such as KVM. One example code path is as below:
 *      KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
        struct irq_data *data;
        struct irq_chip *chip;
        int ret = -ENOSYS;

        if (!desc)
                return -EINVAL;

        data = irq_desc_get_irq_data(desc);
        do {
                chip = irq_data_get_irq_chip(data);
                if (chip && chip->irq_set_vcpu_affinity)
                        break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
                data = data->parent_data;
#else
                data = NULL;
#endif
        } while (data);

        if (data)
                ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
        irq_put_desc_unlock(desc, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
        if (!desc->depth++)
                irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return -EINVAL;
        __disable_irq(desc);
        irq_put_desc_busunlock(desc, flags);
        return 0;
}

/**
 *      disable_irq_nosync - disable an irq without waiting
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line. Disables and enables are
 *      nested.
 *      Unlike disable_irq(), this function does not ensure existing
 *      instances of the IRQ handler have completed before returning.
 *
 *      This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        __disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *      disable_irq - disable an irq and wait for completion
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line. Disables and enables are
 *      nested.
 *      This function waits for any pending IRQ handlers for this interrupt
 *      to complete before returning. If you use this function while
 *      holding a resource the IRQ handler may need you will deadlock.
 *
 *      Can only be called from preemptible code as it might sleep when
 *      an interrupt thread is associated to @irq.
 */
void disable_irq(unsigned int irq)
{
        might_sleep();
        if (!__disable_irq_nosync(irq))
                synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
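
/*
 * Example (not part of manage.c): nested disable/enable pairing around a
 * device reconfiguration, reusing the hypothetical struct my_dev. Each
 * disable_irq() must be balanced by exactly one enable_irq().
 */
static void my_reconfigure(struct my_dev *dev)
{
        disable_irq(dev->irq);          /* waits for running handlers */

        /* The handler cannot run concurrently; reprogram the device here. */

        enable_irq(dev->irq);
}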

/**
 *      disable_hardirq - disable an irq and wait for hardirq completion
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line. Disables and enables are
 *      nested.
 *      This function waits for any pending hard IRQ handlers for this
 *      interrupt to complete before returning. If you use this function while
 *      holding a resource the hard IRQ handler may need you will deadlock.
 *
 *      When used to optimistically disable an interrupt from atomic context
 *      the return value must be checked.
 *
 *      Returns: false if a threaded handler is active.
 *
 *      This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
        if (!__disable_irq_nosync(irq))
                return synchronize_hardirq(irq);

        return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
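
/*
 * Example (not part of manage.c): the optimistic disable-from-atomic
 * pattern the comment above describes, as a hypothetical sketch. If a
 * threaded handler is still active, the disable is undone so the caller
 * can retry later.
 */
static bool my_try_quiesce(struct my_dev *dev)
{
        if (disable_hardirq(dev->irq))
                return true;            /* hard handler quiesced */

        /* Threaded handler still active: rebalance and report failure. */
        enable_irq(dev->irq);
        return false;
}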

/**
 *      disable_nmi_nosync - disable an nmi without waiting
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line. Disables and enables are
 *      nested.
 *      The interrupt to disable must have been requested through request_nmi.
 *      Unlike disable_nmi(), this function does not ensure existing
 *      instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
        disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
        switch (desc->depth) {
        case 0:
 err_out:
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
                     irq_desc_get_irq(desc));
                break;
        case 1: {
                if (desc->istate & IRQS_SUSPENDED)
                        goto err_out;
                /* Prevent probing on this irq: */
                irq_settings_set_noprobe(desc);
                /*
                 * Call irq_startup() not irq_enable() here because the
                 * interrupt might be marked NOAUTOEN. So irq_startup()
                 * needs to be invoked when it gets enabled the first
                 * time. If it was already started up, then irq_startup()
                 * will invoke irq_enable() under the hood.
                 */
                irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
                break;
        }
        default:
                desc->depth--;
        }
}

/**
 *      enable_irq - enable handling of an irq
 *      @irq: Interrupt to enable
 *
 *      Undoes the effect of one call to disable_irq().  If this
 *      matches the last disable, processing of interrupts on this
 *      IRQ line is re-enabled.
 *
 *      This function may be called from IRQ context only when
 *      desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return;
        if (WARN(!desc->irq_data.chip,
                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
                goto out;

        __enable_irq(desc);
out:
        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 *      enable_nmi - enable handling of an nmi
 *      @irq: Interrupt to enable
 *
 *      The interrupt to enable must have been requested through request_nmi.
 *      Undoes the effect of one call to disable_nmi(). If this
 *      matches the last disable, processing of interrupts on this
 *      IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
        enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;

        if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
                return 0;

        if (desc->irq_data.chip->irq_set_wake)
                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

        return ret;
}

/**
 *      irq_set_irq_wake - control irq power management wakeup
 *      @irq:   interrupt to control
 *      @on:    enable/disable power management wakeup
 *
 *      Enable/disable power management wakeup mode, which is
 *      disabled by default.  Enables and disables must match,
 *      just as they match for non-wakeup mode support.
 *
 *      Wakeup mode lets this IRQ wake the system from sleep
 *      states like "suspend to RAM".
 *
 *      Note: irq enable/disable state is completely orthogonal
 *      to the enable/disable state of irq wake. An irq can be
 *      disabled with disable_irq() and still wake the system as
 *      long as the irq has wake enabled. If this does not hold,
 *      then the underlying irq chip and the related driver need
 *      to be investigated.
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
        int ret = 0;

        if (!desc)
                return -EINVAL;

        /* Don't use NMIs as wake up interrupts please */
        if (desc->istate & IRQS_NMI) {
                ret = -EINVAL;
                goto out_unlock;
        }

        /*
         * Wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        } else {
                if (desc->wake_depth == 0) {
                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
                } else if (--desc->wake_depth == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 1;
                        else
                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        }

out_unlock:
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
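
/*
 * Example (not part of manage.c): arming a wakeup interrupt across
 * suspend, a hypothetical sketch of the usual driver PM pattern
 * (assuming <linux/device.h> and <linux/pm_wakeup.h>). The my_* names
 * are invented; device_may_wakeup() and dev_get_drvdata() are standard
 * kernel APIs.
 */
static int my_suspend(struct device *d)
{
        struct my_dev *dev = dev_get_drvdata(d);

        if (device_may_wakeup(d))
                return irq_set_irq_wake(dev->irq, 1);   /* enable wake */
        return 0;
}

static int my_resume(struct device *d)
{
        struct my_dev *dev = dev_get_drvdata(d);

        if (device_may_wakeup(d))
                irq_set_irq_wake(dev->irq, 0);          /* must balance */
        return 0;
}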
     928             : 
     929             : /*
     930             :  * Internal function that tells the architecture code whether a
     931             :  * particular irq has been exclusively allocated or is available
     932             :  * for driver use.
     933             :  */
     934           0 : int can_request_irq(unsigned int irq, unsigned long irqflags)
     935             : {
     936             :         unsigned long flags;
     937           0 :         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
     938           0 :         int canrequest = 0;
     939             : 
     940           0 :         if (!desc)
     941             :                 return 0;
     942             : 
     943           0 :         if (irq_settings_can_request(desc)) {
     944           0 :                 if (!desc->action ||
     945           0 :                     irqflags & desc->action->flags & IRQF_SHARED)
     946           0 :                         canrequest = 1;
     947             :         }
     948           0 :         irq_put_desc_unlock(desc, flags);
     949           0 :         return canrequest;
     950             : }
     951             : 
     952           0 : int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
     953             : {
     954           0 :         struct irq_chip *chip = desc->irq_data.chip;
     955           0 :         int ret, unmask = 0;
     956             : 
     957           0 :         if (!chip || !chip->irq_set_type) {
     958             :                 /*
     959             :                  * IRQF_TRIGGER_* but the PIC does not support multiple
     960             :                  * flow-types?
     961             :                  */
     962             :                 pr_debug("No set_type function for IRQ %d (%s)\n",
     963             :                          irq_desc_get_irq(desc),
     964             :                          chip ? (chip->name ? : "unknown") : "unknown");
     965             :                 return 0;
     966             :         }
     967             : 
     968           0 :         if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
     969           0 :                 if (!irqd_irq_masked(&desc->irq_data))
     970           0 :                         mask_irq(desc);
     971           0 :                 if (!irqd_irq_disabled(&desc->irq_data))
     972           0 :                         unmask = 1;
     973             :         }
     974             : 
     975             :         /* Mask all flags except trigger mode */
     976           0 :         flags &= IRQ_TYPE_SENSE_MASK;
     977           0 :         ret = chip->irq_set_type(&desc->irq_data, flags);
     978             : 
     979           0 :         switch (ret) {
     980             :         case IRQ_SET_MASK_OK:
     981             :         case IRQ_SET_MASK_OK_DONE:
     982           0 :                 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
     983           0 :                 irqd_set(&desc->irq_data, flags);
     984             :                 fallthrough;
     985             : 
     986             :         case IRQ_SET_MASK_OK_NOCOPY:
     987           0 :                 flags = irqd_get_trigger_type(&desc->irq_data);
     988           0 :                 irq_settings_set_trigger_mask(desc, flags);
     989           0 :                 irqd_clear(&desc->irq_data, IRQD_LEVEL);
     990           0 :                 irq_settings_clr_level(desc);
     991           0 :                 if (flags & IRQ_TYPE_LEVEL_MASK) {
     992           0 :                         irq_settings_set_level(desc);
     993           0 :                         irqd_set(&desc->irq_data, IRQD_LEVEL);
     994             :                 }
     995             : 
     996             :                 ret = 0;
     997             :                 break;
     998             :         default:
     999           0 :                 pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
    1000             :                        flags, irq_desc_get_irq(desc), chip->irq_set_type);
    1001             :         }
    1002           0 :         if (unmask)
    1003           0 :                 unmask_irq(desc);
    1004             :         return ret;
    1005             : }
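                     : /*
                     :  * Usage sketch (annotation, not part of manage.c): drivers normally
                     :  * reach __irq_set_trigger() through the IRQF_TRIGGER_* flags passed
                     :  * to request_irq(), or explicitly via irq_set_irq_type().
                     :  */
                     : static int foo_make_edge_triggered(unsigned int irq)
                     : {
                     :         /* Equivalent to requesting the irq with IRQF_TRIGGER_RISING. */
                     :         return irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
                     : }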
    1006             : 
    1007             : #ifdef CONFIG_HARDIRQS_SW_RESEND
    1008             : int irq_set_parent(int irq, int parent_irq)
    1009             : {
    1010             :         unsigned long flags;
    1011             :         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
    1012             : 
    1013             :         if (!desc)
    1014             :                 return -EINVAL;
    1015             : 
    1016             :         desc->parent_irq = parent_irq;
    1017             : 
    1018             :         irq_put_desc_unlock(desc, flags);
    1019             :         return 0;
    1020             : }
    1021             : EXPORT_SYMBOL_GPL(irq_set_parent);
    1022             : #endif
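                     : /*
                     :  * Usage sketch (annotation, not part of manage.c): an MFD-style driver
                     :  * wiring a nested sub-interrupt to the chip's upstream line so that the
                     :  * software resend mechanism can retrigger it. Names are hypothetical.
                     :  */
                     : static int foo_wire_subirq(unsigned int child_irq, unsigned int parent_irq)
                     : {
                     :         return irq_set_parent(child_irq, parent_irq);
                     : }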
    1023             : 
    1024             : /*
    1025             :  * Default primary interrupt handler for threaded interrupts. It is
    1026             :  * assigned as the primary handler when request_threaded_irq() is
    1027             :  * called with handler == NULL. Useful for oneshot interrupts.
    1028             :  */
    1029           0 : static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
    1030             : {
    1031           0 :         return IRQ_WAKE_THREAD;
    1032             : }
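                     : /*
                     :  * Usage sketch (annotation, not part of manage.c): requesting a purely
                     :  * threaded interrupt. With handler == NULL the default primary handler
                     :  * above gets installed, and IRQF_ONESHOT keeps the line masked until
                     :  * foo_thread_fn() completes. foo_* names are hypothetical.
                     :  */
                     : static irqreturn_t foo_thread_fn(int irq, void *dev_id); /* hypothetical */
                     : 
                     : static int foo_request(unsigned int irq, void *foo_dev)
                     : {
                     :         return request_threaded_irq(irq, NULL, foo_thread_fn,
                     :                                     IRQF_ONESHOT, "foo", foo_dev);
                     : }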
    1033             : 
    1034             : /*
    1035             :  * Primary handler for nested threaded interrupts. Should never be
    1036             :  * called.
    1037             :  */
    1038           0 : static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
    1039             : {
    1040           0 :         WARN(1, "Primary handler called for nested irq %d\n", irq);
    1041           0 :         return IRQ_NONE;
    1042             : }
    1043             : 
    1044           0 : static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
    1045             : {
    1046           0 :         WARN(1, "Secondary action handler called for irq %d\n", irq);
    1047           0 :         return IRQ_NONE;
    1048             : }
    1049             : 
    1050           0 : static int irq_wait_for_interrupt(struct irqaction *action)
    1051             : {
    1052             :         for (;;) {
    1053           0 :                 set_current_state(TASK_INTERRUPTIBLE);
    1054             : 
    1055           0 :                 if (kthread_should_stop()) {
    1056             :                         /* may need to run one last time */
    1057           0 :                         if (test_and_clear_bit(IRQTF_RUNTHREAD,
    1058           0 :                                                &action->thread_flags)) {
    1059           0 :                                 __set_current_state(TASK_RUNNING);
    1060           0 :                                 return 0;
    1061             :                         }
    1062           0 :                         __set_current_state(TASK_RUNNING);
    1063           0 :                         return -1;
    1064             :                 }
    1065             : 
    1066           0 :                 if (test_and_clear_bit(IRQTF_RUNTHREAD,
    1067           0 :                                        &action->thread_flags)) {
    1068           0 :                         __set_current_state(TASK_RUNNING);
    1069           0 :                         return 0;
    1070             :                 }
    1071           0 :                 schedule();
    1072             :         }
    1073             : }
    1074             : 
    1075             : /*
    1076             :  * Oneshot interrupts keep the irq line masked until the threaded
    1077             :  * handler has finished. Unmask if the interrupt has not been disabled
    1078             :  * and is marked MASKED.
    1079             :  */
    1080           0 : static void irq_finalize_oneshot(struct irq_desc *desc,
    1081             :                                  struct irqaction *action)
    1082             : {
    1083           0 :         if (!(desc->istate & IRQS_ONESHOT) ||
    1084           0 :             action->handler == irq_forced_secondary_handler)
    1085             :                 return;
    1086             : again:
    1087           0 :         chip_bus_lock(desc);
    1088           0 :         raw_spin_lock_irq(&desc->lock);
    1089             : 
    1090             :         /*
    1091             :          * Implausible though it may be, we need to protect ourselves
    1092             :          * against the following scenario:
    1093             :          *
    1094             :          * The thread finishes before the hard interrupt handler on
    1095             :          * the other CPU. If we unmask the irq line then the interrupt
    1096             :          * can come in again, mask the line and leave due to
    1097             :          * IRQS_INPROGRESS, so the irq line stays masked forever.
    1098             :          *
    1099             :          * This also serializes the state of shared oneshot handlers
    1100             :          * versus "desc->threads_oneshot |= action->thread_mask;" in
    1101             :          * irq_wake_thread(). See the comment there which explains the
    1102             :          * serialization.
    1103             :          */
    1104           0 :         if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
    1105           0 :                 raw_spin_unlock_irq(&desc->lock);
    1106           0 :                 chip_bus_sync_unlock(desc);
    1107             :                 cpu_relax();
    1108             :                 goto again;
    1109             :         }
    1110             : 
    1111             :         /*
    1112             :          * Now check again whether the thread should run. Otherwise
    1113             :          * we would clear the threads_oneshot bit of this thread which
    1114             :          * was just set.
    1115             :          */
    1116           0 :         if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
    1117             :                 goto out_unlock;
    1118             : 
    1119           0 :         desc->threads_oneshot &= ~action->thread_mask;
    1120             : 
    1121           0 :         if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
    1122           0 :             irqd_irq_masked(&desc->irq_data))
    1123           0 :                 unmask_threaded_irq(desc);
    1124             : 
    1125             : out_unlock:
    1126           0 :         raw_spin_unlock_irq(&desc->lock);
    1127             :         chip_bus_sync_unlock(desc);
    1128             : }
    1129             : 
    1130             : #ifdef CONFIG_SMP
    1131             : /*
    1132             :  * Check whether we need to change the affinity of the interrupt thread.
    1133             :  */
    1134             : static void
    1135             : irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
    1136             : {
    1137             :         cpumask_var_t mask;
    1138             :         bool valid = true;
    1139             : 
    1140             :         if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
    1141             :                 return;
    1142             : 
    1143             :         /*
    1144             :          * In case we are out of memory, set IRQTF_AFFINITY again and
    1145             :          * retry on the next invocation.
    1146             :          */
    1147             :         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
    1148             :                 set_bit(IRQTF_AFFINITY, &action->thread_flags);
    1149             :                 return;
    1150             :         }
    1151             : 
    1152             :         raw_spin_lock_irq(&desc->lock);
    1153             :         /*
    1154             :          * This code is triggered unconditionally. Check the affinity
    1155             :          * mask pointer. For CONFIG_CPUMASK_OFFSTACK=n this is optimized out.
    1156             :          */
    1157             :         if (cpumask_available(desc->irq_common_data.affinity)) {
    1158             :                 const struct cpumask *m;
    1159             : 
    1160             :                 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
    1161             :                 cpumask_copy(mask, m);
    1162             :         } else {
    1163             :                 valid = false;
    1164             :         }
    1165             :         raw_spin_unlock_irq(&desc->lock);
    1166             : 
    1167             :         if (valid)
    1168             :                 set_cpus_allowed_ptr(current, mask);
    1169             :         free_cpumask_var(mask);
    1170             : }
    1171             : #else
    1172             : static inline void
    1173             : irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
    1174             : #endif
    1175             : 
    1176             : /*
    1177             :  * Interrupts which are not explicitly requested as threaded
    1178             :  * interrupts rely on the implicit bh/preempt disable of the hard irq
    1179             :  * context. So we need to disable bh here to avoid deadlocks and other
    1180             :  * side effects.
    1181             :  */
    1182             : static irqreturn_t
    1183             : irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
    1184             : {
    1185             :         irqreturn_t ret;
    1186             : 
    1187             :         local_bh_disable();
    1188             :         if (!IS_ENABLED(CONFIG_PREEMPT_RT))
    1189             :                 local_irq_disable();
    1190             :         ret = action->thread_fn(action->irq, action->dev_id);
    1191             :         if (ret == IRQ_HANDLED)
    1192             :                 atomic_inc(&desc->threads_handled);
    1193             : 
    1194             :         irq_finalize_oneshot(desc, action);
    1195             :         if (!IS_ENABLED(CONFIG_PREEMPT_RT))
    1196             :                 local_irq_enable();
    1197             :         local_bh_enable();
    1198             :         return ret;
    1199             : }
    1200             : 
    1201             : /*
    1202             :  * Interrupts explicitly requested as threaded interrupts want to be
    1203             :  * preemptible - many of them need to sleep and wait for slow busses to
    1204             :  * complete.
    1205             :  */
    1206           0 : static irqreturn_t irq_thread_fn(struct irq_desc *desc,
    1207             :                 struct irqaction *action)
    1208             : {
    1209             :         irqreturn_t ret;
    1210             : 
    1211           0 :         ret = action->thread_fn(action->irq, action->dev_id);
    1212           0 :         if (ret == IRQ_HANDLED)
    1213           0 :                 atomic_inc(&desc->threads_handled);
    1214             : 
    1215           0 :         irq_finalize_oneshot(desc, action);
    1216           0 :         return ret;
    1217             : }
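                     : /*
                     :  * Implementation sketch (annotation, not part of manage.c): a threaded
                     :  * handler may sleep, e.g. to read a status register over I2C. The
                     :  * register offset and dev_id layout are hypothetical; assumes
                     :  * <linux/i2c.h>.
                     :  */
                     : static irqreturn_t foo_thread_fn(int irq, void *dev_id)
                     : {
                     :         struct i2c_client *client = dev_id;
                     :         int status;
                     : 
                     :         /* Sleeping bus access is fine here, unlike in hard irq context. */
                     :         status = i2c_smbus_read_byte_data(client, 0x00);
                     :         if (status < 0)
                     :                 return IRQ_NONE;
                     : 
                     :         /* ... act on status ... */
                     :         return IRQ_HANDLED;
                     : }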
    1218             : 
    1219           0 : static void wake_threads_waitq(struct irq_desc *desc)
    1220             : {
    1221           0 :         if (atomic_dec_and_test(&desc->threads_active))
    1222           0 :                 wake_up(&desc->wait_for_threads);
    1223           0 : }
    1224             : 
    1225           0 : static void irq_thread_dtor(struct callback_head *unused)
    1226             : {
    1227           0 :         struct task_struct *tsk = current;
    1228             :         struct irq_desc *desc;
    1229             :         struct irqaction *action;
    1230             : 
    1231           0 :         if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
    1232             :                 return;
    1233             : 
    1234           0 :         action = kthread_data(tsk);
    1235             : 
    1236           0 :         pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
    1237             :                tsk->comm, tsk->pid, action->irq);
    1238             : 
    1239             : 
    1240           0 :         desc = irq_to_desc(action->irq);
    1241             :         /*
    1242             :          * If IRQTF_RUNTHREAD is set, we need to decrement
    1243             :          * desc->threads_active and wake possible waiters.
    1244             :          */
    1245           0 :         if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
    1246           0 :                 wake_threads_waitq(desc);
    1247             : 
    1248             :         /* Prevent a stale desc->threads_oneshot */
    1249           0 :         irq_finalize_oneshot(desc, action);
    1250             : }
    1251             : 
    1252           0 : static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
    1253             : {
    1254           0 :         struct irqaction *secondary = action->secondary;
    1255             : 
    1256           0 :         if (WARN_ON_ONCE(!secondary))
    1257             :                 return;
    1258             : 
    1259           0 :         raw_spin_lock_irq(&desc->lock);
    1260           0 :         __irq_wake_thread(desc, secondary);
    1261           0 :         raw_spin_unlock_irq(&desc->lock);
    1262             : }
    1263             : 
    1264             : /*
    1265             :  * Internal function to notify that an interrupt thread is ready.
    1266             :  */
    1267             : static void irq_thread_set_ready(struct irq_desc *desc,
    1268             :                                  struct irqaction *action)
    1269             : {
    1270           0 :         set_bit(IRQTF_READY, &action->thread_flags);
    1271           0 :         wake_up(&desc->wait_for_threads);
    1272             : }
    1273             : 
    1274             : /*
    1275             :  * Internal function to wake up an interrupt thread and wait until it is
    1276             :  * ready.
    1277             :  */
    1278           4 : static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
    1279             :                                                   struct irqaction *action)
    1280             : {
    1281           4 :         if (!action || !action->thread)
    1282             :                 return;
    1283             : 
    1284           0 :         wake_up_process(action->thread);
    1285           0 :         wait_event(desc->wait_for_threads,
    1286             :                    test_bit(IRQTF_READY, &action->thread_flags));
    1287             : }
    1288             : 
    1289             : /*
    1290             :  * Interrupt handler thread
    1291             :  */
    1292           0 : static int irq_thread(void *data)
    1293             : {
    1294             :         struct callback_head on_exit_work;
    1295           0 :         struct irqaction *action = data;
    1296           0 :         struct irq_desc *desc = irq_to_desc(action->irq);
    1297             :         irqreturn_t (*handler_fn)(struct irq_desc *desc,
    1298             :                         struct irqaction *action);
    1299             : 
    1300           0 :         irq_thread_set_ready(desc, action);
    1301             : 
    1302           0 :         sched_set_fifo(current);
    1303             : 
    1304             :         if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
    1305             :                                            &action->thread_flags))
    1306             :                 handler_fn = irq_forced_thread_fn;
    1307             :         else
    1308           0 :                 handler_fn = irq_thread_fn;
    1309             : 
    1310           0 :         init_task_work(&on_exit_work, irq_thread_dtor);
    1311           0 :         task_work_add(current, &on_exit_work, TWA_NONE);
    1312             : 
    1313           0 :         irq_thread_check_affinity(desc, action);
    1314             : 
    1315           0 :         while (!irq_wait_for_interrupt(action)) {
    1316             :                 irqreturn_t action_ret;
    1317             : 
    1318           0 :                 irq_thread_check_affinity(desc, action);
    1319             : 
    1320           0 :                 action_ret = handler_fn(desc, action);
    1321           0 :                 if (action_ret == IRQ_WAKE_THREAD)
    1322           0 :                         irq_wake_secondary(desc, action);
    1323             : 
    1324           0 :                 wake_threads_waitq(desc);
    1325             :         }
    1326             : 
    1327             :         /*
    1328             :          * This is the regular exit path. __free_irq() is stopping the
    1329             :          * thread via kthread_stop() after calling
    1330             :          * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
    1331             :          * oneshot mask bit can be set.
    1332             :          */
    1333           0 :         task_work_cancel(current, irq_thread_dtor);
    1334           0 :         return 0;
    1335             : }
    1336             : 
    1337             : /**
    1338             :  *      irq_wake_thread - wake the irq thread for the action identified by dev_id
    1339             :  *      @irq:           Interrupt line
    1340             :  *      @dev_id:        Device identity for which the thread should be woken
    1341             :  *
    1342             :  */
    1343           0 : void irq_wake_thread(unsigned int irq, void *dev_id)
    1344             : {
    1345           0 :         struct irq_desc *desc = irq_to_desc(irq);
    1346             :         struct irqaction *action;
    1347             :         unsigned long flags;
    1348             : 
    1349           0 :         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
    1350             :                 return;
    1351             : 
    1352           0 :         raw_spin_lock_irqsave(&desc->lock, flags);
    1353           0 :         for_each_action_of_desc(desc, action) {
    1354           0 :                 if (action->dev_id == dev_id) {
    1355           0 :                         if (action->thread)
    1356           0 :                                 __irq_wake_thread(desc, action);
    1357             :                         break;
    1358             :                 }
    1359             :         }
    1360           0 :         raw_spin_unlock_irqrestore(&desc->lock, flags);
    1361             : }
    1362             : EXPORT_SYMBOL_GPL(irq_wake_thread);
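                     : /*
                     :  * Usage sketch (annotation, not part of manage.c): a driver that
                     :  * detects pending work outside its primary handler, e.g. in a timeout
                     :  * path, can kick its threaded handler directly. Names are hypothetical.
                     :  */
                     : static void foo_timeout(unsigned int irq, void *foo_dev)
                     : {
                     :         /* Wakes the thread of the action whose dev_id matches. */
                     :         irq_wake_thread(irq, foo_dev);
                     : }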
    1363             : 
    1364             : static int irq_setup_forced_threading(struct irqaction *new)
    1365             : {
    1366             :         if (!force_irqthreads())
    1367             :                 return 0;
    1368             :         if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
    1369             :                 return 0;
    1370             : 
    1371             :         /*
    1372             :          * No further action required for interrupts which are already
    1373             :          * requested as threaded interrupts.
    1374             :          */
    1375             :         if (new->handler == irq_default_primary_handler)
    1376             :                 return 0;
    1377             : 
    1378             :         new->flags |= IRQF_ONESHOT;
    1379             : 
    1380             :         /*
    1381             :          * Handle the case where we have both a real primary handler and
    1382             :          * a thread handler. We force-thread both by creating a secondary
    1383             :          * action for the thread handler.
    1384             :          */
    1385             :         if (new->handler && new->thread_fn) {
    1386             :                 /* Allocate the secondary action */
    1387             :                 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
    1388             :                 if (!new->secondary)
    1389             :                         return -ENOMEM;
    1390             :                 new->secondary->handler = irq_forced_secondary_handler;
    1391             :                 new->secondary->thread_fn = new->thread_fn;
    1392             :                 new->secondary->dev_id = new->dev_id;
    1393             :                 new->secondary->irq = new->irq;
    1394             :                 new->secondary->name = new->name;
    1395             :         }
    1396             :         /* Deal with the primary handler */
    1397             :         set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
    1398             :         new->thread_fn = new->handler;
    1399             :         new->handler = irq_default_primary_handler;
    1400             :         return 0;
    1401             : }
    1402             : 
    1403             : static int irq_request_resources(struct irq_desc *desc)
    1404             : {
    1405           2 :         struct irq_data *d = &desc->irq_data;
    1406           2 :         struct irq_chip *c = d->chip;
    1407             : 
    1408           2 :         return c->irq_request_resources ? c->irq_request_resources(d) : 0;
    1409             : }
    1410             : 
    1411             : static void irq_release_resources(struct irq_desc *desc)
    1412             : {
    1413           0 :         struct irq_data *d = &desc->irq_data;
    1414           0 :         struct irq_chip *c = d->chip;
    1415             : 
    1416           0 :         if (c->irq_release_resources)
    1417           0 :                 c->irq_release_resources(d);
    1418             : }
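                     : /*
                     :  * Implementation sketch (annotation, not part of manage.c): an irqchip
                     :  * can hook these callbacks, e.g. a GPIO controller reserving the pin
                     :  * while an action is installed. The foo_* helpers are hypothetical.
                     :  */
                     : static int foo_irq_request_resources(struct irq_data *d)
                     : {
                     :         return foo_reserve_pin(irqd_to_hwirq(d));
                     : }
                     : 
                     : static void foo_irq_release_resources(struct irq_data *d)
                     : {
                     :         foo_unreserve_pin(irqd_to_hwirq(d));
                     : }
                     : 
                     : static struct irq_chip foo_irq_chip = {
                     :         .name                   = "foo",
                     :         .irq_request_resources  = foo_irq_request_resources,
                     :         .irq_release_resources  = foo_irq_release_resources,
                     :         /* .irq_mask, .irq_unmask, ... */
                     : };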
    1419             : 
    1420             : static bool irq_supports_nmi(struct irq_desc *desc)
    1421             : {
    1422           0 :         struct irq_data *d = irq_desc_get_irq_data(desc);
    1423             : 
    1424             : #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
    1425             :         /* Only IRQs directly managed by the root irqchip can be set as NMI */
    1426           0 :         if (d->parent_data)
    1427             :                 return false;
    1428             : #endif
    1429             :         /* Don't support NMIs for chips behind a slow bus */
    1430           0 :         if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
    1431             :                 return false;
    1432             : 
    1433           0 :         return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
    1434             : }
    1435             : 
    1436             : static int irq_nmi_setup(struct irq_desc *desc)
    1437             : {
    1438           0 :         struct irq_data *d = irq_desc_get_irq_data(desc);
    1439           0 :         struct irq_chip *c = d->chip;
    1440             : 
    1441           0 :         return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
    1442             : }
    1443             : 
    1444             : static void irq_nmi_teardown(struct irq_desc *desc)
    1445             : {
    1446           0 :         struct irq_data *d = irq_desc_get_irq_data(desc);
    1447           0 :         struct irq_chip *c = d->chip;
    1448             : 
    1449           0 :         if (c->irq_nmi_teardown)
    1450           0 :                 c->irq_nmi_teardown(d);
    1451             : }
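                     : /*
                     :  * Usage sketch (annotation, not part of manage.c): requesting the line
                     :  * as an NMI via request_nmi(), which only succeeds when the checks
                     :  * above pass (root-chip interrupt, no slow bus, IRQCHIP_SUPPORTS_NMI).
                     :  * Names are hypothetical.
                     :  */
                     : static irqreturn_t foo_nmi_handler(int irq, void *dev_id); /* NMI context! */
                     : 
                     : static int foo_request_nmi(unsigned int irq, void *foo_dev)
                     : {
                     :         return request_nmi(irq, foo_nmi_handler, 0, "foo-nmi", foo_dev);
                     : }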
    1452             : 
    1453             : static int
    1454           0 : setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
    1455             : {
    1456             :         struct task_struct *t;
    1457             : 
    1458           0 :         if (!secondary) {
    1459           0 :                 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
    1460             :                                    new->name);
    1461             :         } else {
    1462           0 :                 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
    1463             :                                    new->name);
    1464             :         }
    1465             : 
    1466           0 :         if (IS_ERR(t))
    1467           0 :                 return PTR_ERR(t);
    1468             : 
    1469             :         /*
    1470             :          * We keep the reference to the task struct even if
    1471             :          * the thread dies, so that the interrupt code does not
    1472             :          * reference an already freed task_struct.
    1473             :          */
    1474           0 :         new->thread = get_task_struct(t);
    1475             :         /*
    1476             :          * Tell the thread to set its affinity. This is
    1477             :          * important for shared interrupt handlers since we do
    1478             :          * not invoke setup_affinity() for the secondary
    1479             :          * handlers: everything is already set up. Even for
    1480             :          * interrupts marked with IRQF_NOBALANCING this is
    1481             :          * correct, as we want the thread to move to the cpu(s)
    1482             :          * on which the requesting code placed the interrupt.
    1483             :          */
    1484           0 :         set_bit(IRQTF_AFFINITY, &new->thread_flags);
    1485           0 :         return 0;
    1486             : }
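                     : /*
                     :  * Annotation: with the naming above, the threads appear in ps/top as
                     :  * e.g. "irq/54-eth0" for the primary and "irq/54-s-eth0" for the
                     :  * secondary handler thread.
                     :  */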
    1487             : 
    1488             : /*
    1489             :  * Internal function to register an irqaction - typically used to
    1490             :  * allocate special interrupts that are part of the architecture.
    1491             :  *
    1492             :  * Locking rules:
    1493             :  *
    1494             :  * desc->request_mutex      Provides serialization against a concurrent free_irq()
    1495             :  *   chip_bus_lock          Provides serialization for slow bus operations
    1496             :  *     desc->lock           Provides serialization against hard interrupts
    1497             :  *
    1498             :  * chip_bus_lock and desc->lock are sufficient for all other management and
    1499             :  * interrupt related functions. desc->request_mutex solely serializes
    1500             :  * request/free_irq().
    1501             :  */
    1502             : static int
    1503           2 : __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
    1504             : {
    1505             :         struct irqaction *old, **old_ptr;
    1506           2 :         unsigned long flags, thread_mask = 0;
    1507           2 :         int ret, nested, shared = 0;
    1508             : 
    1509           2 :         if (!desc)
    1510             :                 return -EINVAL;
    1511             : 
    1512           2 :         if (desc->irq_data.chip == &no_irq_chip)
    1513             :                 return -ENOSYS;
    1514           2 :         if (!try_module_get(desc->owner))
    1515             :                 return -ENODEV;
    1516             : 
    1517           2 :         new->irq = irq;
    1518             : 
    1519             :         /*
    1520             :          * If the trigger type is not specified by the caller,
    1521             :          * then use the default for this interrupt.
    1522             :          */
    1523           2 :         if (!(new->flags & IRQF_TRIGGER_MASK))
    1524           4 :                 new->flags |= irqd_get_trigger_type(&desc->irq_data);
    1525             : 
    1526             :         /*
    1527             :          * Check whether the interrupt nests into another interrupt
    1528             :          * thread.
    1529             :          */
    1530           4 :         nested = irq_settings_is_nested_thread(desc);
    1531           2 :         if (nested) {
    1532           0 :                 if (!new->thread_fn) {
    1533             :                         ret = -EINVAL;
    1534             :                         goto out_mput;
    1535             :                 }
    1536             :                 /*
    1537             :                  * Replace the primary handler, which the driver provided
    1538             :                  * for non-nested interrupt handling, with the dummy
    1539             :                  * function which warns when called.
    1540             :                  */
    1541           0 :                 new->handler = irq_nested_primary_handler;
    1542             :         } else {
    1543             :                 if (irq_settings_can_thread(desc)) {
    1544             :                         ret = irq_setup_forced_threading(new);
    1545             :                         if (ret)
    1546             :                                 goto out_mput;
    1547             :                 }
    1548             :         }
    1549             : 
    1550             :         /*
    1551             :          * Create a handler thread when a thread function is supplied
    1552             :          * and the interrupt does not nest into another interrupt
    1553             :          * thread.
    1554             :          */
    1555           2 :         if (new->thread_fn && !nested) {
    1556           0 :                 ret = setup_irq_thread(new, irq, false);
    1557           0 :                 if (ret)
    1558             :                         goto out_mput;
    1559           0 :                 if (new->secondary) {
    1560           0 :                         ret = setup_irq_thread(new->secondary, irq, true);
    1561           0 :                         if (ret)
    1562             :                                 goto out_thread;
    1563             :                 }
    1564             :         }
    1565             : 
    1566             :         /*
    1567             :          * Drivers are often written to work without knowledge about the
    1568             :          * underlying irq chip implementation, so a request for a
    1569             :          * threaded irq without a primary hard irq context handler
    1570             :          * requires the ONESHOT flag to be set. Some irq chips, like
    1571             :          * MSI based interrupts, are inherently oneshot safe. Check the
    1572             :          * chip flags, so we can avoid the unmask dance at the end of
    1573             :          * the threaded handler for those.
    1574             :          */
    1575           2 :         if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
    1576           0 :                 new->flags &= ~IRQF_ONESHOT;
    1577             : 
    1578             :         /*
    1579             :          * Protects against a concurrent __free_irq() call which might wait
    1580             :          * for synchronize_hardirq() to complete without holding the optional
    1581             :          * chip bus lock and desc->lock. Also protects against handing out
    1582             :          * a recycled oneshot thread_mask bit while it's still in use by
    1583             :          * its previous owner.
    1584             :          */
    1585           2 :         mutex_lock(&desc->request_mutex);
    1586             : 
    1587             :         /*
    1588             :          * Acquire bus lock as the irq_request_resources() callback below
    1589             :          * might rely on the serialization or the magic power management
    1590             :          * functions which are abusing the irq_bus_lock() callback.
    1591             :          */
    1592           2 :         chip_bus_lock(desc);
    1593             : 
    1594             :         /* First installed action requests resources. */
    1595           2 :         if (!desc->action) {
    1596           2 :                 ret = irq_request_resources(desc);
    1597           2 :                 if (ret) {
    1598           0 :                         pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
    1599             :                                new->name, irq, desc->irq_data.chip->name);
    1600           0 :                         goto out_bus_unlock;
    1601             :                 }
    1602             :         }
    1603             : 
    1604             :         /*
    1605             :          * The following block of code has to be executed atomically
    1606             :          * protected against a concurrent interrupt and any of the other
    1607             :          * management calls which are not serialized via
    1608             :          * desc->request_mutex or the optional bus lock.
    1609             :          */
    1610           2 :         raw_spin_lock_irqsave(&desc->lock, flags);
    1611           2 :         old_ptr = &desc->action;
    1612           2 :         old = *old_ptr;
    1613           2 :         if (old) {
    1614             :                 /*
    1615             :                  * Can't share interrupts unless both agree to and are
    1616             :                  * the same type (level, edge, polarity). So both flag
    1617             :                  * fields must have IRQF_SHARED set and the bits which
    1618             :                  * set the trigger type must match. Also all must
    1619             :                  * agree on ONESHOT.
    1620             :                  * Interrupt lines used for NMIs cannot be shared.
    1621             :                  */
    1622             :                 unsigned int oldtype;
    1623             : 
    1624           0 :                 if (desc->istate & IRQS_NMI) {
    1625           0 :                         pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
    1626             :                                 new->name, irq, desc->irq_data.chip->name);
    1627           0 :                         ret = -EINVAL;
    1628           0 :                         goto out_unlock;
    1629             :                 }
    1630             : 
    1631             :                 /*
    1632             :                  * If nobody set the configuration before, inherit
    1633             :                  * the one provided by the requester.
    1634             :                  */
    1635           0 :                 if (irqd_trigger_type_was_set(&desc->irq_data)) {
    1636           0 :                         oldtype = irqd_get_trigger_type(&desc->irq_data);
    1637             :                 } else {
    1638           0 :                         oldtype = new->flags & IRQF_TRIGGER_MASK;
    1639           0 :                         irqd_set_trigger_type(&desc->irq_data, oldtype);
    1640             :                 }
    1641             : 
    1642           0 :                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
    1643           0 :                     (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
    1644           0 :                     ((old->flags ^ new->flags) & IRQF_ONESHOT))
    1645             :                         goto mismatch;
    1646             : 
    1647             :                 /* All handlers must agree on per-cpuness */
    1648           0 :                 if ((old->flags & IRQF_PERCPU) !=
    1649             :                     (new->flags & IRQF_PERCPU))
    1650             :                         goto mismatch;
    1651             : 
    1652             :                 /* add new interrupt at end of irq queue */
    1653             :                 do {
    1654             :                         /*
    1655             :                          * OR all existing action->thread_mask bits,
    1656             :                          * so we can find the next zero bit for this
    1657             :                          * new action.
    1658             :                          */
    1659           0 :                         thread_mask |= old->thread_mask;
    1660           0 :                         old_ptr = &old->next;
    1661           0 :                         old = *old_ptr;
    1662           0 :                 } while (old);
    1663             :                 shared = 1;
    1664             :         }
    1665             : 
    1666             :         /*
    1667             :          * Set up the thread mask for this irqaction for ONESHOT. For
    1668             :          * !ONESHOT irqs the thread mask is 0 so we can avoid a
    1669             :          * conditional in irq_wake_thread().
    1670             :          */
    1671           2 :         if (new->flags & IRQF_ONESHOT) {
    1672             :                 /*
    1673             :                  * Unlikely to have 32 (or 64 on 64-bit) irqs sharing one line,
    1674             :                  * but who knows.
    1675             :                  */
    1676           0 :                 if (thread_mask == ~0UL) {
    1677             :                         ret = -EBUSY;
    1678             :                         goto out_unlock;
    1679             :                 }
    1680             :                 /*
    1681             :                  * The thread_mask for the action is OR'ed into
    1682             :                  * desc->threads_oneshot to indicate that the
    1683             :                  * IRQF_ONESHOT thread handler has been woken, but not
    1684             :                  * yet finished. The bit is cleared when a thread
    1685             :                  * completes. When all threads of a shared interrupt
    1686             :                  * line have completed, desc->threads_oneshot becomes
    1687             :                  * zero and the interrupt line is unmasked. See
    1688             :                  * handle.c:irq_wake_thread() for further information.
    1689             :                  *
    1690             :                  * If no thread is woken by primary (hard irq context)
    1691             :                  * interrupt handlers, then desc->threads_oneshot is
    1692             :                  * also checked for zero to unmask the irq line in the
    1693             :                  * affected hard irq flow handlers
    1694             :                  * (handle_[fasteoi|level]_irq).
    1695             :                  *
    1696             :                  * The new action gets the first zero bit of
    1697             :                  * thread_mask assigned. See the loop above which ORs
    1698             :                  * all existing action->thread_mask bits.
    1699             :                  */
    1700           0 :                 new->thread_mask = 1UL << ffz(thread_mask);
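                     :                 /*
                     :                  * Annotation: worked example. With two oneshot
                     :                  * actions already installed, thread_mask == 0x3,
                     :                  * ffz(thread_mask) == 2, and this action gets
                     :                  * thread_mask == 0x4.
                     :                  */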
    1701             : 
    1702           2 :         } else if (new->handler == irq_default_primary_handler &&
    1703           0 :                    !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
    1704             :                 /*
    1705             :                  * The interrupt was requested with handler = NULL, so
    1706             :                  * we use the default primary handler for it. But it
    1707             :                  * does not have the oneshot flag set. In combination
    1708             :                  * with level interrupts this is deadly, because the
    1709             :                  * default primary handler just wakes the thread, then
    1710             :                  * the irq line is reenabled, but the device still
    1711             :                  * has the level irq asserted. Rinse and repeat....
    1712             :                  *
    1713             :                  * While this works for edge type interrupts, we play
    1714             :                  * it safe and reject unconditionally because we can't
    1715             :                  * say for sure which type this interrupt really
    1716             :                  * has. The type flags are unreliable as the
    1717             :                  * underlying chip implementation can override them.
    1718             :                  */
    1719           0 :                 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
    1720             :                        new->name, irq);
    1721           0 :                 ret = -EINVAL;
    1722           0 :                 goto out_unlock;
    1723             :         }
    1724             : 
    1725           2 :         if (!shared) {
    1726             :                 /* Set up the type (level, edge, polarity) if configured: */
    1727           2 :                 if (new->flags & IRQF_TRIGGER_MASK) {
    1728           0 :                         ret = __irq_set_trigger(desc,
    1729             :                                                 new->flags & IRQF_TRIGGER_MASK);
    1730             : 
    1731           0 :                         if (ret)
    1732             :                                 goto out_unlock;
    1733             :                 }
    1734             : 
    1735             :                 /*
    1736             :                  * Activate the interrupt. That activation must happen
    1737             :                  * independently of IRQ_NOAUTOEN. request_irq() can fail
    1738             :                  * and the callers are supposed to handle
    1739             :                  * that. enable_irq() of an interrupt requested with
    1740             :                  * IRQ_NOAUTOEN is not supposed to fail. The activation
    1741             :                  * keeps it in shutdown mode; it merely associates
    1742             :                  * resources if necessary and if that's not possible it
    1743             :                  * fails. Interrupts which are in managed shutdown mode
    1744             :                  * will simply ignore that activation request.
    1745             :                  */
    1746           2 :                 ret = irq_activate(desc);
    1747           2 :                 if (ret)
    1748             :                         goto out_unlock;
    1749             : 
    1750           2 :                 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
    1751             :                                   IRQS_ONESHOT | IRQS_WAITING);
    1752           4 :                 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
    1753             : 
    1754           2 :                 if (new->flags & IRQF_PERCPU) {
    1755           0 :                         irqd_set(&desc->irq_data, IRQD_PER_CPU);
    1756           0 :                         irq_settings_set_per_cpu(desc);
    1757           0 :                         if (new->flags & IRQF_NO_DEBUG)
    1758           0 :                                 irq_settings_set_no_debug(desc);
    1759             :                 }
    1760             : 
    1761           2 :                 if (noirqdebug)
    1762           0 :                         irq_settings_set_no_debug(desc);
    1763             : 
    1764           2 :                 if (new->flags & IRQF_ONESHOT)
    1765           0 :                         desc->istate |= IRQS_ONESHOT;
    1766             : 
    1767             :                 /* Exclude IRQ from balancing if requested */
    1768           2 :                 if (new->flags & IRQF_NOBALANCING) {
    1769           0 :                         irq_settings_set_no_balancing(desc);
    1770           0 :                         irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
    1771             :                 }
    1772             : 
    1773           4 :                 if (!(new->flags & IRQF_NO_AUTOEN) &&
    1774           2 :                     irq_settings_can_autoenable(desc)) {
    1775           2 :                         irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
    1776             :                 } else {
    1777             :                         /*
    1778             :                          * Shared interrupts do not go well with disabling
    1779             :                          * auto enable. A second sharer might request the
    1780             :                          * irq while it is still disabled and then wait for
    1781             :                          * interrupts forever.
    1782             :                          */
    1783           0 :                         WARN_ON_ONCE(new->flags & IRQF_SHARED);
    1784             :                         /* Undo nested disables: */
    1785           0 :                         desc->depth = 1;
    1786             :                 }
    1787             : 
    1788           0 :         } else if (new->flags & IRQF_TRIGGER_MASK) {
    1789           0 :                 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
    1790           0 :                 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
    1791             : 
    1792           0 :                 if (nmsk != omsk)
    1793             :                         /* hope the handler works with the current trigger mode */
    1794           0 :                         pr_warn("irq %d uses trigger mode %u; requested %u\n",
    1795             :                                 irq, omsk, nmsk);
    1796             :         }
    1797             : 
    1798           2 :         *old_ptr = new;
    1799             : 
    1800           2 :         irq_pm_install_action(desc, new);
    1801             : 
    1802             :         /* Reset broken irq detection when installing new handler */
    1803           2 :         desc->irq_count = 0;
    1804           2 :         desc->irqs_unhandled = 0;
    1805             : 
    1806             :         /*
    1807             :          * Check whether we disabled the irq via the spurious handler
    1808             :          * before. Reenable it and give it another chance.
    1809             :          */
    1810           2 :         if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
    1811           0 :                 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
    1812           0 :                 __enable_irq(desc);
    1813             :         }
    1814             : 
    1815           4 :         raw_spin_unlock_irqrestore(&desc->lock, flags);
    1816           2 :         chip_bus_sync_unlock(desc);
    1817           2 :         mutex_unlock(&desc->request_mutex);
    1818             : 
    1819           2 :         irq_setup_timings(desc, new);
    1820             : 
    1821           2 :         wake_up_and_wait_for_irq_thread_ready(desc, new);
    1822           2 :         wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
    1823             : 
    1824           2 :         register_irq_proc(irq, desc);
    1825           2 :         new->dir = NULL;
    1826           2 :         register_handler_proc(irq, new);
    1827           2 :         return 0;
    1828             : 
    1829             : mismatch:
    1830           0 :         if (!(new->flags & IRQF_PROBE_SHARED)) {
    1831           0 :                 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
    1832             :                        irq, new->flags, new->name, old->flags, old->name);
    1833             : #ifdef CONFIG_DEBUG_SHIRQ
    1834             :                 dump_stack();
    1835             : #endif
    1836             :         }
    1837             :         ret = -EBUSY;
    1838             : 
    1839             : out_unlock:
    1840           0 :         raw_spin_unlock_irqrestore(&desc->lock, flags);
    1841             : 
    1842           0 :         if (!desc->action)
    1843             :                 irq_release_resources(desc);
    1844             : out_bus_unlock:
    1845           0 :         chip_bus_sync_unlock(desc);
    1846           0 :         mutex_unlock(&desc->request_mutex);
    1847             : 
    1848             : out_thread:
    1849           0 :         if (new->thread) {
    1850           0 :                 struct task_struct *t = new->thread;
    1851             : 
    1852           0 :                 new->thread = NULL;
    1853           0 :                 kthread_stop(t);
    1854           0 :                 put_task_struct(t);
    1855             :         }
    1856           0 :         if (new->secondary && new->secondary->thread) {
    1857           0 :                 struct task_struct *t = new->secondary->thread;
    1858             : 
    1859           0 :                 new->secondary->thread = NULL;
    1860           0 :                 kthread_stop(t);
    1861           0 :                 put_task_struct(t);
    1862             :         }
    1863             : out_mput:
    1864           0 :         module_put(desc->owner);
    1865           0 :         return ret;
    1866             : }
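                     : /*
                     :  * Usage sketch (annotation, not part of manage.c): two drivers sharing
                     :  * one line. The mismatch checks in __setup_irq() require both requests
                     :  * to agree on IRQF_SHARED, the trigger type and IRQF_ONESHOT, and each
                     :  * action needs a unique dev_id. foo/bar names are hypothetical.
                     :  */
                     : static irqreturn_t foo_handler(int irq, void *dev_id); /* hypothetical */
                     : static irqreturn_t bar_handler(int irq, void *dev_id); /* hypothetical */
                     : 
                     : static int foo_bar_share(unsigned int irq, void *foo_dev, void *bar_dev)
                     : {
                     :         int err;
                     : 
                     :         err = request_irq(irq, foo_handler,
                     :                           IRQF_SHARED | IRQF_TRIGGER_HIGH, "foo", foo_dev);
                     :         if (err)
                     :                 return err;
                     : 
                     :         /* A different trigger type here would fail with -EBUSY. */
                     :         return request_irq(irq, bar_handler,
                     :                            IRQF_SHARED | IRQF_TRIGGER_HIGH, "bar", bar_dev);
                     : }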
    1867             : 
    1868             : /*
    1869             :  * Internal function to unregister an irqaction - used to free
    1870             :  * regular and special interrupts that are part of the architecture.
    1871             :  */
    1872           0 : static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
    1873             : {
    1874           0 :         unsigned irq = desc->irq_data.irq;
    1875             :         struct irqaction *action, **action_ptr;
    1876             :         unsigned long flags;
    1877             : 
    1878           0 :         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
    1879             : 
    1880           0 :         mutex_lock(&desc->request_mutex);
    1881           0 :         chip_bus_lock(desc);
    1882           0 :         raw_spin_lock_irqsave(&desc->lock, flags);
    1883             : 
    1884             :         /*
    1885             :          * There can be multiple actions per IRQ descriptor; find the right
    1886             :          * one based on the dev_id:
    1887             :          */
    1888           0 :         action_ptr = &desc->action;
    1889             :         for (;;) {
    1890           0 :                 action = *action_ptr;
    1891             : 
    1892           0 :                 if (!action) {
    1893           0 :                         WARN(1, "Trying to free already-free IRQ %d\n", irq);
    1894           0 :                         raw_spin_unlock_irqrestore(&desc->lock, flags);
    1895           0 :                         chip_bus_sync_unlock(desc);
    1896           0 :                         mutex_unlock(&desc->request_mutex);
    1897           0 :                         return NULL;
    1898             :                 }
    1899             : 
    1900           0 :                 if (action->dev_id == dev_id)
    1901             :                         break;
    1902           0 :                 action_ptr = &action->next;
    1903             :         }
    1904             : 
    1905             :         /* Found it - now remove it from the list of entries: */
    1906           0 :         *action_ptr = action->next;
    1907             : 
    1908           0 :         irq_pm_remove_action(desc, action);
    1909             : 
    1910             :         /* If this was the last handler, shut down the IRQ line: */
    1911           0 :         if (!desc->action) {
    1912           0 :                 irq_settings_clr_disable_unlazy(desc);
    1913             :                 /* Only shutdown. Deactivate after synchronize_hardirq() */
    1914           0 :                 irq_shutdown(desc);
    1915             :         }
    1916             : 
    1917             : #ifdef CONFIG_SMP
    1918             :         /* make sure affinity_hint is cleaned up */
    1919             :         if (WARN_ON_ONCE(desc->affinity_hint))
    1920             :                 desc->affinity_hint = NULL;
    1921             : #endif
    1922             : 
    1923           0 :         raw_spin_unlock_irqrestore(&desc->lock, flags);
    1924             :         /*
    1925             :          * Drop bus_lock here so the changes which were done in the chip
    1926             :          * callbacks above are synced out to the irq chips which hang
    1927             :          * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
    1928             :          *
    1929             :          * Aside from that, the bus_lock can also be taken from the threaded
    1930             :          * handler in irq_finalize_oneshot() which results in a deadlock
    1931             :          * because kthread_stop() would wait forever for the thread to
    1932             :          * complete, which is blocked on the bus lock.
    1933             :          *
    1934             :          * The still-held desc->request_mutex protects against a
    1935             :          * concurrent request_irq() of this irq so the release of resources
    1936             :          * and timing data is properly serialized.
    1937             :          */
    1938           0 :         chip_bus_sync_unlock(desc);
    1939             : 
    1940           0 :         unregister_handler_proc(irq, action);
    1941             : 
    1942             :         /*
    1943             :          * Make sure it's not being used on another CPU and, if the chip
    1944             :          * supports it, also make sure that there is no (not yet serviced)
    1945             :          * interrupt in flight at the hardware level.
    1946             :          */
    1947           0 :         __synchronize_hardirq(desc, true);
    1948             : 
    1949             : #ifdef CONFIG_DEBUG_SHIRQ
    1950             :         /*
    1951             :          * It's a shared IRQ -- the driver ought to be prepared for an IRQ
    1952             :          * event to happen even now that it's being freed, so let's make
    1953             :          * sure that is so by doing an extra call to the handler ...
    1954             :          *
    1955             :          * ( We do this after actually deregistering it, to make sure that a
    1956             :          *   'real' IRQ doesn't run in parallel with our fake. )
    1957             :          */
    1958             :         if (action->flags & IRQF_SHARED) {
    1959             :                 local_irq_save(flags);
    1960             :                 action->handler(irq, dev_id);
    1961             :                 local_irq_restore(flags);
    1962             :         }
    1963             : #endif
    1964             : 
    1965             :         /*
    1966             :          * The action has already been removed above, but the thread writes
    1967             :          * its oneshot mask bit when it completes. However, request_mutex is
    1968             :          * held across this, which prevents __setup_irq() from handing out
    1969             :          * the same bit to a newly requested action.
    1970             :          */
    1971           0 :         if (action->thread) {
    1972           0 :                 kthread_stop(action->thread);
    1973           0 :                 put_task_struct(action->thread);
    1974           0 :                 if (action->secondary && action->secondary->thread) {
    1975           0 :                         kthread_stop(action->secondary->thread);
    1976           0 :                         put_task_struct(action->secondary->thread);
    1977             :                 }
    1978             :         }
    1979             : 
    1980             :         /* Last action releases resources */
    1981           0 :         if (!desc->action) {
    1982             :                 /*
    1983             :                  * Reacquire bus lock as irq_release_resources() might
    1984             :                  * require it to deallocate resources over the slow bus.
    1985             :                  */
    1986           0 :                 chip_bus_lock(desc);
    1987             :                 /*
    1988             :                  * There is no interrupt on the fly anymore. Deactivate it
    1989             :                  * completely.
    1990             :                  */
    1991           0 :                 raw_spin_lock_irqsave(&desc->lock, flags);
    1992           0 :                 irq_domain_deactivate_irq(&desc->irq_data);
    1993           0 :                 raw_spin_unlock_irqrestore(&desc->lock, flags);
    1994             : 
    1995           0 :                 irq_release_resources(desc);
    1996             :                 chip_bus_sync_unlock(desc);
    1997             :                 irq_remove_timings(desc);
    1998             :         }
    1999             : 
    2000           0 :         mutex_unlock(&desc->request_mutex);
    2001             : 
    2002           0 :         irq_chip_pm_put(&desc->irq_data);
    2003           0 :         module_put(desc->owner);
    2004           0 :         kfree(action->secondary);
    2005           0 :         return action;
    2006             : }
    2007             : 
    2008             : /**
    2009             :  *      free_irq - free an interrupt allocated with request_irq
    2010             :  *      @irq: Interrupt line to free
    2011             :  *      @dev_id: Device identity to free
    2012             :  *
     2013             :  *      Remove an interrupt handler. The handler is removed, and if the
     2014             :  *      interrupt line is no longer in use by any driver, it is disabled.
    2015             :  *      On a shared IRQ the caller must ensure the interrupt is disabled
    2016             :  *      on the card it drives before calling this function. The function
    2017             :  *      does not return until any executing interrupts for this IRQ
    2018             :  *      have completed.
    2019             :  *
    2020             :  *      This function must not be called from interrupt context.
    2021             :  *
    2022             :  *      Returns the devname argument passed to request_irq.
    2023             :  */
    2024           0 : const void *free_irq(unsigned int irq, void *dev_id)
    2025             : {
    2026           0 :         struct irq_desc *desc = irq_to_desc(irq);
    2027             :         struct irqaction *action;
    2028             :         const char *devname;
    2029             : 
    2030           0 :         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
    2031             :                 return NULL;
    2032             : 
    2033             : #ifdef CONFIG_SMP
    2034             :         if (WARN_ON(desc->affinity_notify))
    2035             :                 desc->affinity_notify = NULL;
    2036             : #endif
    2037             : 
    2038           0 :         action = __free_irq(desc, dev_id);
    2039             : 
    2040           0 :         if (!action)
    2041             :                 return NULL;
    2042             : 
    2043           0 :         devname = action->name;
    2044           0 :         kfree(action);
    2045           0 :         return devname;
    2046             : }
    2047             : EXPORT_SYMBOL(free_irq);
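
A minimal sketch of the usual pairing, assuming a hypothetical struct foo_dev:
request_irq() at probe time and free_irq() at teardown, with the same dev_id
cookie on both sides.

    #include <linux/interrupt.h>

    struct foo_dev { void __iomem *regs; };        /* hypothetical device */

    static irqreturn_t foo_handler(int irq, void *dev_id)
    {
            struct foo_dev *foo = dev_id;

            /* a real driver would acknowledge the device here */
            (void)foo;
            return IRQ_HANDLED;
    }

    static int foo_setup_irq(struct foo_dev *foo, unsigned int irq)
    {
            /* the cookie passed here must match the one given to free_irq() */
            return request_irq(irq, foo_handler, IRQF_SHARED, "foo", foo);
    }

    static void foo_teardown_irq(struct foo_dev *foo, unsigned int irq)
    {
            /* never from interrupt context; waits for running handlers */
            free_irq(irq, foo);
    }
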
    2048             : 
    2049             : /* This function must be called with desc->lock held */
    2050           0 : static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
    2051             : {
    2052           0 :         const char *devname = NULL;
    2053             : 
    2054           0 :         desc->istate &= ~IRQS_NMI;
    2055             : 
    2056           0 :         if (!WARN_ON(desc->action == NULL)) {
    2057           0 :                 irq_pm_remove_action(desc, desc->action);
    2058           0 :                 devname = desc->action->name;
    2059           0 :                 unregister_handler_proc(irq, desc->action);
    2060             : 
    2061           0 :                 kfree(desc->action);
    2062           0 :                 desc->action = NULL;
    2063             :         }
    2064             : 
    2065           0 :         irq_settings_clr_disable_unlazy(desc);
    2066           0 :         irq_shutdown_and_deactivate(desc);
    2067             : 
    2068           0 :         irq_release_resources(desc);
    2069             : 
    2070           0 :         irq_chip_pm_put(&desc->irq_data);
    2071           0 :         module_put(desc->owner);
    2072             : 
    2073           0 :         return devname;
    2074             : }
    2075             : 
    2076           0 : const void *free_nmi(unsigned int irq, void *dev_id)
    2077             : {
    2078           0 :         struct irq_desc *desc = irq_to_desc(irq);
    2079             :         unsigned long flags;
    2080             :         const void *devname;
    2081             : 
    2082           0 :         if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
    2083             :                 return NULL;
    2084             : 
    2085           0 :         if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
    2086             :                 return NULL;
    2087             : 
    2088             :         /* NMI still enabled */
    2089           0 :         if (WARN_ON(desc->depth == 0))
    2090             :                 disable_nmi_nosync(irq);
    2091             : 
    2092           0 :         raw_spin_lock_irqsave(&desc->lock, flags);
    2093             : 
    2094           0 :         irq_nmi_teardown(desc);
    2095           0 :         devname = __cleanup_nmi(irq, desc);
    2096             : 
    2097           0 :         raw_spin_unlock_irqrestore(&desc->lock, flags);
    2098             : 
    2099           0 :         return devname;
    2100             : }
    2101             : 
    2102             : /**
    2103             :  *      request_threaded_irq - allocate an interrupt line
    2104             :  *      @irq: Interrupt line to allocate
    2105             :  *      @handler: Function to be called when the IRQ occurs.
    2106             :  *                Primary handler for threaded interrupts.
    2107             :  *                If handler is NULL and thread_fn != NULL
    2108             :  *                the default primary handler is installed.
    2109             :  *      @thread_fn: Function called from the irq handler thread
    2110             :  *                  If NULL, no irq thread is created
    2111             :  *      @irqflags: Interrupt type flags
    2112             :  *      @devname: An ascii name for the claiming device
    2113             :  *      @dev_id: A cookie passed back to the handler function
    2114             :  *
    2115             :  *      This call allocates interrupt resources and enables the
    2116             :  *      interrupt line and IRQ handling. From the point this
    2117             :  *      call is made your handler function may be invoked. Since
    2118             :  *      your handler function must clear any interrupt the board
    2119             :  *      raises, you must take care both to initialise your hardware
    2120             :  *      and to set up the interrupt handler in the right order.
    2121             :  *
    2122             :  *      If you want to set up a threaded irq handler for your device
    2123             :  *      then you need to supply @handler and @thread_fn. @handler is
    2124             :  *      still called in hard interrupt context and has to check
     2125             :  *      whether the interrupt originates from the device. If so, it
    2126             :  *      needs to disable the interrupt on the device and return
    2127             :  *      IRQ_WAKE_THREAD which will wake up the handler thread and run
    2128             :  *      @thread_fn. This split handler design is necessary to support
    2129             :  *      shared interrupts.
    2130             :  *
    2131             :  *      Dev_id must be globally unique. Normally the address of the
    2132             :  *      device data structure is used as the cookie. Since the handler
    2133             :  *      receives this value it makes sense to use it.
    2134             :  *
    2135             :  *      If your interrupt is shared you must pass a non NULL dev_id
    2136             :  *      as this is required when freeing the interrupt.
    2137             :  *
    2138             :  *      Flags:
    2139             :  *
    2140             :  *      IRQF_SHARED             Interrupt is shared
    2141             :  *      IRQF_TRIGGER_*          Specify active edge(s) or level
    2142             :  *      IRQF_ONESHOT            Run thread_fn with interrupt line masked
    2143             :  */
    2144           2 : int request_threaded_irq(unsigned int irq, irq_handler_t handler,
    2145             :                          irq_handler_t thread_fn, unsigned long irqflags,
    2146             :                          const char *devname, void *dev_id)
    2147             : {
    2148             :         struct irqaction *action;
    2149             :         struct irq_desc *desc;
    2150             :         int retval;
    2151             : 
    2152           2 :         if (irq == IRQ_NOTCONNECTED)
    2153             :                 return -ENOTCONN;
    2154             : 
    2155             :         /*
    2156             :          * Sanity-check: shared interrupts must pass in a real dev-ID,
    2157             :          * otherwise we'll have trouble later trying to figure out
    2158             :          * which interrupt is which (messes up the interrupt freeing
    2159             :          * logic etc).
    2160             :          *
     2161             :          * Shared interrupts also do not go well with IRQF_NO_AUTOEN:
     2162             :          * a second sharer might request the line while it's still disabled
     2163             :          * and then wait for interrupts forever.
    2164             :          *
    2165             :          * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
    2166             :          * it cannot be set along with IRQF_NO_SUSPEND.
    2167             :          */
    2168           4 :         if (((irqflags & IRQF_SHARED) && !dev_id) ||
    2169           4 :             ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
    2170           4 :             (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
    2171           2 :             ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
    2172             :                 return -EINVAL;
    2173             : 
    2174           2 :         desc = irq_to_desc(irq);
    2175           2 :         if (!desc)
    2176             :                 return -EINVAL;
    2177             : 
    2178           6 :         if (!irq_settings_can_request(desc) ||
    2179           4 :             WARN_ON(irq_settings_is_per_cpu_devid(desc)))
    2180             :                 return -EINVAL;
    2181             : 
    2182           2 :         if (!handler) {
    2183           0 :                 if (!thread_fn)
    2184             :                         return -EINVAL;
    2185             :                 handler = irq_default_primary_handler;
    2186             :         }
    2187             : 
    2188           2 :         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
    2189           2 :         if (!action)
    2190             :                 return -ENOMEM;
    2191             : 
    2192           2 :         action->handler = handler;
    2193           2 :         action->thread_fn = thread_fn;
    2194           2 :         action->flags = irqflags;
    2195           2 :         action->name = devname;
    2196           2 :         action->dev_id = dev_id;
    2197             : 
    2198           2 :         retval = irq_chip_pm_get(&desc->irq_data);
    2199           2 :         if (retval < 0) {
    2200           0 :                 kfree(action);
    2201           0 :                 return retval;
    2202             :         }
    2203             : 
    2204           2 :         retval = __setup_irq(irq, desc, action);
    2205             : 
    2206           2 :         if (retval) {
    2207           0 :                 irq_chip_pm_put(&desc->irq_data);
    2208           0 :                 kfree(action->secondary);
    2209           0 :                 kfree(action);
    2210             :         }
    2211             : 
    2212             : #ifdef CONFIG_DEBUG_SHIRQ_FIXME
    2213             :         if (!retval && (irqflags & IRQF_SHARED)) {
    2214             :                 /*
    2215             :                  * It's a shared IRQ -- the driver ought to be prepared for it
    2216             :                  * to happen immediately, so let's make sure....
    2217             :                  * We disable the irq to make sure that a 'real' IRQ doesn't
    2218             :                  * run in parallel with our fake.
    2219             :                  */
    2220             :                 unsigned long flags;
    2221             : 
    2222             :                 disable_irq(irq);
    2223             :                 local_irq_save(flags);
    2224             : 
    2225             :                 handler(irq, dev_id);
    2226             : 
    2227             :                 local_irq_restore(flags);
    2228             :                 enable_irq(irq);
    2229             :         }
    2230             : #endif
    2231             :         return retval;
    2232             : }
    2233             : EXPORT_SYMBOL(request_threaded_irq);
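
The hardirq/thread split described above, sketched with hypothetical foo_*
register helpers (stubbed out so the fragment stands alone; struct foo_dev
as in the free_irq() sketch):

    static bool foo_irq_is_ours(struct foo_dev *foo)       { return true; }
    static void foo_mask_device_irq(struct foo_dev *foo)   { }
    static void foo_unmask_device_irq(struct foo_dev *foo) { }
    static void foo_process_events(struct foo_dev *foo)    { }

    static irqreturn_t foo_quick_check(int irq, void *dev_id)
    {
            struct foo_dev *foo = dev_id;

            if (!foo_irq_is_ours(foo))
                    return IRQ_NONE;        /* shared line: not our device */

            foo_mask_device_irq(foo);       /* silence the device ... */
            return IRQ_WAKE_THREAD;         /* ... and defer to the thread */
    }

    static irqreturn_t foo_thread_fn(int irq, void *dev_id)
    {
            struct foo_dev *foo = dev_id;

            foo_process_events(foo);        /* may sleep: runs in a kthread */
            foo_unmask_device_irq(foo);
            return IRQ_HANDLED;
    }

    static int foo_request(struct foo_dev *foo, unsigned int irq)
    {
            return request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
                                        IRQF_SHARED, "foo", foo);
    }
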
    2234             : 
    2235             : /**
    2236             :  *      request_any_context_irq - allocate an interrupt line
    2237             :  *      @irq: Interrupt line to allocate
    2238             :  *      @handler: Function to be called when the IRQ occurs.
    2239             :  *                Threaded handler for threaded interrupts.
    2240             :  *      @flags: Interrupt type flags
    2241             :  *      @name: An ascii name for the claiming device
    2242             :  *      @dev_id: A cookie passed back to the handler function
    2243             :  *
    2244             :  *      This call allocates interrupt resources and enables the
    2245             :  *      interrupt line and IRQ handling. It selects either a
    2246             :  *      hardirq or threaded handling method depending on the
    2247             :  *      context.
    2248             :  *
    2249             :  *      On failure, it returns a negative value. On success,
    2250             :  *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
    2251             :  */
    2252           0 : int request_any_context_irq(unsigned int irq, irq_handler_t handler,
    2253             :                             unsigned long flags, const char *name, void *dev_id)
    2254             : {
    2255             :         struct irq_desc *desc;
    2256             :         int ret;
    2257             : 
    2258           0 :         if (irq == IRQ_NOTCONNECTED)
    2259             :                 return -ENOTCONN;
    2260             : 
    2261           0 :         desc = irq_to_desc(irq);
    2262           0 :         if (!desc)
    2263             :                 return -EINVAL;
    2264             : 
    2265           0 :         if (irq_settings_is_nested_thread(desc)) {
    2266           0 :                 ret = request_threaded_irq(irq, NULL, handler,
    2267             :                                            flags, name, dev_id);
    2268           0 :                 return !ret ? IRQC_IS_NESTED : ret;
    2269             :         }
    2270             : 
    2271           0 :         ret = request_irq(irq, handler, flags, name, dev_id);
    2272           0 :         return !ret ? IRQC_IS_HARDIRQ : ret;
    2273             : }
    2274             : EXPORT_SYMBOL_GPL(request_any_context_irq);
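
A sketch of handling the tri-state return value; the foo names and the
pr_info() text are illustrative:

    static int foo_request_any(unsigned int irq, irq_handler_t handler,
                               void *dev_id)
    {
            int ret = request_any_context_irq(irq, handler, 0, "foo", dev_id);

            if (ret < 0)
                    return ret;             /* plain errno on failure */

            pr_info("foo: irq %u handled %s\n", irq,
                    ret == IRQC_IS_NESTED ? "in a nested thread" : "in hardirq");
            return 0;
    }
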
    2275             : 
    2276             : /**
    2277             :  *      request_nmi - allocate an interrupt line for NMI delivery
    2278             :  *      @irq: Interrupt line to allocate
    2279             :  *      @handler: Function to be called when the IRQ occurs.
    2280             :  *                Threaded handler for threaded interrupts.
    2281             :  *      @irqflags: Interrupt type flags
    2282             :  *      @name: An ascii name for the claiming device
    2283             :  *      @dev_id: A cookie passed back to the handler function
    2284             :  *
    2285             :  *      This call allocates interrupt resources and enables the
    2286             :  *      interrupt line and IRQ handling. It sets up the IRQ line
    2287             :  *      to be handled as an NMI.
    2288             :  *
    2289             :  *      An interrupt line delivering NMIs cannot be shared and IRQ handling
    2290             :  *      cannot be threaded.
    2291             :  *
     2292             :  *      Interrupt lines requested for NMI delivery must produce per-CPU
     2293             :  *      interrupts and have the auto-enable setting disabled.
    2294             :  *
    2295             :  *      Dev_id must be globally unique. Normally the address of the
    2296             :  *      device data structure is used as the cookie. Since the handler
    2297             :  *      receives this value it makes sense to use it.
    2298             :  *
     2299             :  *      If the interrupt line cannot be used to deliver NMIs, the function
     2300             :  *      will fail and return a negative value.
    2301             :  */
    2302           0 : int request_nmi(unsigned int irq, irq_handler_t handler,
    2303             :                 unsigned long irqflags, const char *name, void *dev_id)
    2304             : {
    2305             :         struct irqaction *action;
    2306             :         struct irq_desc *desc;
    2307             :         unsigned long flags;
    2308             :         int retval;
    2309             : 
    2310           0 :         if (irq == IRQ_NOTCONNECTED)
    2311             :                 return -ENOTCONN;
    2312             : 
     2313             :         /* NMI cannot be shared or used for polling */
    2314           0 :         if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
    2315             :                 return -EINVAL;
    2316             : 
    2317           0 :         if (!(irqflags & IRQF_PERCPU))
    2318             :                 return -EINVAL;
    2319             : 
    2320           0 :         if (!handler)
    2321             :                 return -EINVAL;
    2322             : 
    2323           0 :         desc = irq_to_desc(irq);
    2324             : 
    2325           0 :         if (!desc || (irq_settings_can_autoenable(desc) &&
    2326           0 :             !(irqflags & IRQF_NO_AUTOEN)) ||
    2327           0 :             !irq_settings_can_request(desc) ||
    2328           0 :             WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
    2329           0 :             !irq_supports_nmi(desc))
    2330             :                 return -EINVAL;
    2331             : 
    2332           0 :         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
    2333           0 :         if (!action)
    2334             :                 return -ENOMEM;
    2335             : 
    2336           0 :         action->handler = handler;
    2337           0 :         action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
    2338           0 :         action->name = name;
    2339           0 :         action->dev_id = dev_id;
    2340             : 
    2341           0 :         retval = irq_chip_pm_get(&desc->irq_data);
    2342           0 :         if (retval < 0)
    2343             :                 goto err_out;
    2344             : 
    2345           0 :         retval = __setup_irq(irq, desc, action);
    2346           0 :         if (retval)
    2347             :                 goto err_irq_setup;
    2348             : 
    2349           0 :         raw_spin_lock_irqsave(&desc->lock, flags);
    2350             : 
    2351             :         /* Setup NMI state */
    2352           0 :         desc->istate |= IRQS_NMI;
    2353           0 :         retval = irq_nmi_setup(desc);
    2354           0 :         if (retval) {
    2355           0 :                 __cleanup_nmi(irq, desc);
    2356           0 :                 raw_spin_unlock_irqrestore(&desc->lock, flags);
    2357           0 :                 return -EINVAL;
    2358             :         }
    2359             : 
    2360           0 :         raw_spin_unlock_irqrestore(&desc->lock, flags);
    2361             : 
    2362           0 :         return 0;
    2363             : 
    2364             : err_irq_setup:
    2365           0 :         irq_chip_pm_put(&desc->irq_data);
    2366             : err_out:
    2367           0 :         kfree(action);
    2368             : 
    2369           0 :         return retval;
    2370             : }
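
Given those constraints, a caller has to pass IRQF_PERCPU, keep the line from
being auto-enabled, and enable it explicitly afterwards. A sketch with
hypothetical foo names:

    static irqreturn_t foo_nmi_handler(int irq, void *dev_id)
    {
            /* NMI context: no sleeping, no regular spinlocks */
            return IRQ_HANDLED;
    }

    static int foo_setup_nmi(unsigned int irq, void *cookie)
    {
            int ret = request_nmi(irq, foo_nmi_handler,
                                  IRQF_PERCPU | IRQF_NO_AUTOEN,
                                  "foo-nmi", cookie);

            if (!ret)
                    enable_nmi(irq);        /* line stays off until here */
            return ret;
    }
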
    2371             : 
    2372           0 : void enable_percpu_irq(unsigned int irq, unsigned int type)
    2373             : {
    2374           0 :         unsigned int cpu = smp_processor_id();
    2375             :         unsigned long flags;
    2376           0 :         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
    2377             : 
    2378           0 :         if (!desc)
    2379           0 :                 return;
    2380             : 
    2381             :         /*
    2382             :          * If the trigger type is not specified by the caller, then
    2383             :          * use the default for this interrupt.
    2384             :          */
    2385           0 :         type &= IRQ_TYPE_SENSE_MASK;
    2386           0 :         if (type == IRQ_TYPE_NONE)
    2387           0 :                 type = irqd_get_trigger_type(&desc->irq_data);
    2388             : 
    2389           0 :         if (type != IRQ_TYPE_NONE) {
    2390             :                 int ret;
    2391             : 
    2392           0 :                 ret = __irq_set_trigger(desc, type);
    2393             : 
    2394           0 :                 if (ret) {
    2395           0 :                         WARN(1, "failed to set type for IRQ%d\n", irq);
    2396           0 :                         goto out;
    2397             :                 }
    2398             :         }
    2399             : 
    2400           0 :         irq_percpu_enable(desc, cpu);
    2401             : out:
    2402           0 :         irq_put_desc_unlock(desc, flags);
    2403             : }
    2404             : EXPORT_SYMBOL_GPL(enable_percpu_irq);
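
Because enabling is strictly CPU-local, a driver that wants the line live on
every CPU typically runs the call everywhere, e.g. via on_each_cpu(); a
sketch with hypothetical foo names:

    #include <linux/smp.h>

    static void foo_enable_local(void *info)
    {
            /* IRQ_TYPE_NONE keeps the trigger type already configured */
            enable_percpu_irq(*(unsigned int *)info, IRQ_TYPE_NONE);
    }

    static void foo_enable_everywhere(unsigned int irq)
    {
            on_each_cpu(foo_enable_local, &irq, 1);
    }
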
    2405             : 
    2406           0 : void enable_percpu_nmi(unsigned int irq, unsigned int type)
    2407             : {
    2408           0 :         enable_percpu_irq(irq, type);
    2409           0 : }
    2410             : 
    2411             : /**
    2412             :  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
    2413             :  * @irq:        Linux irq number to check for
    2414             :  *
     2415             :  * Must be called from a non-migratable context. Returns the enable
    2416             :  * state of a per cpu interrupt on the current cpu.
    2417             :  */
    2418           0 : bool irq_percpu_is_enabled(unsigned int irq)
    2419             : {
    2420           0 :         unsigned int cpu = smp_processor_id();
    2421             :         struct irq_desc *desc;
    2422             :         unsigned long flags;
    2423             :         bool is_enabled;
    2424             : 
    2425           0 :         desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
    2426           0 :         if (!desc)
    2427             :                 return false;
    2428             : 
    2429           0 :         is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
    2430           0 :         irq_put_desc_unlock(desc, flags);
    2431             : 
    2432           0 :         return is_enabled;
    2433             : }
    2434             : EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
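
Since the answer is only valid for the CPU the caller runs on, the check is
typically done from a CPU-bound callback; an illustrative sketch:

    static void foo_report_local(void *info)
    {
            unsigned int irq = *(unsigned int *)info;

            pr_info("irq %u on cpu %u: %s\n", irq, smp_processor_id(),
                    irq_percpu_is_enabled(irq) ? "enabled" : "disabled");
    }

    /* e.g.: on_each_cpu(foo_report_local, &irq, 1); */
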
    2435             : 
    2436           0 : void disable_percpu_irq(unsigned int irq)
    2437             : {
    2438           0 :         unsigned int cpu = smp_processor_id();
    2439             :         unsigned long flags;
    2440           0 :         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
    2441             : 
    2442           0 :         if (!desc)
    2443           0 :                 return;
    2444             : 
    2445           0 :         irq_percpu_disable(desc, cpu);
    2446           0 :         irq_put_desc_unlock(desc, flags);
    2447             : }
    2448             : EXPORT_SYMBOL_GPL(disable_percpu_irq);
    2449             : 
    2450           0 : void disable_percpu_nmi(unsigned int irq)
    2451             : {
    2452           0 :         disable_percpu_irq(irq);
    2453           0 : }
    2454             : 
    2455             : /*
    2456             :  * Internal function to unregister a percpu irqaction.
    2457             :  */
    2458           0 : static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
    2459             : {
    2460           0 :         struct irq_desc *desc = irq_to_desc(irq);
    2461             :         struct irqaction *action;
    2462             :         unsigned long flags;
    2463             : 
    2464           0 :         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
    2465             : 
    2466           0 :         if (!desc)
    2467             :                 return NULL;
    2468             : 
    2469           0 :         raw_spin_lock_irqsave(&desc->lock, flags);
    2470             : 
    2471           0 :         action = desc->action;
    2472           0 :         if (!action || action->percpu_dev_id != dev_id) {
    2473           0 :                 WARN(1, "Trying to free already-free IRQ %d\n", irq);
    2474           0 :                 goto bad;
    2475             :         }
    2476             : 
    2477           0 :         if (!cpumask_empty(desc->percpu_enabled)) {
    2478           0 :                 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
    2479             :                      irq, cpumask_first(desc->percpu_enabled));
    2480           0 :                 goto bad;
    2481             :         }
    2482             : 
    2483             :         /* Found it - now remove it from the list of entries: */
    2484           0 :         desc->action = NULL;
    2485             : 
    2486           0 :         desc->istate &= ~IRQS_NMI;
    2487             : 
    2488           0 :         raw_spin_unlock_irqrestore(&desc->lock, flags);
    2489             : 
    2490           0 :         unregister_handler_proc(irq, action);
    2491             : 
    2492           0 :         irq_chip_pm_put(&desc->irq_data);
    2493           0 :         module_put(desc->owner);
    2494           0 :         return action;
    2495             : 
    2496             : bad:
    2497           0 :         raw_spin_unlock_irqrestore(&desc->lock, flags);
    2498           0 :         return NULL;
    2499             : }
    2500             : 
    2501             : /**
    2502             :  *      remove_percpu_irq - free a per-cpu interrupt
    2503             :  *      @irq: Interrupt line to free
    2504             :  *      @act: irqaction for the interrupt
    2505             :  *
     2506             :  * Used to remove interrupts statically set up by the early boot process.
    2507             :  */
    2508           0 : void remove_percpu_irq(unsigned int irq, struct irqaction *act)
    2509             : {
    2510           0 :         struct irq_desc *desc = irq_to_desc(irq);
    2511             : 
    2512           0 :         if (desc && irq_settings_is_per_cpu_devid(desc))
    2513           0 :             __free_percpu_irq(irq, act->percpu_dev_id);
    2514           0 : }
    2515             : 
    2516             : /**
    2517             :  *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
    2518             :  *      @irq: Interrupt line to free
    2519             :  *      @dev_id: Device identity to free
    2520             :  *
    2521             :  *      Remove a percpu interrupt handler. The handler is removed, but
    2522             :  *      the interrupt line is not disabled. This must be done on each
    2523             :  *      CPU before calling this function. The function does not return
    2524             :  *      until any executing interrupts for this IRQ have completed.
    2525             :  *
    2526             :  *      This function must not be called from interrupt context.
    2527             :  */
    2528           0 : void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
    2529             : {
    2530           0 :         struct irq_desc *desc = irq_to_desc(irq);
    2531             : 
    2532           0 :         if (!desc || !irq_settings_is_per_cpu_devid(desc))
    2533             :                 return;
    2534             : 
    2535           0 :         chip_bus_lock(desc);
    2536           0 :         kfree(__free_percpu_irq(irq, dev_id));
    2537             :         chip_bus_sync_unlock(desc);
    2538             : }
    2539             : EXPORT_SYMBOL_GPL(free_percpu_irq);
    2540             : 
    2541           0 : void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
    2542             : {
    2543           0 :         struct irq_desc *desc = irq_to_desc(irq);
    2544             : 
    2545           0 :         if (!desc || !irq_settings_is_per_cpu_devid(desc))
    2546             :                 return;
    2547             : 
    2548           0 :         if (WARN_ON(!(desc->istate & IRQS_NMI)))
    2549             :                 return;
    2550             : 
    2551           0 :         kfree(__free_percpu_irq(irq, dev_id));
    2552             : }
    2553             : 
    2554             : /**
    2555             :  *      setup_percpu_irq - setup a per-cpu interrupt
    2556             :  *      @irq: Interrupt line to setup
    2557             :  *      @act: irqaction for the interrupt
    2558             :  *
     2559             :  * Used to statically set up per-cpu interrupts in the early boot process.
    2560             :  */
    2561           0 : int setup_percpu_irq(unsigned int irq, struct irqaction *act)
    2562             : {
    2563           0 :         struct irq_desc *desc = irq_to_desc(irq);
    2564             :         int retval;
    2565             : 
    2566           0 :         if (!desc || !irq_settings_is_per_cpu_devid(desc))
    2567             :                 return -EINVAL;
    2568             : 
    2569           0 :         retval = irq_chip_pm_get(&desc->irq_data);
    2570           0 :         if (retval < 0)
    2571             :                 return retval;
    2572             : 
    2573           0 :         retval = __setup_irq(irq, desc, act);
    2574             : 
    2575           0 :         if (retval)
    2576           0 :                 irq_chip_pm_put(&desc->irq_data);
    2577             : 
    2578             :         return retval;
    2579             : }
    2580             : 
    2581             : /**
    2582             :  *      __request_percpu_irq - allocate a percpu interrupt line
    2583             :  *      @irq: Interrupt line to allocate
    2584             :  *      @handler: Function to be called when the IRQ occurs.
    2585             :  *      @flags: Interrupt type flags (IRQF_TIMER only)
    2586             :  *      @devname: An ascii name for the claiming device
    2587             :  *      @dev_id: A percpu cookie passed back to the handler function
    2588             :  *
    2589             :  *      This call allocates interrupt resources and enables the
    2590             :  *      interrupt on the local CPU. If the interrupt is supposed to be
    2591             :  *      enabled on other CPUs, it has to be done on each CPU using
    2592             :  *      enable_percpu_irq().
    2593             :  *
    2594             :  *      Dev_id must be globally unique. It is a per-cpu variable, and
    2595             :  *      the handler gets called with the interrupted CPU's instance of
    2596             :  *      that variable.
    2597             :  */
    2598           0 : int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
    2599             :                          unsigned long flags, const char *devname,
    2600             :                          void __percpu *dev_id)
    2601             : {
    2602             :         struct irqaction *action;
    2603             :         struct irq_desc *desc;
    2604             :         int retval;
    2605             : 
    2606           0 :         if (!dev_id)
    2607             :                 return -EINVAL;
    2608             : 
    2609           0 :         desc = irq_to_desc(irq);
    2610           0 :         if (!desc || !irq_settings_can_request(desc) ||
    2611           0 :             !irq_settings_is_per_cpu_devid(desc))
    2612             :                 return -EINVAL;
    2613             : 
    2614           0 :         if (flags && flags != IRQF_TIMER)
    2615             :                 return -EINVAL;
    2616             : 
    2617           0 :         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
    2618           0 :         if (!action)
    2619             :                 return -ENOMEM;
    2620             : 
    2621           0 :         action->handler = handler;
    2622           0 :         action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
    2623           0 :         action->name = devname;
    2624           0 :         action->percpu_dev_id = dev_id;
    2625             : 
    2626           0 :         retval = irq_chip_pm_get(&desc->irq_data);
    2627           0 :         if (retval < 0) {
    2628           0 :                 kfree(action);
    2629           0 :                 return retval;
    2630             :         }
    2631             : 
    2632           0 :         retval = __setup_irq(irq, desc, action);
    2633             : 
    2634           0 :         if (retval) {
    2635           0 :                 irq_chip_pm_put(&desc->irq_data);
    2636           0 :                 kfree(action);
    2637             :         }
    2638             : 
    2639             :         return retval;
    2640             : }
    2641             : EXPORT_SYMBOL_GPL(__request_percpu_irq);
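
A lifecycle sketch using the flags == 0 wrapper request_percpu_irq();
struct foo_pcpu and its counter are hypothetical:

    #include <linux/percpu.h>

    struct foo_pcpu { u64 count; };            /* hypothetical per-CPU state */
    static struct foo_pcpu __percpu *foo_state;

    static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
    {
            struct foo_pcpu *st = dev_id;      /* this CPU's instance */

            st->count++;
            return IRQ_HANDLED;
    }

    static int foo_setup_percpu(unsigned int irq)
    {
            int ret;

            foo_state = alloc_percpu(struct foo_pcpu);
            if (!foo_state)
                    return -ENOMEM;

            ret = request_percpu_irq(irq, foo_percpu_handler, "foo", foo_state);
            if (ret) {
                    free_percpu(foo_state);
                    return ret;
            }

            /* delivery still has to be enabled on each CPU, as sketched
             * after enable_percpu_irq() above */
            enable_percpu_irq(irq, IRQ_TYPE_NONE);
            return 0;
    }

    static void foo_teardown_percpu(unsigned int irq)
    {
            /* every CPU must have called disable_percpu_irq() beforehand */
            free_percpu_irq(irq, foo_state);
            free_percpu(foo_state);
    }
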
    2642             : 
    2643             : /**
    2644             :  *      request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
    2645             :  *      @irq: Interrupt line to allocate
    2646             :  *      @handler: Function to be called when the IRQ occurs.
    2647             :  *      @name: An ascii name for the claiming device
    2648             :  *      @dev_id: A percpu cookie passed back to the handler function
    2649             :  *
     2650             :  *      This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
     2651             :  *      have to be set up on each CPU by calling prepare_percpu_nmi() before
     2652             :  *      being enabled on the same CPU with enable_percpu_nmi().
    2653             :  *
    2654             :  *      Dev_id must be globally unique. It is a per-cpu variable, and
    2655             :  *      the handler gets called with the interrupted CPU's instance of
    2656             :  *      that variable.
    2657             :  *
     2658             :  *      Interrupt lines requested for NMI delivery should have the
     2659             :  *      auto-enable setting disabled.
    2660             :  *
     2661             :  *      If the interrupt line cannot be used to deliver NMIs, the function
     2662             :  *      will fail, returning a negative value.
    2663             :  */
    2664           0 : int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
    2665             :                        const char *name, void __percpu *dev_id)
    2666             : {
    2667             :         struct irqaction *action;
    2668             :         struct irq_desc *desc;
    2669             :         unsigned long flags;
    2670             :         int retval;
    2671             : 
    2672           0 :         if (!handler)
    2673             :                 return -EINVAL;
    2674             : 
    2675           0 :         desc = irq_to_desc(irq);
    2676             : 
    2677           0 :         if (!desc || !irq_settings_can_request(desc) ||
    2678           0 :             !irq_settings_is_per_cpu_devid(desc) ||
    2679           0 :             irq_settings_can_autoenable(desc) ||
    2680           0 :             !irq_supports_nmi(desc))
    2681             :                 return -EINVAL;
    2682             : 
    2683             :         /* The line cannot already be NMI */
    2684           0 :         if (desc->istate & IRQS_NMI)
    2685             :                 return -EINVAL;
    2686             : 
    2687           0 :         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
    2688           0 :         if (!action)
    2689             :                 return -ENOMEM;
    2690             : 
    2691           0 :         action->handler = handler;
    2692           0 :         action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
    2693             :                 | IRQF_NOBALANCING;
    2694           0 :         action->name = name;
    2695           0 :         action->percpu_dev_id = dev_id;
    2696             : 
    2697           0 :         retval = irq_chip_pm_get(&desc->irq_data);
    2698           0 :         if (retval < 0)
    2699             :                 goto err_out;
    2700             : 
    2701           0 :         retval = __setup_irq(irq, desc, action);
    2702           0 :         if (retval)
    2703             :                 goto err_irq_setup;
    2704             : 
    2705           0 :         raw_spin_lock_irqsave(&desc->lock, flags);
    2706           0 :         desc->istate |= IRQS_NMI;
    2707           0 :         raw_spin_unlock_irqrestore(&desc->lock, flags);
    2708             : 
    2709           0 :         return 0;
    2710             : 
    2711             : err_irq_setup:
    2712           0 :         irq_chip_pm_put(&desc->irq_data);
    2713             : err_out:
    2714           0 :         kfree(action);
    2715             : 
    2716           0 :         return retval;
    2717             : }
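
Putting the per-CPU NMI pieces together: one global request, then the two
CPU-local steps on every CPU that should receive NMIs. A sketch reusing the
hypothetical foo_nmi_handler from the request_nmi() sketch above:

    static void foo_nmi_setup_local(void *info)
    {
            unsigned int irq = *(unsigned int *)info;

            /* both steps are CPU-local; on_each_cpu() callbacks run
             * with preemption (and interrupts) disabled */
            if (!prepare_percpu_nmi(irq))
                    enable_percpu_nmi(irq, IRQ_TYPE_NONE);
    }

    static int foo_setup_percpu_nmi(unsigned int irq, void __percpu *cookie)
    {
            int ret = request_percpu_nmi(irq, foo_nmi_handler,
                                         "foo-nmi", cookie);

            if (ret)
                    return ret;

            on_each_cpu(foo_nmi_setup_local, &irq, 1);
            return 0;
    }
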
    2718             : 
    2719             : /**
    2720             :  *      prepare_percpu_nmi - performs CPU local setup for NMI delivery
    2721             :  *      @irq: Interrupt line to prepare for NMI delivery
    2722             :  *
    2723             :  *      This call prepares an interrupt line to deliver NMI on the current CPU,
    2724             :  *      before that interrupt line gets enabled with enable_percpu_nmi().
    2725             :  *
    2726             :  *      As a CPU local operation, this should be called from non-preemptible
    2727             :  *      context.
    2728             :  *
     2729             :  *      If the interrupt line cannot be used to deliver NMIs, the function
     2730             :  *      will fail, returning a negative value.
    2731             :  */
    2732           0 : int prepare_percpu_nmi(unsigned int irq)
    2733             : {
    2734             :         unsigned long flags;
    2735             :         struct irq_desc *desc;
    2736           0 :         int ret = 0;
    2737             : 
    2738           0 :         WARN_ON(preemptible());
    2739             : 
    2740           0 :         desc = irq_get_desc_lock(irq, &flags,
    2741             :                                  IRQ_GET_DESC_CHECK_PERCPU);
    2742           0 :         if (!desc)
    2743             :                 return -EINVAL;
    2744             : 
    2745           0 :         if (WARN(!(desc->istate & IRQS_NMI),
    2746             :                  KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
    2747             :                  irq)) {
    2748             :                 ret = -EINVAL;
    2749             :                 goto out;
    2750             :         }
    2751             : 
    2752           0 :         ret = irq_nmi_setup(desc);
    2753           0 :         if (ret) {
     2754           0 :                 pr_err("Failed to set up NMI delivery: irq %u\n", irq);
    2755           0 :                 goto out;
    2756             :         }
    2757             : 
    2758             : out:
    2759           0 :         irq_put_desc_unlock(desc, flags);
    2760           0 :         return ret;
    2761             : }
    2762             : 
    2763             : /**
    2764             :  *      teardown_percpu_nmi - undoes NMI setup of IRQ line
    2765             :  *      @irq: Interrupt line from which CPU local NMI configuration should be
    2766             :  *            removed
    2767             :  *
    2768             :  *      This call undoes the setup done by prepare_percpu_nmi().
    2769             :  *
    2770             :  *      IRQ line should not be enabled for the current CPU.
    2771             :  *
    2772             :  *      As a CPU local operation, this should be called from non-preemptible
    2773             :  *      context.
    2774             :  */
    2775           0 : void teardown_percpu_nmi(unsigned int irq)
    2776             : {
    2777             :         unsigned long flags;
    2778             :         struct irq_desc *desc;
    2779             : 
    2780           0 :         WARN_ON(preemptible());
    2781             : 
    2782           0 :         desc = irq_get_desc_lock(irq, &flags,
    2783             :                                  IRQ_GET_DESC_CHECK_PERCPU);
    2784           0 :         if (!desc)
    2785           0 :                 return;
    2786             : 
    2787           0 :         if (WARN_ON(!(desc->istate & IRQS_NMI)))
    2788             :                 goto out;
    2789             : 
    2790             :         irq_nmi_teardown(desc);
    2791             : out:
    2792           0 :         irq_put_desc_unlock(desc, flags);
    2793             : }
    2794             : 
    2795           0 : int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
    2796             :                             bool *state)
    2797             : {
    2798             :         struct irq_chip *chip;
    2799           0 :         int err = -EINVAL;
    2800             : 
    2801             :         do {
    2802           0 :                 chip = irq_data_get_irq_chip(data);
    2803           0 :                 if (WARN_ON_ONCE(!chip))
    2804             :                         return -ENODEV;
    2805           0 :                 if (chip->irq_get_irqchip_state)
    2806             :                         break;
    2807             : #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
    2808           0 :                 data = data->parent_data;
    2809             : #else
    2810             :                 data = NULL;
    2811             : #endif
    2812           0 :         } while (data);
    2813             : 
    2814           0 :         if (data)
    2815           0 :                 err = chip->irq_get_irqchip_state(data, which, state);
    2816             :         return err;
    2817             : }
    2818             : 
    2819             : /**
     2820             :  *      irq_get_irqchip_state - returns the irqchip state of an interrupt.
    2821             :  *      @irq: Interrupt line that is forwarded to a VM
    2822             :  *      @which: One of IRQCHIP_STATE_* the caller wants to know about
    2823             :  *      @state: a pointer to a boolean where the state is to be stored
    2824             :  *
    2825             :  *      This call snapshots the internal irqchip state of an
     2826             :  *      interrupt, returning into @state the bit corresponding to
     2827             :  *      state @which.
    2828             :  *
    2829             :  *      This function should be called with preemption disabled if the
    2830             :  *      interrupt controller has per-cpu registers.
    2831             :  */
    2832           0 : int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
    2833             :                           bool *state)
    2834             : {
    2835             :         struct irq_desc *desc;
    2836             :         struct irq_data *data;
    2837             :         unsigned long flags;
    2838           0 :         int err = -EINVAL;
    2839             : 
    2840           0 :         desc = irq_get_desc_buslock(irq, &flags, 0);
    2841           0 :         if (!desc)
    2842             :                 return err;
    2843             : 
    2844           0 :         data = irq_desc_get_irq_data(desc);
    2845             : 
    2846           0 :         err = __irq_get_irqchip_state(data, which, state);
    2847             : 
    2848           0 :         irq_put_desc_busunlock(desc, flags);
    2849           0 :         return err;
    2850             : }
    2851             : EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
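
A sketch of the common use, polling whether a forwarded line is still pending
at the chip; foo_irq_pending() is hypothetical:

    static bool foo_irq_pending(unsigned int irq)
    {
            bool pending = false;

            /* a non-zero return means the chip can't report this state */
            if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
                    return false;
            return pending;
    }
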
    2852             : 
    2853             : /**
    2854             :  *      irq_set_irqchip_state - set the state of a forwarded interrupt.
    2855             :  *      @irq: Interrupt line that is forwarded to a VM
    2856             :  *      @which: State to be restored (one of IRQCHIP_STATE_*)
    2857             :  *      @val: Value corresponding to @which
    2858             :  *
    2859             :  *      This call sets the internal irqchip state of an interrupt,
    2860             :  *      depending on the value of @which.
    2861             :  *
    2862             :  *      This function should be called with migration disabled if the
    2863             :  *      interrupt controller has per-cpu registers.
    2864             :  */
    2865           0 : int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
    2866             :                           bool val)
    2867             : {
    2868             :         struct irq_desc *desc;
    2869             :         struct irq_data *data;
    2870             :         struct irq_chip *chip;
    2871             :         unsigned long flags;
    2872           0 :         int err = -EINVAL;
    2873             : 
    2874           0 :         desc = irq_get_desc_buslock(irq, &flags, 0);
    2875           0 :         if (!desc)
    2876             :                 return err;
    2877             : 
    2878           0 :         data = irq_desc_get_irq_data(desc);
    2879             : 
    2880             :         do {
    2881           0 :                 chip = irq_data_get_irq_chip(data);
    2882           0 :                 if (WARN_ON_ONCE(!chip)) {
    2883             :                         err = -ENODEV;
    2884             :                         goto out_unlock;
    2885             :                 }
    2886           0 :                 if (chip->irq_set_irqchip_state)
    2887             :                         break;
    2888             : #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
    2889           0 :                 data = data->parent_data;
    2890             : #else
    2891             :                 data = NULL;
    2892             : #endif
    2893           0 :         } while (data);
    2894             : 
    2895           0 :         if (data)
    2896           0 :                 err = chip->irq_set_irqchip_state(data, which, val);
    2897             : 
    2898             : out_unlock:
    2899           0 :         irq_put_desc_busunlock(desc, flags);
    2900           0 :         return err;
    2901             : }
    2902             : EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
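
And the counterpart when restoring such a snapshot, e.g. re-injecting a
pending interrupt into a resumed VM; a sketch:

    static int foo_restore_pending(unsigned int irq, bool was_pending)
    {
            return irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
                                         was_pending);
    }
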
    2903             : 
    2904             : /**
    2905             :  * irq_has_action - Check whether an interrupt is requested
    2906             :  * @irq:        The linux irq number
    2907             :  *
    2908             :  * Returns: A snapshot of the current state
    2909             :  */
    2910           0 : bool irq_has_action(unsigned int irq)
    2911             : {
    2912             :         bool res;
    2913             : 
    2914             :         rcu_read_lock();
    2915           0 :         res = irq_desc_has_action(irq_to_desc(irq));
    2916             :         rcu_read_unlock();
    2917           0 :         return res;
    2918             : }
    2919             : EXPORT_SYMBOL_GPL(irq_has_action);
    2920             : 
    2921             : /**
    2922             :  * irq_check_status_bit - Check whether bits in the irq descriptor status are set
    2923             :  * @irq:        The linux irq number
    2924             :  * @bitmask:    The bitmask to evaluate
    2925             :  *
    2926             :  * Returns: True if one of the bits in @bitmask is set
    2927             :  */
    2928           0 : bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
    2929             : {
    2930             :         struct irq_desc *desc;
    2931           0 :         bool res = false;
    2932             : 
    2933             :         rcu_read_lock();
    2934           0 :         desc = irq_to_desc(irq);
    2935           0 :         if (desc)
    2936           0 :                 res = !!(desc->status_use_accessors & bitmask);
    2937             :         rcu_read_unlock();
    2938           0 :         return res;
    2939             : }
    2940             : EXPORT_SYMBOL_GPL(irq_check_status_bit);
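
Both helpers return snapshots that may already be stale when the caller
inspects them; an illustrative debugging sketch (IRQ_LEVEL picked as an
example status bit):

    static void foo_dump_irq(unsigned int irq)
    {
            pr_info("irq %u: %srequested, level-triggered: %s\n", irq,
                    irq_has_action(irq) ? "" : "not ",
                    irq_check_status_bit(irq, IRQ_LEVEL) ? "yes" : "no");
    }
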

Generated by: LCOV version 1.14