
irqdesc.h

Commit a8994181, authored by Thomas Gleixner:

    When a cpu goes up, some architectures (e.g. x86) have to walk the irq
    space to set up the vector space for the cpu. While this needs extra
    protection at the architecture level, we can avoid a few race
    conditions by preventing the concurrent allocation/free of irq
    descriptors and the associated data.
    
    When a cpu goes down, it moves the interrupts which are targeted to
    this cpu away by reassigning the affinities. While this happens,
    interrupts can be allocated and freed, which opens a can of race
    conditions in the code which reassigns the affinities, because
    interrupt descriptors might be freed underneath it.
    
    Example:
    
    CPU1				CPU2
    cpu_up/down
     irq_desc = irq_to_desc(irq);
    				remove_from_radix_tree(desc);
     raw_spin_lock(&desc->lock);
    				free(desc);
    
    We could protect the irq descriptors with RCU, but that would require
    a full-tree change of all accesses to interrupt descriptors. But
    fortunately these kinds of race conditions are rather limited to a few
    things like cpu hotplug. The normal setup/teardown is very well
    serialized, so the simpler and obvious solution is:
    
    Prevent allocation and freeing of interrupt descriptors across cpu
    hotplug.
    
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Acked-by: Peter Zijlstra <peterz@infradead.org>
    Cc: xiao jin <jin.xiao@intel.com>
    Cc: Joerg Roedel <jroedel@suse.de>
    Cc: Borislav Petkov <bp@suse.de>
    Cc: Yanmin Zhang <yanmin_zhang@linux.intel.com>
    Link: http://lkml.kernel.org/r/20150705171102.063519515@linutronix.de
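
    A minimal sketch of the resulting usage pattern (not the actual
    kernel/cpu.c hunks from this commit; example_cpu_down() and
    migrate_irqs_away() are hypothetical stand-ins for the arch
    hotplug and affinity-migration code):

    	static int example_cpu_down(unsigned int cpu)
    	{
    		/* Block alloc/free of irq descriptors while they are walked */
    		irq_lock_sparse();
    		migrate_irqs_away(cpu);	/* move affinities off this cpu */
    		irq_unlock_sparse();
    		return 0;
    	}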
    irqdesc.h (8.05 KiB):

    #ifndef _LINUX_IRQDESC_H
    #define _LINUX_IRQDESC_H
    
    /*
     * Core internal functions to deal with irq descriptors
     */
    
    struct irq_affinity_notify;
    struct proc_dir_entry;
    struct module;
    struct irq_desc;
    struct irq_domain;
    struct pt_regs;
    
    /**
     * struct irq_desc - interrupt descriptor
     * @irq_common_data:	per irq and chip data passed down to chip functions
     * @kstat_irqs:		irq stats per cpu
     * @handle_irq:		highlevel irq-events handler
     * @preflow_handler:	handler called before the flow handler (currently used by sparc)
     * @action:		the irq action chain
     * @status:		status information
     * @core_internal_state__do_not_mess_with_it: core internal status information
     * @depth:		disable-depth, for nested irq_disable() calls
     * @wake_depth:		enable depth, for multiple irq_set_irq_wake() callers
     * @irq_count:		stats field to detect stalled irqs
     * @last_unhandled:	aging timer for unhandled count
     * @irqs_unhandled:	stats field for spurious unhandled interrupts
     * @threads_handled:	stats field for deferred spurious detection of threaded handlers
     * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
     * @lock:		locking for SMP
     * @affinity_hint:	hint to user space for preferred irq affinity
     * @affinity_notify:	context for notification of affinity changes
     * @pending_mask:	pending rebalanced interrupts
     * @threads_oneshot:	bitfield to handle shared oneshot threads
     * @threads_active:	number of irqaction threads currently running
     * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
     * @nr_actions:		number of installed actions on this descriptor
     * @no_suspend_depth:	number of irqactions on an irq descriptor with
     *			IRQF_NO_SUSPEND set
     * @cond_suspend_depth:	number of irqactions on an irq descriptor with
     *			IRQF_COND_SUSPEND set
     * @force_resume_depth:	number of irqactions on an irq descriptor with
     *			IRQF_FORCE_RESUME set
     * @dir:		/proc/irq/ procfs entry
     * @name:		flow handler name for /proc/interrupts output
     */
    struct irq_desc {
    	struct irq_common_data	irq_common_data;
    	struct irq_data		irq_data;
    	unsigned int __percpu	*kstat_irqs;
    	irq_flow_handler_t	handle_irq;
    #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
    	irq_preflow_handler_t	preflow_handler;
    #endif
    	struct irqaction	*action;	/* IRQ action list */
    	unsigned int		status_use_accessors;
    	unsigned int		core_internal_state__do_not_mess_with_it;
    	unsigned int		depth;		/* nested irq disables */
    	unsigned int		wake_depth;	/* nested wake enables */
    	unsigned int		irq_count;	/* For detecting broken IRQs */
    	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
    	unsigned int		irqs_unhandled;
    	atomic_t		threads_handled;
    	int			threads_handled_last;
    	raw_spinlock_t		lock;
    	struct cpumask		*percpu_enabled;
    #ifdef CONFIG_SMP
    	const struct cpumask	*affinity_hint;
    	struct irq_affinity_notify *affinity_notify;
    #ifdef CONFIG_GENERIC_PENDING_IRQ
    	cpumask_var_t		pending_mask;
    #endif
    #endif
    	unsigned long		threads_oneshot;
    	atomic_t		threads_active;
    	wait_queue_head_t       wait_for_threads;
    #ifdef CONFIG_PM_SLEEP
    	unsigned int		nr_actions;
    	unsigned int		no_suspend_depth;
    	unsigned int		cond_suspend_depth;
    	unsigned int		force_resume_depth;
    #endif
    #ifdef CONFIG_PROC_FS
    	struct proc_dir_entry	*dir;
    #endif
    	int			parent_irq;
    	struct module		*owner;
    	const char		*name;
    } ____cacheline_internodealigned_in_smp;
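
    /*
     * Illustrative sketch only (not part of this header): reading the
     * per-cpu @kstat_irqs field documented above. This mirrors what the
     * kernel's kstat_irqs_cpu() does; example_kstat_irqs_cpu() is a
     * hypothetical name.
     */
    static inline unsigned int example_kstat_irqs_cpu(struct irq_desc *desc, int cpu)
    {
    	/* kstat_irqs may be NULL before the descriptor is fully set up */
    	return desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
    }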
    
    #ifdef CONFIG_SPARSE_IRQ
    extern void irq_lock_sparse(void);
    extern void irq_unlock_sparse(void);
    #else
    static inline void irq_lock_sparse(void) { }
    static inline void irq_unlock_sparse(void) { }
    extern struct irq_desc irq_desc[NR_IRQS];
    #endif
    
    static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
    {
    #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
    	return irq_to_desc(data->irq);
    #else
    	return container_of(data, struct irq_desc, irq_data);
    #endif
    }
    
    static inline unsigned int irq_desc_get_irq(struct irq_desc *desc)
    {
    	return desc->irq_data.irq;
    }
    
    static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
    {
    	return &desc->irq_data;
    }
    
    static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
    {
    	return desc->irq_data.chip;
    }
    
    static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
    {
    	return desc->irq_data.chip_data;
    }
    
    static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
    {
    	return desc->irq_data.handler_data;
    }
    
    static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
    {
    	return desc->irq_data.msi_desc;
    }
    
    /*
     * Architectures call this to let the generic IRQ layer
     * handle an interrupt: it invokes the ->handle_irq() flow
     * handler installed in the descriptor.
     */
    static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
    {
    	desc->handle_irq(irq, desc);
    }
    
    int generic_handle_irq(unsigned int irq);
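
    /*
     * Illustrative sketch only (not part of this header): a chained
     * demultiplex handler handing decoded child interrupts to the
     * generic layer via generic_handle_irq(). read_pending_bits() and
     * example_irq_base are hypothetical.
     */
    static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
    {
    	unsigned long pending = read_pending_bits();

    	while (pending) {
    		int bit = __ffs(pending);

    		pending &= ~(1UL << bit);
    		generic_handle_irq(example_irq_base + bit);
    	}
    }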
    
    #ifdef CONFIG_HANDLE_DOMAIN_IRQ
    /*
     * Convert a HW interrupt number to a logical one using an IRQ domain,
     * and handle the resulting interrupt number. Returns -EINVAL if the
     * conversion fails. Providing a NULL domain indicates that the
     * conversion has already been done.
     */
    int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
    			bool lookup, struct pt_regs *regs);
    
    static inline int handle_domain_irq(struct irq_domain *domain,
    				    unsigned int hwirq, struct pt_regs *regs)
    {
    	return __handle_domain_irq(domain, hwirq, true, regs);
    }
    #endif
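
    /*
     * Illustrative sketch only (not part of this header): a root
     * interrupt controller's low level entry point using
     * handle_domain_irq() to translate a hw irq number through its
     * domain and run the flow handler (needs CONFIG_HANDLE_DOMAIN_IRQ).
     * read_hwirq() and example_domain are hypothetical.
     */
    static void example_root_handler(struct pt_regs *regs)
    {
    	unsigned int hwirq = read_hwirq();

    	handle_domain_irq(example_domain, hwirq, regs);
    }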
    
    /* Test to see if a driver has successfully requested an irq */
    static inline int irq_has_action(unsigned int irq)
    {
    	struct irq_desc *desc = irq_to_desc(irq);
    	return desc->action != NULL;
    }
    
    /* caller has locked the irq_desc and both params are valid */
    static inline void __irq_set_handler_locked(unsigned int irq,
    					    irq_flow_handler_t handler)
    {
    	struct irq_desc *desc;
    
    	desc = irq_to_desc(irq);
    	desc->handle_irq = handler;
    }
    
    /* caller has locked the irq_desc and both params are valid */
    static inline void
    __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
    				   irq_flow_handler_t handler, const char *name)
    {
    	struct irq_desc *desc;
    
    	desc = irq_to_desc(irq);
    	irq_desc_get_irq_data(desc)->chip = chip;
    	desc->handle_irq = handler;
    	desc->name = name;
    }
    
    /**
     * irq_set_handler_locked - Set irq handler from a locked region
     * @data:	Pointer to the irq_data structure which identifies the irq
     * @handler:	Flow control handler function for this interrupt
     *
     * Sets the handler in the irq descriptor associated with @data.
     *
     * Must be called with irq_desc locked and valid parameters. Typical
     * call site is the irq_set_type() callback.
     */
    static inline void irq_set_handler_locked(struct irq_data *data,
    					  irq_flow_handler_t handler)
    {
    	struct irq_desc *desc = irq_data_to_desc(data);
    
    	desc->handle_irq = handler;
    }
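
    /*
     * Illustrative sketch only (not part of this header): the typical
     * irq_set_type() call site mentioned in the kernel-doc above,
     * switching the flow handler to match the requested trigger type.
     * example_chip_set_type() is hypothetical; handle_edge_irq() and
     * handle_level_irq() are the standard flow handlers from linux/irq.h.
     */
    static int example_chip_set_type(struct irq_data *data, unsigned int type)
    {
    	if (type & IRQ_TYPE_EDGE_BOTH)
    		irq_set_handler_locked(data, handle_edge_irq);
    	else
    		irq_set_handler_locked(data, handle_level_irq);

    	return 0;
    }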
    
    /**
     * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region
     * @data:	Pointer to the irq_data structure for which the chip is set
     * @chip:	Pointer to the new irq chip
     * @handler:	Flow control handler function for this interrupt
     * @name:	Name of the interrupt
     *
     * Replaces the irq chip at the proper hierarchy level in @data and
     * sets the handler and name in the associated irq descriptor.
     *
     * Must be called with irq_desc locked and valid parameters.
     */
    static inline void
    irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
    				 irq_flow_handler_t handler, const char *name)
    {
    	struct irq_desc *desc = irq_data_to_desc(data);
    
    	desc->handle_irq = handler;
    	desc->name = name;
    	data->chip = chip;
    }
    
    static inline int irq_balancing_disabled(unsigned int irq)
    {
    	struct irq_desc *desc;
    
    	desc = irq_to_desc(irq);
    	return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
    }
    
    static inline int irq_is_percpu(unsigned int irq)
    {
    	struct irq_desc *desc;
    
    	desc = irq_to_desc(irq);
    	return desc->status_use_accessors & IRQ_PER_CPU;
    }
    
    static inline void
    irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
    {
    	struct irq_desc *desc = irq_to_desc(irq);
    
    	if (desc)
    		lockdep_set_class(&desc->lock, class);
    }
    
    #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
    static inline void
    __irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
    {
    	struct irq_desc *desc;
    
    	desc = irq_to_desc(irq);
    	desc->preflow_handler = handler;
    }
    #endif
    
    #endif