/*
* Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
*
* Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
* Moved from arch/x86/kernel/apic/io_apic.c.
* Jiang Liu <jiang.liu@linux.intel.com>
* Enable support of hierarchical irqdomains
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>
#include <asm/trace/irq_vectors.h>
struct apic_chip_data {
	struct irq_cfg		cfg;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	cpumask_var_t		domain;
	cpumask_var_t		old_domain;
	struct hlist_node	clist;
	u8			move_in_progress : 1;
};
struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif
void lock_vector_lock(void)
{
	/* Used to ensure that the set of online CPUs does not change
	 * during assign_irq_vector().
	 */
raw_spin_lock(&vector_lock);
}
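/*
 * Sketch of a typical caller (illustrative, not part of this file): the
 * secondary-CPU bringup path takes the lock around marking the CPU online,
 * roughly
 *
 *	lock_vector_lock();
 *	set_cpu_online(cpu, true);
 *	unlock_vector_lock();
 *
 * so a concurrent assign_irq_vector() sees the CPU either as fully online
 * or not at all.
 */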
void unlock_vector_lock(void)
{
raw_spin_unlock(&vector_lock);
}
void init_irq_alloc_info(struct irq_alloc_info *info,
const struct cpumask *mask)
{
memset(info, 0, sizeof(*info));
info->mask = mask;
}
void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
if (src)
*dst = *src;
else
memset(dst, 0, sizeof(*dst));
}
static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;
	while (irqd->parent_data)
		irqd = irqd->parent_data;
	return irqd->chip_data;
}
struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	return apicd ? &apicd->cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;
	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (!apicd)
		return NULL;
	if (!zalloc_cpumask_var_node(&apicd->domain, GFP_KERNEL, node))
		goto out_data;
	if (!zalloc_cpumask_var_node(&apicd->old_domain, GFP_KERNEL, node))
		goto out_domain;
	INIT_HLIST_NODE(&apicd->clist);
	return apicd;
out_domain:
	free_cpumask_var(apicd->domain);
out_data:
	kfree(apicd);
	return NULL;
}
static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	if (apicd) {
		free_cpumask_var(apicd->domain);
		free_cpumask_var(apicd->old_domain);
		kfree(apicd);
	}
}
static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			       const struct cpumask *mask)
{
/*
* NOTE! The local APIC isn't very good at handling
* multiple interrupts at the same interrupt level.
* As the interrupt level is determined by taking the
* vector number and shifting that right by 4, we
* want to spread these out a bit so that they don't
* all fall in the same interrupt level.
*
* Also, we've got to be careful not to trash gate
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
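	/*
	 * Worked example (illustrative; assumes FIRST_EXTERNAL_VECTOR == 0x20
	 * and VECTOR_OFFSET_START == 1): the priority level of a vector is
	 * its number shifted right by 4, so vector 0x21 is level 2 and 0x31
	 * is level 3.  The search below starts at 0x21 and steps in
	 * increments of 16 (0x21, 0x31, 0x41, ...), so consecutive
	 * allocations land in different priority levels.  Only when the
	 * search wraps past FIRST_SYSTEM_VECTOR is the low nibble (the
	 * "offset") advanced and the scan restarted at
	 * FIRST_EXTERNAL_VECTOR + offset.
	 */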
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, vector;
	/*
	 * If there is still a move in progress or the previous move has not
	 * been cleaned up completely, tell the caller to come back later.
	 */
	if (d->move_in_progress ||
	    cpumask_intersects(d->old_domain, cpu_online_mask))
		return -EBUSY;
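	/*
	 * Concrete example of the check above: if an earlier affinity change
	 * moved this interrupt from CPU 1 to CPU 3, CPU 1 remains in
	 * @d->old_domain until the cleanup interrupt has run there.  Starting
	 * yet another move before that has happened would require tracking
	 * more than one pending transition, so the caller simply retries
	 * later.
	 */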
/* Only try and allocate irqs on cpus that are present */
cpumask_clear(d->old_domain);
cpumask_clear(searched_cpumask);
cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, offset;
cpumask_copy(vector_cpumask, cpumask_of(cpu));
/*
* Clear the offline cpus from @vector_cpumask for searching
* and verify whether the result overlaps with @mask. If true,
* then the call to apic->cpu_mask_to_apicid() will
* succeed as well. If not, no point in trying to find a
* vector in this mask.
*/
cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
if (!cpumask_intersects(vector_searchmask, mask))
goto next_cpu;
if (cpumask_subset(vector_cpumask, d->domain)) {
if (cpumask_equal(vector_cpumask, d->domain))
goto success;
			/*
			 * Mark the cpus which are no longer in the mask for
			 * cleanup.
			 */
			cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
vector = d->cfg.vector;
goto update;
}
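		/*
		 * Example for the subset case above (hypothetical numbers):
		 * if @d->domain is {1,2,3} and @vector_cpumask is {2}, the
		 * previously assigned vector is reused, while CPUs {1,3}
		 * are recorded in @d->old_domain so their stale vector
		 * entries can be cleaned up later.
		 */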
vector = current_vector;
offset = current_offset;
next:
vector += 16;
if (vector >= FIRST_SYSTEM_VECTOR) {
offset = (offset + 1) % 16;
vector = FIRST_EXTERNAL_VECTOR + offset;
}
/* If the search wrapped around, try the next cpu */
if (unlikely(current_vector == vector))
goto next_cpu;
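		/*
		 * Vectors marked in @system_vectors are reserved for system
		 * use (e.g. the spurious interrupt vector or, with IA32
		 * emulation, the 0x80 syscall vector) and must be skipped.
		 */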
if (test_bit(vector, system_vectors))
goto next;