Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

commit 647802d6db

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: APIC: enable workaround on AMD Fam10h CPUs
  xen: disable interrupts before saving in percpu
  x86: add x86@kernel.org to MAINTAINERS
  x86: push old stack address on irqstack for unwinder
  irq, x86: fix lock status with numa_migrate_irq_desc
  x86: add cache descriptors for Intel Core i7
  x86/Voyager: make it build and boot
@@ -4841,6 +4841,7 @@ P:	Ingo Molnar
 M:	mingo@redhat.com
 P:	H. Peter Anvin
 M:	hpa@zytor.com
+M:	x86@kernel.org
 L:	linux-kernel@vger.kernel.org
 T:	git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
 S:	Maintained
@@ -1436,7 +1436,7 @@ static int __init detect_init_APIC(void)
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
 		if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
-		    (boot_cpu_data.x86 == 15))
+		    (boot_cpu_data.x86 >= 15))
 			break;
 		goto no_apic;
 	case X86_VENDOR_INTEL:
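The old test matched only family 0xf (K8). Fam10h parts report base family 0xf plus extended family 1, so boot_cpu_data.x86 is 16 and "== 15" skipped the workaround on them; ">= 15" covers K8 and everything newer. For reference, a minimal user-space sketch of how the family number is assembled from CPUID leaf 1 (assumes GCC/Clang on x86 with <cpuid.h>; not kernel code):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, family;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	family = (eax >> 8) & 0xf;		/* base family field */
	if (family == 0xf)
		family += (eax >> 20) & 0xff;	/* add extended family */

	/* AMD K8 prints 15 here; Fam10h prints 16 -- hence ">= 15" */
	printf("x86 family: %u\n", family);
	return 0;
}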
@@ -36,8 +36,11 @@ static struct _cache_table cache_table[] __cpuinitdata =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
 	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
+	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x23, LVL_3, 1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x25, LVL_3, 2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
@@ -85,6 +88,18 @@ static struct _cache_table cache_table[] __cpuinitdata =
 	{ 0x85, LVL_2, 2048 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
 	{ 0x87, LVL_2, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd1, LVL_3, 1024 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd2, LVL_3, 2048 },	/* 4-way set assoc, 64 byte line size */
+	{ 0xd6, LVL_3, 1024 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd7, LVL_3, 2048 },	/* 8-way set assoc, 64 byte line size */
+	{ 0xd8, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdc, LVL_3, 2048 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xdd, LVL_3, 4096 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xde, LVL_3, 8192 },	/* 12-way set assoc, 64 byte line size */
+	{ 0xe2, LVL_3, 2048 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe3, LVL_3, 4096 },	/* 16-way set assoc, 64 byte line size */
+	{ 0xe4, LVL_3, 8192 },	/* 16-way set assoc, 64 byte line size */
 	{ 0x00, 0, 0}
 };
 
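These table entries are one-byte descriptors from CPUID leaf 2, which packs cache/TLB codes into EAX..EDX; cache_table[] translates each code into a level and size in KB. A hedged user-space sketch of the same decode rules (assumes GCC/Clang on x86; it only prints the raw descriptor bytes, the table lookup itself is the kernel's job):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int regs[4];
	int i, j;

	if (!__get_cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]))
		return 1;

	/* the low byte of EAX is a repeat count, not a descriptor */
	regs[0] &= 0xffffff00;

	for (i = 0; i < 4; i++) {
		if (regs[i] & 0x80000000)	/* bit 31 set: no valid bytes */
			continue;
		for (j = 0; j < 4; j++) {
			unsigned char desc = (regs[i] >> (8 * j)) & 0xff;

			if (desc)
				printf("descriptor 0x%02x\n", desc);
		}
	}
	return 0;
}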
@@ -346,6 +346,7 @@ ENTRY(save_args)
 	popq_cfi %rax			/* move return address... */
 	mov %gs:pda_irqstackptr,%rsp
 	EMPTY_FRAME 0
+	pushq_cfi %rbp			/* backlink for unwinder */
 	pushq_cfi %rax			/* ... to the new stack */
 	/*
 	 * We entered an interrupt context - irqs are off:
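The extra push leaves a backlink on the irq stack so a frame-pointer unwinder can step from the interrupt stack back onto the interrupted task stack instead of stopping at the stack switch. The mechanism is the ordinary saved-frame-pointer chain; a toy user-space walk of such a chain (assumes x86-64 built with frame pointers, e.g. gcc -O0 -fno-omit-frame-pointer, and is capped at a few frames for safety):

#include <stdio.h>

static void __attribute__((noinline)) walk(void)
{
	/* each frame begins with the caller's saved frame pointer */
	void **frame = __builtin_frame_address(0);
	int depth;

	for (depth = 0; depth < 4 && frame && *frame; depth++) {
		printf("frame %d at %p\n", depth, (void *)frame);
		frame = (void **)*frame;	/* follow the backlink */
	}
}

static void __attribute__((noinline)) leaf(void) { walk(); }

int main(void)
{
	leaf();
	return 0;
}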
@@ -2528,14 +2528,15 @@ static void irq_complete_move(struct irq_desc **descp)
 
 	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
+
+	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
 #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
 		*descp = desc = move_irq_desc(desc, me);
 		/* get the new one */
 		cfg = desc->chip_data;
 #endif
-
-	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 		send_cleanup_vector(cfg);
+	}
 }
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
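Two things change here: the descriptor is only migrated once this CPU is known to own the vector, and cfg is re-read afterwards, because move_irq_desc() may hand back a different irq_desc whose chip_data replaces the old one. A toy C sketch of the stale-pointer hazard the re-fetch avoids (invented types, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct cfg { int vector; };
struct desc { struct cfg *chip_data; };

/* stand-in for move_irq_desc(): may return a brand-new copy */
static struct desc *move_desc(struct desc *old)
{
	struct desc *fresh = malloc(sizeof(*fresh));

	fresh->chip_data = malloc(sizeof(*fresh->chip_data));
	fresh->chip_data->vector = old->chip_data->vector;
	free(old->chip_data);
	free(old);
	return fresh;
}

int main(void)
{
	struct desc *desc = malloc(sizeof(*desc));
	struct cfg *cfg;

	desc->chip_data = malloc(sizeof(*desc->chip_data));
	desc->chip_data->vector = 42;

	cfg = desc->chip_data;	/* cached pointer */
	desc = move_desc(desc);	/* descriptor replaced, old cfg freed */
	cfg = desc->chip_data;	/* must re-fetch before using it again */

	printf("vector = %d\n", cfg->vector);
	return 0;
}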
@@ -78,15 +78,6 @@ void __init init_ISA_irqs(void)
 	}
 }
 
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.mask = CPU_MASK_NONE,
-	.name = "cascade",
-};
-
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
 	[0 ... IRQ0_VECTOR - 1] = -1,
 	[IRQ0_VECTOR] = 0,
@@ -178,9 +169,6 @@ void __init native_init_IRQ(void)
 	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 
-	if (!acpi_ioapic)
-		setup_irq(2, &irq2);
-
 	/* setup after call gates are initialised (usually add in
 	 * the architecture specific gates)
 	 */
@@ -38,6 +38,15 @@ void __init pre_intr_init_hook(void)
 	init_ISA_irqs();
 }
 
+/*
+ * IRQ2 is cascade interrupt to second interrupt controller
+ */
+static struct irqaction irq2 = {
+	.handler = no_action,
+	.mask = CPU_MASK_NONE,
+	.name = "cascade",
+};
+
 /**
  * intr_init_hook - post gate setup interrupt initialisation
  *
@@ -53,6 +62,9 @@ void __init intr_init_hook(void)
 		if (x86_quirks->arch_intr_init())
 			return;
 	}
+	if (!acpi_ioapic)
+		setup_irq(2, &irq2);
+
 }
 
 /**
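Net effect of these four hunks: registration of the IRQ2 cascade moves out of the shared irqinit code and into the mach-default intr_init_hook(), so a subarchitecture such as Voyager, which has no second i8259 cascaded behind IRQ2, simply never registers it. The kernel selects the hook definition per subarchitecture at build/link time; a weak-symbol toy shows the same override shape (the weak attribute is only an analogy here, assumes GCC/Clang):

#include <stdio.h>

/* default definition, used when no subarch provides its own */
void __attribute__((weak)) intr_init_hook(void)
{
	printf("mach-default: registering IRQ2 cascade\n");
}

/* a subarch would supply a strong intr_init_hook() in its own
 * object file, and the linker would pick it over this default */

int main(void)
{
	intr_init_hook();
	return 0;
}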
@@ -33,13 +33,23 @@ void __init intr_init_hook(void)
 	setup_irq(2, &irq2);
 }
 
-void __init pre_setup_arch_hook(void)
+static void voyager_disable_tsc(void)
 {
 	/* Voyagers run their CPUs from independent clocks, so disable
 	 * the TSC code because we can't sync them */
 	setup_clear_cpu_cap(X86_FEATURE_TSC);
 }
 
+void __init pre_setup_arch_hook(void)
+{
+	voyager_disable_tsc();
+}
+
+void __init pre_time_init_hook(void)
+{
+	voyager_disable_tsc();
+}
+
 void __init trap_init_hook(void)
 {
 }
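Factoring the clear into voyager_disable_tsc() lets it run from both pre_setup_arch_hook() and pre_time_init_hook(), so generic timekeeping never calibrates against counters that tick on independent clocks. What X86_FEATURE_TSC gates is just RDTSC; a one-liner sketch of the counter it disables (assumes GCC/Clang on x86, and deltas are only meaningful while the task stays on one CPU):

#include <stdio.h>
#include <x86intrin.h>

int main(void)
{
	unsigned long long t0 = __rdtsc();
	unsigned long long t1 = __rdtsc();

	/* cross-CPU comparison of these values is exactly what Voyager
	 * cannot guarantee: each CPU's counter runs from its own clock */
	printf("tsc delta on one cpu: %llu cycles\n", t1 - t0);
	return 0;
}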
@@ -81,7 +81,7 @@ static void enable_local_vic_irq(unsigned int irq);
 static void disable_local_vic_irq(unsigned int irq);
 static void before_handle_vic_irq(unsigned int irq);
 static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
+static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
 static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
@@ -211,8 +211,6 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-cpumask_t cpu_callin_map = CPU_MASK_NONE;
-cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
@@ -378,7 +376,7 @@ void __init find_smp_config(void)
 	cpus_addr(phys_cpu_present_map)[0] |=
 	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
 				       3) << 24;
-	cpu_possible_map = phys_cpu_present_map;
+	init_cpu_possible(&phys_cpu_present_map);
 	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
 	       cpus_addr(phys_cpu_present_map)[0]);
 	/* Here we set up the VIC to enable SMP */
@@ -1599,16 +1597,16 @@ static void after_handle_vic_irq(unsigned int irq)
  * change the mask and then do an interrupt enable CPI to re-enable on
  * the selected processors */
 
-void set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
+void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	/* Only extended processors handle interrupts */
 	unsigned long real_mask;
 	unsigned long irq_mask = 1 << irq;
 	int cpu;
 
-	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;
+	real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
 
-	if (cpus_addr(mask)[0] == 0)
+	if (cpus_addr(*mask)[0] == 0)
 		/* can't have no CPUs to accept the interrupt -- extremely
 		 * bad things will happen */
 		return;
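The signature changes in this file track the 2.6.29 cpumask rework: masks are now passed as const struct cpumask * rather than cpumask_t by value, and cpus_addr(*mask) dereferences accordingly. The motivation is stack usage, since a by-value mask copies the whole bitmap on every call. A sketch with a toy mask type (the NR_CPUS=4096 sizing is an illustrative assumption, not the kernel headers):

#include <stdio.h>
#include <string.h>

#define NR_CPUS 4096

struct cpumask {
	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
};

/* new style: an 8-byte pointer instead of a 512-byte stack copy */
static unsigned long first_word(const struct cpumask *mask)
{
	return mask->bits[0];
}

int main(void)
{
	struct cpumask m;

	memset(&m, 0, sizeof(m));
	m.bits[0] = 0x5;	/* CPUs 0 and 2 */

	printf("by-value copy would move %zu bytes per call\n", sizeof(m));
	printf("low word = 0x%lx\n", first_word(&m));
	return 0;
}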
@@ -1750,10 +1748,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
 	init_gdt(smp_processor_id());
 	switch_to_new_gdt();
 
-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), cpu_callout_map);
-	cpu_set(smp_processor_id(), cpu_possible_map);
-	cpu_set(smp_processor_id(), cpu_present_map);
+	cpu_online_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
+	cpu_callin_map = CPU_MASK_NONE;
+	cpu_present_map = cpumask_of_cpu(smp_processor_id());
+
 }
 
 static int __cpuinit voyager_cpu_up(unsigned int cpu)
@@ -1783,9 +1782,9 @@ void __init smp_setup_processor_id(void)
 	x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
 
-static void voyager_send_call_func(cpumask_t callmask)
+static void voyager_send_call_func(const struct cpumask *callmask)
 {
-	__u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+	__u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
 	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
 }
 
@@ -19,8 +19,10 @@ DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags);
 		     paired with xen_mc_issue() */
 static inline void xen_mc_batch(void)
 {
+	unsigned long flags;
 	/* need to disable interrupts until this entry is complete */
-	local_irq_save(__get_cpu_var(xen_mc_irq_flags));
+	local_irq_save(flags);
+	__get_cpu_var(xen_mc_irq_flags) = flags;
 }
 
 static inline struct multicall_space xen_mc_entry(size_t args)
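The old code handed the per-CPU slot itself to local_irq_save() as the lvalue, so the slot could be touched while interrupts were still enabled; an interrupt arriving in that window that itself starts a multicall batch could clobber the saved flags. Saving into a local and publishing to the per-CPU variable only after interrupts are off closes the window. A toy model of the ordering (the interrupt state is simulated, nothing here is Xen-specific):

#include <stdio.h>

static int irqs_enabled = 1;
static unsigned long percpu_saved_flags;	/* stand-in for xen_mc_irq_flags */

/* toy local_irq_save(): capture the state, then disable */
static unsigned long toy_irq_save(void)
{
	unsigned long flags = (unsigned long)irqs_enabled;

	irqs_enabled = 0;	/* no "interrupt" can run past this point */
	return flags;
}

static void mc_batch_fixed(void)
{
	unsigned long flags = toy_irq_save();

	/* the store happens with interrupts off, so a nested batch
	 * can no longer slip in and overwrite the slot */
	percpu_saved_flags = flags;
}

int main(void)
{
	mc_batch_fixed();
	printf("saved=%lu irqs_now=%d\n", percpu_saved_flags, irqs_enabled);
	return 0;
}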
@@ -71,7 +71,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	desc = irq_desc_ptrs[irq];
 
 	if (desc && old_desc != desc)
-			goto out_unlock;
+		goto out_unlock;
 
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
@@ -84,10 +84,15 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
 	irq_desc_ptrs[irq] = desc;
+	spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
 	/* free the old one */
 	free_one_irq_desc(old_desc, desc);
+	spin_unlock(&old_desc->lock);
 	kfree(old_desc);
+	spin_lock(&desc->lock);
+
+	return desc;
 
 out_unlock:
 	spin_unlock_irqrestore(&sparse_irq_lock, flags);
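The reworked exit path encodes a lock hand-off: the function is entered with old_desc->lock held and must return with the new desc->lock held, so the success path drops sparse_irq_lock, unlocks and frees the old descriptor, takes the new lock, and returns early instead of falling through out_unlock. A pthreads toy of the same hand-off contract (invented types, not the kernel's spinlocks):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	int data;
};

/* called with old->lock held; returns with fresh->lock held */
static struct obj *replace_locked(struct obj *old)
{
	struct obj *fresh = malloc(sizeof(*fresh));

	pthread_mutex_init(&fresh->lock, NULL);
	fresh->data = old->data;

	pthread_mutex_unlock(&old->lock);	/* drop the old lock... */
	pthread_mutex_destroy(&old->lock);
	free(old);
	pthread_mutex_lock(&fresh->lock);	/* ...return holding the new one */
	return fresh;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	pthread_mutex_init(&o->lock, NULL);
	o->data = 42;

	pthread_mutex_lock(&o->lock);
	o = replace_locked(o);
	printf("data = %d\n", o->data);
	pthread_mutex_unlock(&o->lock);
	free(o);
	return 0;
}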