linux-stable-rt/arch/um/kernel/skas/process.c

/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/init.h"
#include "linux/sched.h"
#include "as-layout.h"
#include "os.h"
#include "skas.h"
int new_mm(unsigned long stack)
{
	int fd;

	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
	if (fd < 0)
		return fd;

	if (skas_needs_stub)
		map_stub_pages(fd, STUB_CODE, STUB_DATA, stack);

	return fd;
}

extern void start_kernel(void);
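
/*
 * Idle thread body - blocks signals, records the host pid and task
 * structure for CPU 0, then enters the generic kernel through
 * start_kernel().
 */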
static int __init start_kernel_proc(void *unused)
{
	int pid;

	block_signals();
	pid = os_getpid();

	cpu_tasks[0].pid = pid;
	cpu_tasks[0].task = current;
#ifdef CONFIG_SMP
	cpu_online_map = cpumask_of_cpu(0);
#endif
	start_kernel();
	return 0;
}

extern int userspace_pid[];
extern char cpu0_irqstack[];
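
/*
 * UML startup - protect and register the CPU 0 IRQ stack, start the
 * initial userspace process if the host supports /proc/mm, then hand
 * control to the idle thread via start_idle_thread().
 */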
int __init start_uml(void)
{
	stack_protections((unsigned long) &cpu0_irqstack);
	set_sigstack(cpu0_irqstack, THREAD_SIZE);
	if (proc_mm)
		userspace_pid[0] = start_userspace(0);

	init_new_thread_signals();

	init_task.thread.request.u.thread.proc = start_kernel_proc;
	init_task.thread.request.u.thread.arg = NULL;
	return start_idle_thread(task_stack_page(&init_task),
				 &init_task.thread.switch_buf);
}
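
/*
 * Return the stub stack address of the current process, or 0 for a
 * kernel thread, which has no userspace address space.
 */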
unsigned long current_stub_stack(void)
{
	if (current->mm == NULL)
		return 0;

	return current->mm->context.id.stack;
}