original_kernel/arch/arm64/mm/mmap.c

/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/elf.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cputype.h>

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1))
#define MAX_GAP (STACK_TOP/6*5)
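
/*
 * Illustration only (not from the original source): assuming 4 KiB pages
 * (PAGE_SHIFT == 12) and a STACK_RND_MASK of 0x3ffff (the arm64 value for
 * 64-bit tasks with 4 KiB pages), the worst-case stack randomisation is
 * 0x3ffff << 12, just under 1 GiB, so MIN_GAP works out to roughly
 * 128 MiB + 1 GiB. MAX_GAP caps the gap at five sixths of STACK_TOP.
 */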

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Since get_random_int() returns the same value within a 1 jiffy window, we
 * will almost always get the same randomisation for the stack and mmap
 * region. This will mean the relative distance between stack and mmap will be
 * the same.
 *
 * To avoid this we can shift the randomness by 1 bit.
 */
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0;

	if (current->flags & PF_RANDOMIZE)
		rnd = (long)get_random_int() & (STACK_RND_MASK >> 1);

	return rnd << (PAGE_SHIFT + 1);
}
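
/*
 * Illustration only (not from the original source): if get_random_int()
 * returns the same value R for both calls within the same jiffy, the
 * generic stack randomisation (randomize_stack_top()) uses
 * (R & STACK_RND_MASK) << PAGE_SHIFT, while the value above is
 * (R & (STACK_RND_MASK >> 1)) << (PAGE_SHIFT + 1). The extra shift builds
 * the two offsets from the same bits at different positions, so the
 * stack-to-mmap distance still varies between processes.
 */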

static unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - mmap_rnd());
}
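
/*
 * Illustration only (not from the original source): with a typical 8 MiB
 * RLIMIT_STACK the gap is smaller than MIN_GAP and gets bumped up to it,
 * so the top-down mmap base lands MIN_GAP + mmap_rnd() bytes below
 * STACK_TOP (rounded to a page boundary). A very large but finite stack
 * limit is instead capped at MAX_GAP, which keeps the mmap base from
 * dropping below roughly STACK_TOP / 6.
 */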

/*
 * This function, called very early during the creation of a new process VM
 * image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality bit is set, or
	 * if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

/*
 * You really shouldn't be using read() or write() on /dev/mem. This might go
 * away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}
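
/*
 * Illustration only (not from the original source): the allowed
 * read()/write() window is the physical range backing the kernel's linear
 * map, i.e. [PHYS_OFFSET, __pa(high_memory - 1) + 1). Taking the address
 * of the last mapped byte plus one, rather than __pa(high_memory), avoids
 * translating a virtual address that lies one byte past the linear
 * mapping.
 */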

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}
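
/*
 * Illustration only (not from the original source): the check passes when
 * the end of the requested mapping, (pfn << PAGE_SHIFT) + size, has no
 * bits set above PHYS_MASK. With a 48-bit physical address mask, for
 * example, a mapping whose end would exceed 2^48 is rejected.
 */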

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number. We mimic x86 here by
 * disallowing access to system RAM as well as device-exclusive MMIO regions.
 * This effectively disables read()/write() on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
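
/*
 * Illustration only (not from the original source): a pfn that is part of
 * system RAM, or one inside a resource a driver has claimed exclusively,
 * yields 0 and the access is refused; any other pfn (typically device
 * memory that is not marked exclusive) yields 1 and is allowed.
 */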
#endif