Commit 94521b2f authored by Linus Torvalds

Merge branch 'parisc-4.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc update from Helge Deller:
 "This patchset adds Huge Page and HUGETLBFS support for parisc"

Honestly, the hugepage support should have gone through in the merge
window, and is not really an rc-time fix.  But it only touches
arch/parisc, and I cannot find it in myself to care.  If one of the
three parisc users notices a breakage, I will point at Helge and make
rude farting noises.

* 'parisc-4.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Map kernel text and data on huge pages
  parisc: Add Huge Page and HUGETLBFS support
  parisc: Use long branch to do_syscall_trace_exit
  parisc: Increase initial kernel mapping to 32MB on 64bit kernel
  parisc: Initialize the fault vector earlier in the boot process.
  parisc: Add defines for Huge page support
  parisc: Drop unused MADV_xxxK_PAGES flags from asm/mman.h
  parisc: Drop definition of start_thread_som for HP-UX SOM binaries
  parisc: Fix wrong comment regarding first pmd entry flags
parents 727cde6c 41b85a11
Showing 382 additions and 116 deletions
@@ -108,6 +108,9 @@ config PGTABLE_LEVELS
default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
default 2
config SYS_SUPPORTS_HUGETLBFS
def_bool y if PA20
source "init/Kconfig"
source "kernel/Kconfig.freezer"
......
#ifndef _ASM_PARISC64_HUGETLB_H
#define _ASM_PARISC64_HUGETLB_H
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
unsigned long len) {
return 0;
}
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
if (addr & ~HPAGE_MASK)
return -EINVAL;
return 0;
}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor,
unsigned long ceiling)
{
free_pgd_range(tlb, addr, end, floor, ceiling);
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
}
static inline int huge_pte_none(pte_t pte)
{
return pte_none(pte);
}
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t old_pte = *ptep;
set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
int changed = !pte_same(*ptep, pte);
if (changed) {
set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
flush_tlb_page(vma, addr);
}
return changed;
}
static inline pte_t huge_ptep_get(pte_t *ptep)
{
return *ptep;
}
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#endif /* _ASM_PARISC64_HUGETLB_H */
@@ -145,11 +145,22 @@ extern int npmem_ranges;
#endif /* CONFIG_DISCONTIGMEM */
#ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */
+#define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define REAL_HPAGE_SHIFT 20 /* 20 = 1MB */
# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define REAL_HPAGE_SHIFT 22 /* 22 = 4MB */
# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
#else
# define REAL_HPAGE_SHIFT 24 /* 24 = 16MB */
# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
#endif
#endif /* CONFIG_HUGETLB_PAGE */
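Worked example: on a 64-bit kernel with 4 kB base pages this makes Linux-visible huge pages 2 MB (HPAGE_SHIFT = PMD_SHIFT = 21) while each hardware TLB entry spans 1 MB (REAL_HPAGE_SHIFT = 20), so one virtual huge page is backed by two physical entries. A minimal standalone C sketch of that arithmetic, with the shift values hard-coded as assumptions rather than pulled from the real headers:

#include <stdio.h>

/* Assumed values for CONFIG_64BIT + CONFIG_PARISC_PAGE_SIZE_4KB;
 * PMD_SHIFT == 21 is an assumption here, REAL_HPAGE_SHIFT == 20
 * comes from the #if block above. */
#define PAGE_SHIFT       12
#define HPAGE_SHIFT      21	/* PMD_SHIFT */
#define REAL_HPAGE_SHIFT 20

int main(void)
{
	unsigned long hpage = 1UL << HPAGE_SHIFT;
	unsigned long real  = 1UL << REAL_HPAGE_SHIFT;

	printf("virtual huge page size: %lu kB\n", hpage >> 10);	/* 2048 */
	printf("physical TLB entry size: %lu kB\n", real >> 10);	/* 1024 */
	printf("TLB entries per huge page: %lu\n", hpage / real);	/* 2 */
	printf("base ptes per huge page: %lu\n", hpage >> PAGE_SHIFT);	/* 512 */
	return 0;
}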
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
......
@@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
PxD_FLAG_VALID |
PxD_FLAG_ATTACHED)
+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
-/* The first pmd entry also is marked with _PAGE_GATEWAY as
+/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
* a signal that this pmd may not be freed */
__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
......
@@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
/* This is the size of the initially mapped kernel memory */
-#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */
+#ifdef CONFIG_64BIT
+#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
+#else
+#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
+#endif
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
#if CONFIG_PGTABLE_LEVELS == 3
@@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
-/* bit 21 was formerly the FLUSH bit but is now unused */
+#define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
/* N.B. The bits are defined in terms of a 32 bit word above, so the */
@@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
@@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK (0xf)
#define PxD_FLAG_SHIFT (4)
-#define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
+#define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
#ifndef __ASSEMBLY__
@@ -362,6 +367,18 @@ static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; ret
static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
* Huge pte definitions.
*/
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE))
#else
#define pte_huge(pte) (0)
#define pte_mkhuge(pte) (pte)
#endif
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -410,8 +427,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* Find an entry in the second-level page table.. */
#if CONFIG_PGTABLE_LEVELS == 3
+#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(dir,address) \
-((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
+((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
#else
#define pmd_offset(dir,addr) ((pmd_t *) dir)
#endif
......
@@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack);
*/
typedef unsigned int elf_caddr_t;
-#define start_thread_som(regs, new_pc, new_sp) do { \
-unsigned long *sp = (unsigned long *)new_sp; \
-__u32 spaceid = (__u32)current->mm->context; \
-unsigned long pc = (unsigned long)new_pc; \
-/* offset pc for priv. level */ \
-pc |= 3; \
-\
-regs->iasq[0] = spaceid; \
-regs->iasq[1] = spaceid; \
-regs->iaoq[0] = pc; \
-regs->iaoq[1] = pc + 4; \
-regs->sr[2] = LINUX_GATEWAY_SPACE; \
-regs->sr[3] = 0xffff; \
-regs->sr[4] = spaceid; \
-regs->sr[5] = spaceid; \
-regs->sr[6] = spaceid; \
-regs->sr[7] = spaceid; \
-regs->gr[ 0] = USER_PSW; \
-regs->gr[30] = ((new_sp)+63)&~63; \
-regs->gr[31] = pc; \
-\
-get_user(regs->gr[26],&sp[0]); \
-get_user(regs->gr[25],&sp[-1]); \
-get_user(regs->gr[24],&sp[-2]); \
-get_user(regs->gr[23],&sp[-3]); \
-} while(0)
/* The ELF abi wants things done a "wee bit" differently than
* som does. Supporting this behavior here avoids
* having our own version of create_elf_tables.
......
@@ -49,16 +49,6 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
-/* The range 12-64 is reserved for page size specification. */
-#define MADV_4K_PAGES 12 /* Use 4K pages */
-#define MADV_16K_PAGES 14 /* Use 16K pages */
-#define MADV_64K_PAGES 16 /* Use 64K pages */
-#define MADV_256K_PAGES 18 /* Use 256K pages */
-#define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */
-#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */
-#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
-#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
......
@@ -289,6 +289,14 @@ int main(void)
DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
DEFINE(ASM_PT_INITIAL, PT_INITIAL);
BLANK();
/* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
* and kernel data on physical huge pages */
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
#else
DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
#endif
BLANK();
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
......
@@ -502,21 +502,38 @@
STREG \pte,0(\ptp)
.endm
/* We have (depending on the page size):
* - 38 to 52-bit Physical Page Number
* - 12 to 26-bit page offset
*/
/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
+#define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
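These two shifts convert a page frame number counted in kernel pages into one counted in the CPU's 4 kB TLB granule. A rough C restatement of the same arithmetic, assuming only the 4 kB granule (the helper name is made up for illustration):

#include <stdio.h>

/* Illustrative only: convert a pfn counted in kernel pages
 * (1 << page_shift bytes each) into a pfn counted in 4 kB CPU
 * TLB pages, as PAGE_ADD_SHIFT does above. */
static unsigned long pfn_to_4k_pfn(unsigned long pfn, unsigned int page_shift)
{
	return pfn << (page_shift - 12);	/* PAGE_ADD_SHIFT */
}

int main(void)
{
	/* with 4 kB kernel pages the shift is 0; with 16 kB pages it is 2 */
	printf("%lu\n", pfn_to_4k_pfn(100, 12));	/* 100 */
	printf("%lu\n", pfn_to_4k_pfn(100, 14));	/* 400 */
	return 0;
}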
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-.macro convert_for_tlb_insert20 pte
+.macro convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
copy \pte,\tmp
extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
#endif
.endm
/* Convert the pte and prot to tlb insertion values. How
* this happens is quite subtle, read below */
-.macro make_insert_tlb spc,pte,prot
+.macro make_insert_tlb spc,pte,prot,tmp
space_to_prot \spc \prot /* create prot id from space */
/* The following is the real subtlety. This is depositing
* T <-> _PAGE_REFTRAP
@@ -553,7 +570,7 @@
depdi 1,12,1,\prot
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
-convert_for_tlb_insert20 \pte
+convert_for_tlb_insert20 \pte \tmp
.endm
/* Identical macro to make_insert_tlb above, except it
@@ -646,17 +663,12 @@
/*
-* Align fault_vector_20 on 4K boundary so that both
-* fault_vector_11 and fault_vector_20 are on the
-* same page. This is only necessary as long as we
-* write protect the kernel text, which we may stop
-* doing once we use large page translations to cover
-* the static part of the kernel address space.
+* Fault_vectors are architecturally required to be aligned on a 2K
+* boundary
*/
.text
-.align 4096
+.align 2048
ENTRY(fault_vector_20)
/* First vector is invalid (0) */
@@ -1147,7 +1159,7 @@ dtlb_miss_20w:
tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
update_accessed ptp,pte,t0,t1
-make_insert_tlb spc,pte,prot
+make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
@@ -1173,7 +1185,7 @@ nadtlb_miss_20w:
tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
update_accessed ptp,pte,t0,t1
-make_insert_tlb spc,pte,prot
+make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
@@ -1267,7 +1279,7 @@ dtlb_miss_20:
tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
update_accessed ptp,pte,t0,t1
-make_insert_tlb spc,pte,prot
+make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
@@ -1295,7 +1307,7 @@ nadtlb_miss_20:
tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
update_accessed ptp,pte,t0,t1
-make_insert_tlb spc,pte,prot
+make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
@@ -1404,7 +1416,7 @@ itlb_miss_20w:
tlb_lock spc,ptp,pte,t0,t1,itlb_fault
update_accessed ptp,pte,t0,t1
-make_insert_tlb spc,pte,prot
+make_insert_tlb spc,pte,prot,t1
iitlbt pte,prot
@@ -1428,7 +1440,7 @@ naitlb_miss_20w:
tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
update_accessed ptp,pte,t0,t1
-make_insert_tlb spc,pte,prot
+make_insert_tlb spc,pte,prot,t1
iitlbt pte,prot
@@ -1514,7 +1526,7 @@ itlb_miss_20:
tlb_lock spc,ptp,pte,t0,t1,itlb_fault
update_accessed ptp,pte,t0,t1
-make_insert_tlb spc,pte,prot
+make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
@@ -1534,7 +1546,7 @@ naitlb_miss_20:
tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
update_accessed ptp,pte,t0,t1
-make_insert_tlb spc,pte,prot
+make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
@@ -1566,7 +1578,7 @@ dbit_trap_20w:
tlb_lock spc,ptp,pte,t0,t1,dbit_fault
update_dirty ptp,pte,t1
-make_insert_tlb spc,pte,prot
+make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
@@ -1610,7 +1622,7 @@ dbit_trap_20:
tlb_lock spc,ptp,pte,t0,t1,dbit_fault
update_dirty ptp,pte,t1
-make_insert_tlb spc,pte,prot
+make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
......
@@ -69,7 +69,7 @@ $bss_loop:
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)
-/* Initialize startup VM. Just map first 8/16 MB of memory */
+/* Initialize startup VM. Just map first 16/32 MB of memory */
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
@@ -107,7 +107,7 @@ $bss_loop:
/* Now initialize the PTEs themselves. We use RWX for
* everything ... it will get remapped correctly later */
ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
-ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
+load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
load32 PA(pg0),%r1
$pgt_fill_loop:
......
@@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "The 32-bit Kernel has started...\n");
#endif
printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024));
printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
(int)(PAGE_SIZE / 1024));
#ifdef CONFIG_HUGETLB_PAGE
printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
#else
printk(KERN_CONT "disabled");
#endif
printk(KERN_CONT ".\n");
pdc_console_init();
@@ -377,6 +386,7 @@ arch_initcall(parisc_init);
void start_parisc(void)
{
extern void start_kernel(void);
extern void early_trap_init(void);
int ret, cpunum;
struct pdc_coproc_cfg coproc_cfg;
@@ -397,6 +407,8 @@ void start_parisc(void)
panic("must have an fpu to boot linux");
}
early_trap_init(); /* initialize checksum of fault_vector */
start_kernel();
// not reached
}
@@ -369,7 +369,7 @@ tracesys_exit:
ldo -16(%r30),%r29 /* Reference param save area */
#endif
ldo TASK_REGS(%r1),%r26
-bl do_syscall_trace_exit,%r2
+BL do_syscall_trace_exit,%r2
STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
@@ -390,7 +390,7 @@ tracesys_sigexit:
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
-bl do_syscall_trace_exit,%r2
+BL do_syscall_trace_exit,%r2
ldo TASK_REGS(%r1),%r26
ldil L%syscall_exit_rfi,%r1
......
@@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
}
-int __init check_ivt(void *iva)
+void __init initialize_ivt(const void *iva)
{
extern u32 os_hpmc_size;
extern const u32 os_hpmc[];
@@ -818,8 +818,8 @@ int __init check_ivt(void *iva)
u32 *hpmcp;
u32 length;
-if (strcmp((char *)iva, "cows can fly"))
-return -1;
+if (strcmp((const char *)iva, "cows can fly"))
+panic("IVT invalid");
ivap = (u32 *)iva;
@@ -839,28 +839,23 @@ int __init check_ivt(void *iva)
check += ivap[i];
ivap[5] = -check;
-return 0;
}
-#ifndef CONFIG_64BIT
-extern const void fault_vector_11;
-#endif
-extern const void fault_vector_20;
-void __init trap_init(void)
+/* early_trap_init() is called before we set up kernel mappings and
+* write-protect the kernel */
+void __init early_trap_init(void)
{
-void *iva;
+extern const void fault_vector_20;
-if (boot_cpu_data.cpu_type >= pcxu)
-iva = (void *) &fault_vector_20;
-else
-#ifdef CONFIG_64BIT
-panic("Can't boot 64-bit OS on PA1.1 processor!");
-#else
-iva = (void *) &fault_vector_11;
+#ifndef CONFIG_64BIT
+extern const void fault_vector_11;
+initialize_ivt(&fault_vector_11);
#endif
-if (check_ivt(iva))
-panic("IVT invalid");
+initialize_ivt(&fault_vector_20);
}
+void __init trap_init(void)
+{
+}
@@ -60,7 +60,7 @@ SECTIONS
EXIT_DATA
}
PERCPU_SECTION(8)
-. = ALIGN(PAGE_SIZE);
+. = ALIGN(HUGEPAGE_SIZE);
__init_end = .;
/* freed after init ends here */
@@ -116,7 +116,7 @@ SECTIONS
* that we can properly leave these
* as writable
*/
-. = ALIGN(PAGE_SIZE);
+. = ALIGN(HUGEPAGE_SIZE);
data_start = .;
EXCEPTION_TABLE(8)
@@ -135,8 +135,11 @@ SECTIONS
_edata = .;
/* BSS */
-BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
+BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
+/* bootmap is allocated in setup_bootmem() directly behind bss. */
+. = ALIGN(HUGEPAGE_SIZE);
_end = . ;
STABS_DEBUG
......
@@ -3,3 +3,4 @@
#
obj-y := init.o fault.o ioremap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
/*
* PARISC64 Huge TLB page support.
*
* This parisc implementation is heavily based on the SPARC and x86 code.
*
* Copyright (C) 2015 Helge Deller <deller@gmx.de>
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
if (len > TASK_SIZE)
return -ENOMEM;
if (flags & MAP_FIXED)
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
if (addr)
addr = ALIGN(addr, huge_page_size(h));
/* we need to make sure the colouring is OK */
return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}
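Nothing parisc-specific is needed to exercise this from userspace; the generic hugetlb path ends up in hugetlb_get_unmapped_area() above. A hedged usage sketch with the standard Linux mmap() interface (assumes huge pages were reserved beforehand, e.g. via /proc/sys/vm/nr_hugepages, and an assumed 2 MB huge page size):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* assumed 2 MB huge page size */

	/* MAP_HUGETLB asks for a hugetlb-backed anonymous mapping */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");	/* e.g. no huge pages reserved */
		return 1;
	}
	memset(p, 0, len);	/* touching the range faults in the huge page */
	munmap(p, len);
	return 0;
}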
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
/* We must align the address, because our caller will run
* set_huge_pte_at() on whatever we return, which writes out
* all of the sub-ptes for the hugepage range. So we have
* to give it the first such sub-pte.
*/
addr &= HPAGE_MASK;
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
if (pud) {
pmd = pmd_alloc(mm, pud, addr);
if (pmd)
pte = pte_alloc_map(mm, NULL, pmd, addr);
}
return pte;
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
addr &= HPAGE_MASK;
pgd = pgd_offset(mm, addr);
if (!pgd_none(*pgd)) {
pud = pud_offset(pgd, addr);
if (!pud_none(*pud)) {
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd))
pte = pte_offset_map(pmd, addr);
}
}
return pte;
}
/* Purge data and instruction TLB entries. Must be called holding
* the pa_tlb_lock. The TLB purge instructions are slow on SMP
* machines since the purge must be broadcast to all CPUs.
*/
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
int i;
/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
* Linux standard huge pages (e.g. 2 MB) */
BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);
addr &= HPAGE_MASK;
addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
mtsp(mm->context, 1);
pdtlb(addr);
if (unlikely(split_tlb))
pitlb(addr);
addr += (1UL << REAL_HPAGE_SHIFT);
}
}
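Concretely: with 2 MB virtual huge pages built from 1 MB physical entries, the loop above issues 1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT) = 2 purges, one per physical entry, stepping by the physical huge page size. A small sketch of the address stepping (shift values assumed from the 64-bit/4 kB configuration; the size-encoding bits OR-ed into the low address bits are omitted):

#include <stdio.h>

#define HPAGE_SHIFT      21	/* assumed: 2 MB virtual huge page */
#define REAL_HPAGE_SHIFT 20	/* assumed: 1 MB physical TLB entry */
#define HPAGE_MASK       (~((1UL << HPAGE_SHIFT) - 1))

int main(void)
{
	unsigned long addr = 0x40212345UL;	/* arbitrary example address */
	int i;

	addr &= HPAGE_MASK;	/* round down to the virtual huge page */
	for (i = 0; i < (1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)); i++) {
		printf("purge TLB entry at %#lx\n", addr);	/* 0x40200000, 0x40300000 */
		addr += 1UL << REAL_HPAGE_SHIFT;
	}
	return 0;
}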
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
unsigned long addr_start;
int i;
addr &= HPAGE_MASK;
addr_start = addr;
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
/* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry)
* instead, but then we get double locking on pa_tlb_lock. */
*ptep = entry;
ptep++;
/* Drop the PAGE_SIZE/non-huge tlb entry */
purge_tlb_entries(mm, addr);
addr += PAGE_SIZE;
pte_val(entry) += PAGE_SIZE;
}
purge_tlb_entries_huge(mm, addr_start);
}
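set_huge_pte_at() fans a single huge mapping out across every base PTE beneath it: with 4 kB base pages and 2 MB huge pages that is 1 << HUGETLB_PAGE_ORDER = 512 consecutive PTEs, each advanced by PAGE_SIZE. A toy C model of that fan-out (sizes assumed as above; the pte is reduced to a plain physical address):

#include <stdio.h>

#define PAGE_SHIFT         12	/* assumed 4 kB base pages */
#define HPAGE_SHIFT        21	/* assumed 2 MB huge pages */
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

int main(void)
{
	unsigned long pte = 0x10000000UL;	/* toy pte: just a phys addr */
	int i, n = 1 << HUGETLB_PAGE_ORDER;	/* 512 sub-ptes */

	for (i = 0; i < n; i++) {
		if (i < 2 || i == n - 1)	/* show a few samples */
			printf("pte[%3d] = %#lx\n", i, pte);
		pte += 1UL << PAGE_SHIFT;	/* next 4 kB of the huge page */
	}
	return 0;
}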
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t entry;
entry = *ptep;
set_huge_pte_at(mm, addr, ptep, __pte(0));
return entry;
}
int pmd_huge(pmd_t pmd)
{
return 0;
}
int pud_huge(pud_t pud)
{
return 0;
}
@@ -409,15 +409,11 @@ static void __init map_pages(unsigned long start_vaddr,
unsigned long vaddr;
unsigned long ro_start;
unsigned long ro_end;
-unsigned long fv_addr;
-unsigned long gw_addr;
-extern const unsigned long fault_vector_20;
-extern void * const linux_gateway_page;
+unsigned long kernel_end;
ro_start = __pa((unsigned long)_text);
ro_end = __pa((unsigned long)&data_start);
-fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
-gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+kernel_end = __pa((unsigned long)&_end);
end_paddr = start_paddr + size;
@@ -475,24 +471,25 @@
for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
pte_t pte;
-/*
-* Map the fault vector writable so we can
-* write the HPMC checksum.
-*/
if (force)
pte = __mk_pte(address, pgprot);
-else if (parisc_text_address(vaddr) &&
-address != fv_addr)
+else if (parisc_text_address(vaddr)) {
pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+if (address >= ro_start && address < kernel_end)
+pte = pte_mkhuge(pte);
+}
else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-if (address >= ro_start && address < ro_end
-&& address != fv_addr
-&& address != gw_addr)
-pte = __mk_pte(address, PAGE_KERNEL_RO);
-else
+if (address >= ro_start && address < ro_end) {
+pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+pte = pte_mkhuge(pte);
+} else
#endif
+{
pte = __mk_pte(address, pgprot);
+if (address >= ro_start && address < kernel_end)
+pte = pte_mkhuge(pte);
+}
if (address >= end_paddr) {
if (force)
@@ -536,15 +533,12 @@ void free_initmem(void)
/* force the kernel to see the new TLB entries */
__flush_tlb_range(0, init_begin, init_end);
-/* Attempt to catch anyone trying to execute code here
-* by filling the page with BRK insns.
-*/
-memset((void *)init_begin, 0x00, init_end - init_begin);
/* finally dump all the instructions which were cached, since the
* pages are no-longer executable */
flush_icache_range(init_begin, init_end);
-free_initmem_default(-1);
+free_initmem_default(POISON_FREE_INITMEM);
/* set up a new led state on systems shipped LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
@@ -728,8 +722,8 @@ static void __init pagetable_init(void)
unsigned long size;
start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
-end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
size = pmem_ranges[range].pages << PAGE_SHIFT;
+end_paddr = start_paddr + size;
map_pages((unsigned long)__va(start_paddr), start_paddr,
size, PAGE_KERNEL, 0);
......