Commit 3b54765c authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge updates from Andrew Morton:

 - a few misc things

 - ocfs2 updates

 - the v9fs maintainers have been missing for a long time. I've taken
   over v9fs patch slinging.

 - most of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (116 commits)
  mm,oom_reaper: check for MMF_OOM_SKIP before complaining
  mm/ksm: fix interaction with THP
  mm/memblock.c: cast constant ULLONG_MAX to phys_addr_t
  headers: untangle kmemleak.h from mm.h
  include/linux/mmdebug.h: make VM_WARN* non-rvals
  mm/page_isolation.c: make start_isolate_page_range() fail if already isolated
  mm: change return type to vm_fault_t
  mm, oom: remove 3% bonus for CAP_SYS_ADMIN processes
  mm, page_alloc: wakeup kcompactd even if kswapd cannot free more memory
  kernel/fork.c: detect early free of a live mm
  mm: make counting of list_lru_one::nr_items lockless
  mm/swap_state.c: make bool enable_vma_readahead and swap_vma_readahead() static
  block_invalidatepage(): only r...
parents 3fd14cdc 97b1255c
Showing 50 additions and 71 deletions
@@ -1840,30 +1840,29 @@
 	keepinitrd	[HW,ARM]
 
 	kernelcore=	[KNL,X86,IA-64,PPC]
-			Format: nn[KMGTPE] | "mirror"
-			This parameter
-			specifies the amount of memory usable by the kernel
-			for non-movable allocations. The requested amount is
-			spread evenly throughout all nodes in the system. The
-			remaining memory in each node is used for Movable
-			pages. In the event, a node is too small to have both
-			kernelcore and Movable pages, kernelcore pages will
-			take priority and other nodes will have a larger number
-			of Movable pages. The Movable zone is used for the
-			allocation of pages that may be reclaimed or moved
-			by the page migration subsystem. This means that
-			HugeTLB pages may not be allocated from this zone.
-			Note that allocations like PTEs-from-HighMem still
-			use the HighMem zone if it exists, and the Normal
+			Format: nn[KMGTPE] | nn% | "mirror"
+			This parameter specifies the amount of memory usable by
+			the kernel for non-movable allocations. The requested
+			amount is spread evenly throughout all nodes in the
+			system as ZONE_NORMAL. The remaining memory is used for
+			movable memory in its own zone, ZONE_MOVABLE. In the
+			event, a node is too small to have both ZONE_NORMAL and
+			ZONE_MOVABLE, kernelcore memory will take priority and
+			other nodes will have a larger ZONE_MOVABLE.
+
+			ZONE_MOVABLE is used for the allocation of pages that
+			may be reclaimed or moved by the page migration
+			subsystem. Note that allocations like PTEs-from-HighMem
+			still use the HighMem zone if it exists, and the Normal
 			zone if it does not.
 
-			Instead of specifying the amount of memory (nn[KMGTPE]),
-			you can specify "mirror" option. In case "mirror"
+			It is possible to specify the exact amount of memory in
+			the form of "nn[KMGTPE]", a percentage of total system
+			memory in the form of "nn%", or "mirror". If "mirror"
 			option is specified, mirrored (reliable) memory is used
 			for non-movable allocations and remaining memory is used
-			for Movable pages. nn[KMGTPE] and "mirror" are exclusive,
-			so you can NOT specify nn[KMGTPE] and "mirror" at the same
-			time.
+			for Movable pages. "nn[KMGTPE]", "nn%", and "mirror"
+			are exclusive, so you cannot specify multiple forms.
 
 	kgdbdbgp=	[KGDB,HW] kgdb over EHCI usb debug port.
 			Format: <Controller#>[,poll interval]
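For illustration, the three accepted forms of the updated kernelcore= entry above would look like the following on the boot command line (the size and percentage are arbitrary example values, not taken from the patch):

	kernelcore=512M
	kernelcore=30%
	kernelcore=mirror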
@@ -2377,13 +2376,14 @@
 	mousedev.yres=	[MOUSE] Vertical screen resolution, used for devices
 			reporting absolute coordinates, such as tablets
 
-	movablecore=nn[KMG]	[KNL,X86,IA-64,PPC] This parameter
-			is similar to kernelcore except it specifies the
-			amount of memory used for migratable allocations.
-			If both kernelcore and movablecore is specified,
-			then kernelcore will be at *least* the specified
-			value but may be more. If movablecore on its own
-			is specified, the administrator must be careful
+	movablecore=	[KNL,X86,IA-64,PPC]
+			Format: nn[KMGTPE] | nn%
+			This parameter is the complement to kernelcore=, it
+			specifies the amount of memory used for migratable
+			allocations. If both kernelcore and movablecore is
+			specified, then kernelcore will be at *least* the
+			specified value but may be more. If movablecore on its
+			own is specified, the administrator must be careful
 			that the amount of memory usable for all allocations
 			is not too small.
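Per the text above, movablecore= accepts the same absolute and percentage forms, and when both parameters are given the kernelcore amount acts as a floor. An illustrative combined invocation (values made up for the example):

	kernelcore=1G movablecore=50%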
@@ -111,7 +111,7 @@ my $regex_direct_begin_default = 'order=([0-9]*) may_writepage=([0-9]*) gfp_flag
 my $regex_direct_end_default = 'nr_reclaimed=([0-9]*)';
 my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
 my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
-my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
+my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*) gfp_flags=([A-Z_|]*)';
 my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) classzone_idx=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_skipped=([0-9]*) nr_taken=([0-9]*) lru=([a-z_]*)';
 my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) nr_dirty=([0-9]*) nr_writeback=([0-9]*) nr_congested=([0-9]*) nr_immediate=([0-9]*) nr_activate=([0-9]*) nr_ref_keep=([0-9]*) nr_unmap_fail=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
 my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
@@ -201,7 +201,7 @@ $regex_kswapd_sleep = generate_traceevent_regex(
 $regex_wakeup_kswapd = generate_traceevent_regex(
 			"vmscan/mm_vmscan_wakeup_kswapd",
 			$regex_wakeup_kswapd_default,
-			"nid", "zid", "order");
+			"nid", "zid", "order", "gfp_flags");
 $regex_lru_isolate = generate_traceevent_regex(
 			"vmscan/mm_vmscan_lru_isolate",
 			$regex_lru_isolate_default,
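Taken together, the two hunks above teach the post-processing script to capture the gfp_flags field that the mm_vmscan_wakeup_kswapd tracepoint now emits. Going by the regex alone, a matching trace line would be shaped like the following (the field values are made up for illustration):

	nid=0 zid=2 order=3 gfp_flags=GFP_KERNEL|__GFP_HIGHMEM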
@@ -833,7 +833,7 @@ void flush_dcache_page(struct page *page)
 	}
 
 	/* don't handle anon pages here */
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!mapping)
 		return;
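This is the first of many call sites in this merge that switch from page_mapping() to page_mapping_file(). The helper itself is introduced by one of the mm patches in the series; the body below is a sketch written from memory of the include/linux/mm.h definition, so treat the exact wording as an assumption. The point is that file-cache flushing code now gets NULL for swap-cache pages instead of the swap address space:

	/* Sketch of the new helper (assumed definition, not quoted from this diff). */
	static inline struct address_space *page_mapping_file(struct page *page)
	{
		if (unlikely(PageSwapCache(page)))
			return NULL;	/* swap-cache pages have no file mapping */
		return page_mapping(page);
	}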
@@ -128,12 +128,7 @@ asmlinkage void __div0(void)
 	error("Attempting division by 0!");
 }
 
-unsigned long __stack_chk_guard;
-
-void __stack_chk_guard_setup(void)
-{
-	__stack_chk_guard = 0x000a0dff;
-}
+const unsigned long __stack_chk_guard = 0x000a0dff;
 
 void __stack_chk_fail(void)
 {
@@ -150,8 +145,6 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
 {
 	int ret;
 
-	__stack_chk_guard_setup();
-
 	output_data = (unsigned char *)output_start;
 	free_mem_ptr = free_mem_ptr_p;
 	free_mem_end_ptr = free_mem_ptr_end_p;
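The decompressor change above (repeated for MIPS further down) replaces a runtime-initialized stack-protector guard with a const one, which is why the __stack_chk_guard_setup() call can simply be deleted. A conceptual sketch of what -fstack-protector makes the compiler emit shows why any value present before the first protected function runs is sufficient (this is illustrative, not code from the diff):

	/* What a protected function conceptually looks like after instrumentation. */
	void example(void)
	{
		unsigned long canary = __stack_chk_guard;	/* copied in on entry */

		/* ... body; a stack overflow here would clobber the canary ... */

		if (canary != __stack_chk_guard)		/* verified on exit */
			__stack_chk_fail();			/* mismatch: abort */
	}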
@@ -70,7 +70,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
-		__flush_dcache_page(page_mapping(from), from);
+		__flush_dcache_page(page_mapping_file(from), from);
 
 	raw_spin_lock(&minicache_lock);
@@ -76,7 +76,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	unsigned long kfrom, kto;
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
-		__flush_dcache_page(page_mapping(from), from);
+		__flush_dcache_page(page_mapping_file(from), from);
 
 	/* FIXME: not highmem safe */
 	discard_old_kernel_data(page_address(to));
@@ -90,7 +90,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
-		__flush_dcache_page(page_mapping(from), from);
+		__flush_dcache_page(page_mapping_file(from), from);
 
 	raw_spin_lock(&minicache_lock);
@@ -195,7 +195,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 	if (page == ZERO_PAGE(0))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
 	if (mapping) {
@@ -285,7 +285,7 @@ void __sync_icache_dcache(pte_t pteval)
 	page = pfn_to_page(pfn);
 	if (cache_is_vipt_aliasing())
-		mapping = page_mapping(page);
+		mapping = page_mapping_file(page);
 	else
 		mapping = NULL;
@@ -333,7 +333,7 @@ void flush_dcache_page(struct page *page)
 		return;
 	}
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 
 	if (!cache_ops_need_broadcast() &&
 	    mapping && !page_mapcount(page))
@@ -363,7 +363,7 @@ void flush_kernel_dcache_page(struct page *page)
 	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
 		struct address_space *mapping;
 
-		mapping = page_mapping(page);
+		mapping = page_mapping_file(page);
 
 		if (!mapping || mapping_mapped(mapping)) {
 			void *addr;
@@ -76,12 +76,7 @@ void error(char *x)
 #include "../../../../lib/decompress_unxz.c"
 #endif
 
-unsigned long __stack_chk_guard;
-
-void __stack_chk_guard_setup(void)
-{
-	__stack_chk_guard = 0x000a0dff;
-}
+const unsigned long __stack_chk_guard = 0x000a0dff;
 
 void __stack_chk_fail(void)
 {
@@ -92,8 +87,6 @@ void decompress_kernel(unsigned long boot_heap_start)
 {
 	unsigned long zimage_start, zimage_size;
 
-	__stack_chk_guard_setup();
-
 	zimage_start = (unsigned long)(&__image_begin);
 	zimage_size = (unsigned long)(&__image_end) -
 		(unsigned long)(&__image_begin);
@@ -86,7 +86,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
 void __flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping_file(page);
 	unsigned long addr;
 
 	if (mapping && !mapping_mapped(mapping)) {
@@ -180,7 +180,7 @@ void flush_dcache_page(struct page *page)
 	if (page == ZERO_PAGE(0))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 
 	/* Flush this page if there are aliases. */
 	if (mapping && !mapping_mapped(mapping)) {
@@ -215,7 +215,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
 	if (page == ZERO_PAGE(0))
 		return;
 
-	mapping = page_mapping(page);
+	mapping = page_mapping_file(page);
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
@@ -88,7 +88,8 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 		return;
 
 	page = pfn_to_page(pfn);
-	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+	if (page_mapping_file(page) &&
+	    test_bit(PG_dcache_dirty, &page->flags)) {
 		flush_kernel_dcache_page_addr(pfn_va(pfn));
 		clear_bit(PG_dcache_dirty, &page->flags);
 	} else if (parisc_requires_coherency())
@@ -304,7 +305,7 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping_file(page);
 	struct vm_area_struct *mpnt;
 	unsigned long offset;
 	unsigned long addr, old_addr = 0;
@@ -117,12 +117,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
 			    unsigned long ceiling);
 
-/*
- * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
- * to override the version in mm/hugetlb.c
- */
-#define vma_mmu_pagesize vma_mmu_pagesize
-
 /*
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
@@ -568,10 +568,7 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 	if (!radix_enabled())
 		return 1UL << mmu_psize_to_shift(psize);
 #endif
-	if (!is_vm_hugetlb_page(vma))
-		return PAGE_SIZE;
-
-	return huge_page_size(hstate_vma(vma));
+	return vma_kernel_pagesize(vma);
 }
 
 static inline bool is_power_of_4(unsigned long x)
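The two powerpc hunks above drop the architecture's private vma_mmu_pagesize() override in favor of the generic helper. Judging from the removed lines, the generic vma_kernel_pagesize() performs the equivalent check; a simplified sketch, mirroring the deleted code (the real mm/ implementation may differ in detail):

	/* Simplified sketch of the generic helper the powerpc code now uses. */
	unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
	{
		if (is_vm_hugetlb_page(vma))
			return huge_page_size(hstate_vma(vma));	/* hugetlb VMA */
		return PAGE_SIZE;				/* everything else */
	}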
@@ -112,7 +112,7 @@ static int mm_iommu_move_page_from_cma(struct page *page)
 	put_page(page); /* Drop the gup reference */
 
 	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
-			    NULL, 0, MIGRATE_SYNC, MR_CMA);
+			    NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
 	if (ret) {
 		if (!list_empty(&cma_migrate_pages))
 			putback_movable_pages(&cma_migrate_pages);
@@ -38,6 +38,7 @@
 #include <linux/suspend.h>
 #include <linux/memblock.h>
 #include <linux/gfp.h>
+#include <linux/kmemleak.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/iommu.h>
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/kmemleak.h>
 #include <linux/bitmap.h>
 #include <linux/bootmem.h>
 #include <asm/msi_bitmap.h>
@@ -15,7 +15,7 @@
 #include <linux/hardirq.h>
 #include <linux/log2.h>
 #include <linux/kprobes.h>
-#include <linux/slab.h>
+#include <linux/kmemleak.h>
 #include <linux/time.h>
 #include <linux/module.h>
 #include <linux/sched/signal.h>
@@ -27,7 +27,6 @@
 #include <linux/err.h>
 #include <linux/spinlock.h>
 #include <linux/kernel_stat.h>
-#include <linux/kmemleak.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/irqflags.h>