diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 7e6c7774041358c33ec6a1f6dce57753407e22f2..4f00068354c11c5fb8e471b6e1e18ffd35788e1b 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -20,6 +20,7 @@ struct fault_attr {
 	atomic_t space;
 	unsigned long verbose;
 	bool task_filter;
+	bool no_warn;
 	unsigned long stacktrace_depth;
 	unsigned long require_start;
 	unsigned long require_end;
@@ -39,6 +40,7 @@ struct fault_attr {
 		.ratelimit_state = RATELIMIT_STATE_INIT_DISABLED,	\
 		.verbose = 2,						\
 		.dname = NULL,						\
+		.no_warn = false,					\
 	}
 
 #define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index cf7b129b0b2b08adcc1aae98f990c384761532dc..af8b0ae64a858059ddba7bc0600a489ecaa0709d 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -40,6 +40,9 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
 
 static void fail_dump(struct fault_attr *attr)
 {
+	if (attr->no_warn)
+		return;
+
 	if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
 		printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
 		       "name %pd, interval %lu, probability %lu, "
diff --git a/mm/failslab.c b/mm/failslab.c
index 215cb1ea169d6552fba669e05611d02b0a4b5f17..f4d829f954546aff52bac62ee65846e8b731b8dd 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -30,6 +30,9 @@ bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
 	if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
 		return false;
 
+	if (gfpflags & __GFP_NOWARN)
+		failslab.attr.no_warn = true;
+
 	return should_fail(&failslab.attr, s->object_size);
 }
 
diff --git a/mm/internal.h b/mm/internal.h
index c94de10189eb54f974ec482b0311f563336d97ab..1b861446c75198784b644bd97485e7555e9bd1c5 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -36,6 +36,21 @@
 /* Do not use these with a slab allocator */
 #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
+/*
+ * Different from WARN_ON_ONCE(), no warning will be issued
+ * when we specify __GFP_NOWARN.
+ */
+#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
+	static bool __section(".data.once") __warned;			\
+	int __ret_warn_once = !!(cond);					\
+									\
+	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
+		__warned = true;					\
+		WARN_ON(1);						\
+	}								\
+	unlikely(__ret_warn_once);					\
+})
+
 void page_writeback_init(void);
 
 vm_fault_t do_swap_page(struct vm_fault *vmf);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44d286286dbbf8ebaeb03743b7b6040033bec277..cede3ebe73539f4abc54ad5537647f7e9c7c9ebf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3253,6 +3253,9 @@ static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 			(gfp_mask & __GFP_DIRECT_RECLAIM))
 		return false;
 
+	if (gfp_mask & __GFP_NOWARN)
+		fail_page_alloc.attr.no_warn = true;
+
 	return should_fail(&fail_page_alloc.attr, 1 << order);
 }
 
@@ -3743,7 +3746,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 		goto out;
 
 	/* Exhausted what can be done so it's blame time */
-	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
+	if (out_of_memory(&oc) ||
+	    WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
 		*did_some_progress = 1;
 
 		/*
@@ -4534,7 +4538,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * All existing users of the __GFP_NOFAIL are blockable, so warn
 		 * of any new users that actually require GFP_NOWAIT
 		 */
-		if (WARN_ON_ONCE(!can_direct_reclaim))
+		if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
 			goto fail;
 
 		/*
@@ -4542,7 +4546,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * because we cannot reclaim anything and only can loop waiting
 		 * for somebody to do a work for us
 		 */
-		WARN_ON_ONCE(current->flags & PF_MEMALLOC);
+		WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
 
 		/*
 		 * non failing costly orders are a hard requirement which we
@@ -4550,7 +4554,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * so that we can identify them and convert them to something
 		 * else.
 		 */
-		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
+		WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask);
 
 		/*
 		 * Help non-failing allocations by giving them access to memory
@@ -4732,10 +4736,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	 * There are several places where we assume that the order value is sane
 	 * so bail out early if the request is out of bound.
 	 */
-	if (unlikely(order >= MAX_ORDER)) {
-		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+	if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp_mask))
 		return NULL;
-	}
 
 	prepare_before_alloc(&gfp_mask);
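For reference, below is a minimal userspace sketch of the semantics the new WARN_ON_ONCE_GFP() macro is intended to provide: warn at most once per call site, and stay silent entirely when the caller's gfp mask carries __GFP_NOWARN. The FAKE_GFP_NOWARN value and the warn_on()/printf() stand-ins are illustrative assumptions rather than kernel definitions, and the sketch relies on GNU C statement expressions (gcc/clang).

/* Illustrative userspace sketch only; not the kernel implementation. */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_GFP_NOWARN 0x200u	/* stand-in for __GFP_NOWARN */

/* printf() stand-in for the kernel's WARN_ON() splat */
#define warn_on(cond) do { if (cond) printf("WARNING hit\n"); } while (0)

/* Warn at most once per call site, never when the gfp mask has NOWARN set. */
#define warn_on_once_gfp(cond, gfp) ({					\
	static bool warned_once;					\
	int cond_hit = !!(cond);					\
									\
	if (!((gfp) & FAKE_GFP_NOWARN) && cond_hit && !warned_once) {	\
		warned_once = true;					\
		warn_on(1);						\
	}								\
	cond_hit;							\
})

int main(void)
{
	/* Caller passed NOWARN: the condition is still evaluated, but no splat. */
	if (warn_on_once_gfp(1, FAKE_GFP_NOWARN))
		printf("failed quietly\n");

	/* Ordinary caller: the same call site warns only on its first hit. */
	for (int i = 0; i < 2; i++) {
		if (warn_on_once_gfp(1, 0))
			printf("iteration %d: failure reported\n", i);
	}

	return 0;
}

As in the patch, the macro still evaluates and returns the condition even when the warning is suppressed, so callers can keep using it directly inside an if ().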