slab_common.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

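/*
 * Core bookkeeping shared by all slab implementations: slab_state
 * records how far allocator bootstrap has progressed, slab_caches is
 * the list of every cache in the system, slab_mutex serializes
 * modifications to that list, and kmem_cache is the cache from which
 * struct kmem_cache objects themselves are allocated.
 */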
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
    
#ifdef CONFIG_HARDENED_USERCOPY
bool usercopy_fallback __ro_after_init =
		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
module_param(usercopy_fallback, bool, 0400);
MODULE_PARM_DESC(usercopy_fallback,
		"WARN instead of reject usercopy whitelist violations");
#endif
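/*
 * Note on the parameter above: since this file is always built in,
 * module_param() exposes the knob with the KBUILD_MODNAME prefix, so
 * it is set on the kernel command line as, e.g.,
 * "slab_common.usercopy_fallback=y", and the 0400 mode makes it
 * readable by root under /sys/module/slab_common/parameters/.
 */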
    
static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);
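/*
 * Caches created with SLAB_TYPESAFE_BY_RCU cannot be torn down
 * immediately on kmem_cache_destroy(): readers may still touch objects
 * under RCU. Such caches are parked on the list above, and the work
 * item releases them after waiting for in-flight RCU callbacks via
 * rcu_barrier() (see the workfn, defined later in this file).
 */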
    
/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
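/*
 * How these masks are consumed (rough sketch of the checks in
 * find_mergeable(), defined later in this file): a cache carrying any
 * SLAB_NEVER_MERGE flag is never aliased, and two caches may only be
 * merged when their SLAB_MERGE_SAME bits agree exactly:
 *
 *	if (flags & SLAB_NEVER_MERGE)
 *		return NULL;
 *	list_for_each_entry_reverse(s, &slab_caches, list) {
 *		...
 *		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
 *			continue;
 *		...
 *	}
 */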
    
/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}
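/*
 * Usage: booting with "slab_nomerge" (or the "slub_nomerge" alias
 * registered below for CONFIG_SLUB kernels) sets slab_nomerge and
 * disables cache merging entirely; returning 1 from the __setup
 * handler marks the option as consumed.
 */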
    
#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);