diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 80ab9c9dd43c099663d93dcfb8e969c1ae2926d9..0ad6ce436355bbb4cd534975d124fee4b447eb42 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1389,6 +1389,20 @@ config RANDOMIZE_MODULE_REGION_FULL
 	  a limited range that contains the [_stext, _etext] interval of the
 	  core kernel, so branch relocations are always in range.
 
+config ARCH_GET_PREFERRED_SIBLING_CPUMASK
+	bool "Get preferred sibling cpumask from MPIDR"
+	depends on ARM64
+	default n
+	help
+	  On some platforms the topology reported to software can mask
+	  differences in the underlying hardware, so software cannot tell
+	  how far apart CPUs are, which can cause performance fluctuations.
+
+	  This option provides an additional interface for querying the
+	  preferred sibling cpumask reported by the platform. The mask
+	  describes CPUs that are clustered together at a relatively short
+	  distance. NOTE: the grouping is highly platform specific.
+
 menuconfig ASCEND_FEATURES
 	bool "Support Ascend Features"
 	depends on ARM64
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index af58dcdefb21a03aafd752d49b0bcbaefc793089..63e29335f426cc1e9e6f6328e15caa1a15074d8a 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -56,4 +56,18 @@ static inline int get_logical_index(u64 mpidr)
 	return -EINVAL;
 }
 
+#ifdef CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK
+void update_mpidr_siblings_masks(unsigned int cpu, bool remove);
+
+static inline void mpidr_siblings_add_cpu(unsigned int cpu)
+{
+	update_mpidr_siblings_masks(cpu, false);
+}
+
+static inline void mpidr_siblings_remove_cpu(unsigned int cpu)
+{
+	update_mpidr_siblings_masks(cpu, true);
+}
+#endif
+
 #endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index cdb81a36be8585012bef5241ee899cbb3efc5d83..6b8bc313a87b37a223e7f8fbe5cabc3767364b96 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -426,6 +426,9 @@ asmlinkage notrace void secondary_start_kernel(void)
 
 	store_cpu_topology(cpu);
 	numa_add_cpu(cpu);
+#ifdef CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK
+	mpidr_siblings_add_cpu(cpu);
+#endif
 
 	/*
 	 * OK, now it's safe to let the boot CPU continue.  Wait for
@@ -481,6 +484,9 @@ int __cpu_disable(void)
 
 	remove_cpu_topology(cpu);
 	numa_remove_cpu(cpu);
+#ifdef CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK
+	mpidr_siblings_remove_cpu(cpu);
+#endif
 
 	/*
 	 * Take this CPU offline.  Once we clear this, we can't return,
@@ -945,6 +951,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	store_cpu_topology(this_cpu);
 	numa_store_cpu_info(this_cpu);
 	numa_add_cpu(this_cpu);
+#ifdef CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK
+	mpidr_siblings_add_cpu(this_cpu);
+#endif
 
 	/*
 	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 2646695e2f2a49db55cac014d0b82fb4e21cbb90..bf937d334b812fba28cec9c19181d128817ee419 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -348,6 +348,64 @@ void remove_cpu_topology(unsigned int cpu)
 	clear_cpu_topology(cpu);
 }
 
+#ifdef CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK
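+/* Upper bound on the number of preferred sibling groups tracked. */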
+#define MAX_MPIDR_SIBLINGS 100
+static struct cpumask mpidr_siblings_cpumask_map[MAX_MPIDR_SIBLINGS];
+
+static void
+__update_mpidr_siblings_masks(unsigned int cpu, int sibling, bool remove)
+{
+	if (WARN_ON_ONCE(sibling < 0 || sibling >= MAX_MPIDR_SIBLINGS))
+		return;
+
+	if (remove)
+		cpumask_clear_cpu(cpu, &mpidr_siblings_cpumask_map[sibling]);
+	else
+		cpumask_set_cpu(cpu, &mpidr_siblings_cpumask_map[sibling]);
+}
+
+void update_mpidr_siblings_masks(unsigned int cpu, bool remove)
+{
+	int sibling, affinity;
+	u32 midr_impl = MIDR_IMPLEMENTOR(read_cpuid_id());
+	u64 mpidr = read_cpuid_mpidr();
+	bool mt = mpidr & MPIDR_MT_BITMASK;
+
+	switch (midr_impl) {
+	case ARM_CPU_IMP_HISI:
+		if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
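+			/*
+			 * Fold MPIDR affinity level 2 into a compact sibling
+			 * group index; this encoding is specific to the
+			 * TSV110 implementation.
+			 */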
+			affinity = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+			sibling = ((affinity >> 3) - 1) / 2;
+			__update_mpidr_siblings_masks(cpu, sibling, remove);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+void arch_get_preferred_sibling_cpumask(unsigned int sibling,
+					cpumask_var_t dstp)
+{
+	if (!dstp)
+		return;
+
+	if (sibling >= MAX_MPIDR_SIBLINGS) {
+		cpumask_clear(dstp);
+		return;
+	}
+
+	cpumask_copy(dstp, &mpidr_siblings_cpumask_map[sibling]);
+}
+EXPORT_SYMBOL(arch_get_preferred_sibling_cpumask);
+#endif
+
 #ifdef CONFIG_ACPI
 static bool __init acpi_cpu_is_threaded(int cpu)
 {
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 2b709416de051989c56b916f1c8e6a3b02b4b3c7..80c28bfce5573c3266e3340b44557709e878edbe 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -32,4 +32,16 @@ unsigned long topology_get_freq_scale(int cpu)
 	return per_cpu(freq_scale, cpu);
 }
 
+#ifdef CONFIG_ARCH_GET_PREFERRED_SIBLING_CPUMASK
+void arch_get_preferred_sibling_cpumask(unsigned int sibling,
+					cpumask_var_t dstp);
+#else
+static inline void
+arch_get_preferred_sibling_cpumask(unsigned int sibling, cpumask_var_t dstp)
+{
+	if (dstp)
+		cpumask_clear(dstp);
+}
+#endif
+
 #endif /* _LINUX_ARCH_TOPOLOGY_H_ */
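
For illustration only (not part of the patch): a minimal sketch of how a
caller might consume the new interface. The dump_preferred_siblings()
helper and the bound of four sibling groups are hypothetical.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/arch_topology.h>

/* Illustrative only: log the CPUs in each preferred sibling group. */
static void dump_preferred_siblings(void)
{
	cpumask_var_t mask;
	unsigned int sibling;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	/* The bound of four groups is arbitrary for this sketch. */
	for (sibling = 0; sibling < 4; sibling++) {
		arch_get_preferred_sibling_cpumask(sibling, mask);
		if (!cpumask_empty(mask))
			pr_info("sibling group %u: %*pbl\n",
				sibling, cpumask_pr_args(mask));
	}

	free_cpumask_var(mask);
}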