diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1e59761354e1762757843a4b6e1cfc31e05570ba..44d286286dbbf8ebaeb03743b7b6040033bec277 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4272,6 +4272,29 @@ check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
 }
 
 #ifdef CONFIG_MEMORY_RELIABLE
+/*
+ * If reliable fallback is enabled, fall back to the movable zone when no
+ * DMA/normal zone is found.
+ */
+static inline struct zone *mem_reliable_fallback_zone(gfp_t gfp_mask,
+						      struct alloc_context *ac)
+{
+	if (!reliable_allow_fb_enabled())
+		return NULL;
+
+	if (!(gfp_mask & ___GFP_RELIABILITY))
+		return NULL;
+
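+	/*
+	 * Strip ___GFP_RELIABILITY and redo the zonelist walk so that
+	 * movable zones become eligible for this allocation.
+	 */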
+	ac->high_zoneidx = gfp_zone(gfp_mask & ~___GFP_RELIABILITY);
+	ac->preferred_zoneref = first_zones_zonelist(
+		ac->zonelist, ac->high_zoneidx, ac->nodemask);
+	return ac->preferred_zoneref->zone;
+}
+
 static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
 						  struct alloc_context *ac)
 {
@@ -4290,6 +4313,12 @@ static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
 	}
 }
 #else
+static inline struct zone *mem_reliable_fallback_zone(gfp_t gfp_mask,
+						      struct alloc_context *ac)
+{
+	return NULL;
+}
+
 static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
 						  struct alloc_context *ac) {}
 #endif
@@ -4339,8 +4368,14 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 */
 	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
 					ac->high_zoneidx, ac->nodemask);
-	if (!ac->preferred_zoneref->zone)
-		goto nopage;
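+	/*
+	 * No eligible zone was found; a reliable allocation may still be
+	 * able to fall back to the movable zone before giving up.
+	 */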
+	if (!ac->preferred_zoneref->zone) {
+		if (!mem_reliable_fallback_zone(gfp_mask, ac))
+			goto nopage;
+	}
 
 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
 		wake_all_kswapds(order, gfp_mask, ac);