diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index af438b1e8a3cbc53eb2e02ce02c4323d71fceb37..8221e717bbce869538fd958a61d547191bc27e9f 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -173,12 +173,13 @@ struct kvmppc_spapr_tce_table {
 	struct page *pages[0];
 };
 
-struct kvmppc_rma_info {
+struct kvmppc_linear_info {
 	void		*base_virt;
 	unsigned long	 base_pfn;
 	unsigned long	 npages;
 	struct list_head list;
-	atomic_t 	 use_count;
+	atomic_t	 use_count;
+	int		 type;
 };
 
 /*
@@ -224,7 +225,7 @@ struct kvm_arch {
 	int tlbie_lock;
 	unsigned long lpcr;
 	unsigned long rmor;
-	struct kvmppc_rma_info *rma;
+	struct kvmppc_linear_info *rma;
 	unsigned long vrma_slb_v;
 	int rma_setup_done;
 	int using_mmu_notifiers;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a61b5b5047d6b3ef0ad9d2e47f0a7247bb544053..1c37a2f8d0f41fdb466120746a3a47f720872567 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -128,8 +128,8 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce *args);
 extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
 				struct kvm_allocate_rma *rma);
-extern struct kvmppc_rma_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvmppc_rma_info *ri);
+extern struct kvmppc_linear_info *kvm_alloc_rma(void);
+extern void kvm_release_rma(struct kvmppc_linear_info *ri);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
@@ -187,13 +187,13 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 	paca[cpu].kvm_hstate.xics_phys = addr;
 }
 
-extern void kvm_rma_init(void);
+extern void kvm_linear_init(void);
 
 #else
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {}
 
-static inline void kvm_rma_init(void)
+static inline void kvm_linear_init(void)
 {}
 #endif
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4cb8f1e9d0442a2a6631d93bc4ce03a05e69ba17..4721b0c8d7b776b31fd75f40dbe6792c1afd5e18 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -598,7 +598,7 @@ void __init setup_arch(char **cmdline_p)
 	/* Initialize the MMU context management stuff */
 	mmu_context_init();
 
-	kvm_rma_init();
+	kvm_linear_init();
 
 	ppc64_boot_msg(0x15, "Setup Done");
 }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3580db8a23268b9cac3b2b9775555295f727fffe..ce1cac7651931c50e532e7f4e72decc01f73dc1f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1055,7 +1055,7 @@ static inline int lpcr_rmls(unsigned long rma_size)
 
 static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	struct kvmppc_rma_info *ri = vma->vm_file->private_data;
+	struct kvmppc_linear_info *ri = vma->vm_file->private_data;
 	struct page *page;
 
 	if (vmf->pgoff >= ri->npages)
@@ -1080,7 +1080,7 @@ static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
 
 static int kvm_rma_release(struct inode *inode, struct file *filp)
 {
-	struct kvmppc_rma_info *ri = filp->private_data;
+	struct kvmppc_linear_info *ri = filp->private_data;
 
 	kvm_release_rma(ri);
 	return 0;
@@ -1093,7 +1093,7 @@ static struct file_operations kvm_rma_fops = {
 
 long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
 {
-	struct kvmppc_rma_info *ri;
+	struct kvmppc_linear_info *ri;
 	long fd;
 
 	ri = kvm_alloc_rma();
@@ -1212,7 +1212,7 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
 	struct kvm *kvm = vcpu->kvm;
-	struct kvmppc_rma_info *ri = NULL;
+	struct kvmppc_linear_info *ri = NULL;
 	unsigned long hva;
 	struct kvm_memory_slot *memslot;
 	struct vm_area_struct *vma;
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index a795a13f4a70f9ddc212e8d125857ad50232248a..1c7e6ab5f9de1075693b7fead4a01c6a0bc75123 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -18,6 +18,14 @@
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 
+#define KVM_LINEAR_RMA		0
+
+static void __init kvm_linear_init_one(ulong size, int count, int type);
+static struct kvmppc_linear_info *kvm_alloc_linear(int type);
+static void kvm_release_linear(struct kvmppc_linear_info *ri);
+
+/*************** RMA *************/
+
 /*
  * This maintains a list of RMAs (real mode areas) for KVM guests to use.
  * Each RMA has to be physically contiguous and of a size that the
@@ -29,32 +37,6 @@
 static unsigned long kvm_rma_size = 64 << 20;	/* 64MB */
 static unsigned long kvm_rma_count;
 
-static int __init early_parse_rma_size(char *p)
-{
-	if (!p)
-		return 1;
-
-	kvm_rma_size = memparse(p, &p);
-
-	return 0;
-}
-early_param("kvm_rma_size", early_parse_rma_size);
-
-static int __init early_parse_rma_count(char *p)
-{
-	if (!p)
-		return 1;
-
-	kvm_rma_count = simple_strtoul(p, NULL, 0);
-
-	return 0;
-}
-early_param("kvm_rma_count", early_parse_rma_count);
-
-static struct kvmppc_rma_info *rma_info;
-static LIST_HEAD(free_rmas);
-static DEFINE_SPINLOCK(rma_lock);
-
 /* Work out RMLS (real mode limit selector) field value for a given RMA size.
    Assumes POWER7 or PPC970. */
 static inline int lpcr_rmls(unsigned long rma_size)
@@ -81,45 +63,73 @@ static inline int lpcr_rmls(unsigned long rma_size)
 	}
 }
 
-/*
- * Called at boot time while the bootmem allocator is active,
- * to allocate contiguous physical memory for the real memory
- * areas for guests.
- */
-void __init kvm_rma_init(void)
+static int __init early_parse_rma_size(char *p)
+{
+	if (!p)
+		return 1;
+
+	kvm_rma_size = memparse(p, &p);
+
+	return 0;
+}
+early_param("kvm_rma_size", early_parse_rma_size);
+
+static int __init early_parse_rma_count(char *p)
+{
+	if (!p)
+		return 1;
+
+	kvm_rma_count = simple_strtoul(p, NULL, 0);
+
+	return 0;
+}
+early_param("kvm_rma_count", early_parse_rma_count);
+
+struct kvmppc_linear_info *kvm_alloc_rma(void)
+{
+	return kvm_alloc_linear(KVM_LINEAR_RMA);
+}
+EXPORT_SYMBOL_GPL(kvm_alloc_rma);
+
+void kvm_release_rma(struct kvmppc_linear_info *ri)
+{
+	kvm_release_linear(ri);
+}
+EXPORT_SYMBOL_GPL(kvm_release_rma);
+
+/*************** generic *************/
+
+static LIST_HEAD(free_linears);
+static DEFINE_SPINLOCK(linear_lock);
+
+static void __init kvm_linear_init_one(ulong size, int count, int type)
 {
 	unsigned long i;
 	unsigned long j, npages;
-	void *rma;
+	void *linear;
 	struct page *pg;
+	const char *typestr;
+	struct kvmppc_linear_info *linear_info;
 
-	/* Only do this on PPC970 in HV mode */
-	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
-	    !cpu_has_feature(CPU_FTR_ARCH_201))
-		return;
-
-	if (!kvm_rma_size || !kvm_rma_count)
+	if (!count)
 		return;
 
-	/* Check that the requested size is one supported in hardware */
-	if (lpcr_rmls(kvm_rma_size) < 0) {
-		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
-		return;
-	}
-
-	npages = kvm_rma_size >> PAGE_SHIFT;
-	rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info));
-	for (i = 0; i < kvm_rma_count; ++i) {
-		rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size);
-		pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma,
-			kvm_rma_size >> 20);
-		rma_info[i].base_virt = rma;
-		rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT;
-		rma_info[i].npages = npages;
-		list_add_tail(&rma_info[i].list, &free_rmas);
-		atomic_set(&rma_info[i].use_count, 0);
-
-		pg = pfn_to_page(rma_info[i].base_pfn);
+	typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "";
+
+	npages = size >> PAGE_SHIFT;
+	linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
+	for (i = 0; i < count; ++i) {
+		linear = alloc_bootmem_align(size, size);
+		pr_info("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
+			size >> 20);
+		linear_info[i].base_virt = linear;
+		linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
+		linear_info[i].npages = npages;
+		linear_info[i].type = type;
+		list_add_tail(&linear_info[i].list, &free_linears);
+		atomic_set(&linear_info[i].use_count, 0);
+
+		pg = pfn_to_page(linear_info[i].base_pfn);
 		for (j = 0; j < npages; ++j) {
 			atomic_inc(&pg->_count);
 			++pg;
@@ -127,30 +137,56 @@ void __init kvm_rma_init(void)
 	}
 }
 
-struct kvmppc_rma_info *kvm_alloc_rma(void)
+static struct kvmppc_linear_info *kvm_alloc_linear(int type)
 {
-	struct kvmppc_rma_info *ri;
+	struct kvmppc_linear_info *ri, *ret;
 
-	ri = NULL;
-	spin_lock(&rma_lock);
-	if (!list_empty(&free_rmas)) {
-		ri = list_first_entry(&free_rmas, struct kvmppc_rma_info, list);
+	ret = NULL;
+	spin_lock(&linear_lock);
+	list_for_each_entry(ri, &free_linears, list) {
+		if (ri->type != type)
+			continue;
+
 		list_del(&ri->list);
 		atomic_inc(&ri->use_count);
+		ret = ri;
+		break;
 	}
-	spin_unlock(&rma_lock);
-	return ri;
+	spin_unlock(&linear_lock);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_alloc_rma);
 
-void kvm_release_rma(struct kvmppc_rma_info *ri)
+static void kvm_release_linear(struct kvmppc_linear_info *ri)
 {
 	if (atomic_dec_and_test(&ri->use_count)) {
-		spin_lock(&rma_lock);
-		list_add_tail(&ri->list, &free_rmas);
-		spin_unlock(&rma_lock);
+		spin_lock(&linear_lock);
+		list_add_tail(&ri->list, &free_linears);
+		spin_unlock(&linear_lock);
 
 	}
 }
-EXPORT_SYMBOL_GPL(kvm_release_rma);
 
+/*
+ * Called at boot time while the bootmem allocator is active,
+ * to allocate contiguous physical memory for the real memory
+ * areas for guests.
+ */
+void __init kvm_linear_init(void)
+{
+	/* RMA */
+	/* Only do this on PPC970 in HV mode */
+	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+	    !cpu_has_feature(CPU_FTR_ARCH_201))
+		return;
+
+	if (!kvm_rma_size || !kvm_rma_count)
+		return;
+
+	/* Check that the requested size is one supported in hardware */
+	if (lpcr_rmls(kvm_rma_size) < 0) {
+		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
+		return;
+	}
+
+	kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA);
+}
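
The point of the split above is that kvm_alloc_linear(), kvm_release_linear() and
kvm_linear_init_one() no longer know anything RMA-specific, so further preallocated
region types can be added without touching the allocator. As a rough sketch of a
second type (illustrative only, not part of this patch: the KVM_LINEAR_HPT constant,
kvm_hpt_count and the wrapper names are assumptions; the code would live in
book3s_hv_builtin.c next to the RMA wrappers):

#define KVM_LINEAR_HPT		1

/* Hypothetical count of preallocated chunks, set from an early_param like kvm_rma_count. */
static unsigned long kvm_hpt_count;

/* Hand out one preallocated chunk of the new type, or NULL if none is free. */
struct kvmppc_linear_info *kvm_alloc_hpt(void)
{
	return kvm_alloc_linear(KVM_LINEAR_HPT);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

/* Return a chunk to the free list once the last user drops it. */
void kvm_release_hpt(struct kvmppc_linear_info *li)
{
	kvm_release_linear(li);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

Boot-time allocation would then be one extra kvm_linear_init_one(size, kvm_hpt_count,
KVM_LINEAR_HPT) call in kvm_linear_init(), placed before the PPC970-only early returns
that guard the RMA pool. The RMA pool itself is still configured on the kernel command
line exactly as before, e.g. kvm_rma_size=256M kvm_rma_count=4, provided the size is
one that lpcr_rmls() accepts.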