diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 97bfc00d2a8204e46244d477edc57c1483e3f767..c62346fdc05d5f241bda610fa7b4ceb901b75553 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -119,16 +119,6 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
 		return 0;
 
-	if (map) {
-		vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
-						MEMREMAP_WC);
-		if (!vgpu->gm.aperture_va)
-			return -ENOMEM;
-	} else {
-		memunmap(vgpu->gm.aperture_va);
-		vgpu->gm.aperture_va = NULL;
-	}
-
 	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
 	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
 		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -141,11 +131,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 						  aperture_pa >> PAGE_SHIFT,
 						  aperture_sz >> PAGE_SHIFT,
 						  map);
-	if (ret) {
-		memunmap(vgpu->gm.aperture_va);
-		vgpu->gm.aperture_va = NULL;
+	if (ret)
 		return ret;
-	}
 
 	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
 	return 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 769c1c24ae7598e9299c1301a93e7fce284de29f..70494e394d2cbd0c2dceba4a8d466dfb11ec7658 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -521,24 +521,24 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 
 	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
 			_EL_OFFSET_STATUS_PTR);
-
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 	ctx_status_ptr.read_ptr = 0;
 	ctx_status_ptr.write_ptr = 0x7;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-static void clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
 {
-	enum intel_engine_id i;
+	unsigned int tmp;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		struct intel_vgpu_submission *s = &vgpu->submission;
-
-		kfree(s->ring_scan_buffer[i]);
-		s->ring_scan_buffer[i] = NULL;
-		s->ring_scan_buffer_size[i] = 0;
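+	/* Only tear down the scan buffers of the engines selected by the mask */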
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		kfree(s->ring_scan_buffer[engine->id]);
+		s->ring_scan_buffer[engine->id] = NULL;
+		s->ring_scan_buffer_size[engine->id] = 0;
 	}
 }
 
@@ -553,9 +553,10 @@ static void reset_execlist(struct intel_vgpu *vgpu,
 		init_vgpu_execlist(vgpu, engine->id);
 }
 
-static int init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu,
+			 unsigned long engine_mask)
 {
-	reset_execlist(vgpu, ALL_ENGINES);
+	reset_execlist(vgpu, engine_mask);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 7dc7a80213a8afacd8bc4e219ec67880b0eaf51d..c6197d990818614a4339f4111577c482acbae341 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -82,7 +82,6 @@ struct intel_gvt_device_info {
 struct intel_vgpu_gm {
 	u64 aperture_sz;
 	u64 hidden_sz;
-	void *aperture_va;
 	struct drm_mm_node low_gm_node;
 	struct drm_mm_node high_gm_node;
 };
@@ -127,7 +126,6 @@ struct intel_vgpu_irq {
 struct intel_vgpu_opregion {
 	bool mapped;
 	void *va;
-	void *va_gopregion;
 	u32 gfn[INTEL_GVT_OPREGION_PAGES];
 };
 
@@ -152,8 +150,8 @@ enum {
 
 struct intel_vgpu_submission_ops {
 	const char *name;
-	int (*init)(struct intel_vgpu *vgpu);
-	void (*clean)(struct intel_vgpu *vgpu);
+	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 };
 
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 38f3b00d3a7a0e111ed11f44dd12e8606f7e8209..9be639aa3b55439b1640794bfbae12e3aec133c8 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1494,7 +1494,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct intel_vgpu_submission *s = &vgpu->submission;
 	u32 data = *(u32 *)p_data;
 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	bool enable_execlist;
@@ -1523,11 +1522,10 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		if (!enable_execlist)
 			return 0;
 
-		if (s->active)
-			return 0;
-
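+		/* Switch this ring alone to execlist submission */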
 		ret = intel_vgpu_select_submission_ops(vgpu,
-				INTEL_VGPU_EXECLIST_SUBMISSION);
+			       ENGINE_MASK(ring_id),
+			       INTEL_VGPU_EXECLIST_SUBMISSION);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index eb92572056c37e5c9b24c1fc0df4d9213150a067..801a3375c7b4fcbf6e2d73c261221faa8d565bba 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -651,6 +651,44 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
 	return ret;
 }
 
+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+{
+	return off >= vgpu_aperture_offset(vgpu) &&
+	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
+}
+
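+/*
+ * BAR2 (aperture) accesses are emulated with a transient WC mapping of
+ * the backing pages in the host aperture, so no long-lived per-vGPU
+ * mapping needs to be maintained.
+ */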
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+		void *buf, unsigned long count, bool is_write)
+{
+	void *aperture_va;
+
+	if (!intel_vgpu_in_aperture(vgpu, off) ||
+	    !intel_vgpu_in_aperture(vgpu, off + count - 1)) {
+		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
+		return -EINVAL;
+	}
+
+	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+					ALIGN_DOWN(off, PAGE_SIZE),
+					count + offset_in_page(off));
+	if (!aperture_va)
+		return -EIO;
+
+	if (is_write)
+		memcpy(aperture_va + offset_in_page(off), buf, count);
+	else
+		memcpy(buf, aperture_va + offset_in_page(off), count);
+
+	io_mapping_unmap(aperture_va);
+
+	return 0;
+}
+
 static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 			size_t count, loff_t *ppos, bool is_write)
 {
@@ -679,8 +717,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 					buf, count, is_write);
 		break;
 	case VFIO_PCI_BAR2_REGION_INDEX:
-		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
-					buf, count, is_write);
+		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
 		break;
 	case VFIO_PCI_BAR1_REGION_INDEX:
 	case VFIO_PCI_BAR3_REGION_INDEX:
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 562b5ad857a4eeec834f11751b514c28669b208f..5c869e3fdf3bc5b6e9df06eb84d39473cead4803 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -56,38 +56,6 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
 	(reg >= gvt->device_info.gtt_start_offset \
 	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
 
-static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 aperture_sz = vgpu_aperture_sz(vgpu);
-
-	return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
-}
-
-static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
-			    void *pdata, unsigned int size, bool is_read)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 offset = gpa - aperture_gpa;
-
-	if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
-		gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
-			     offset, size);
-		return -EINVAL;
-	}
-
-	if (!vgpu->gm.aperture_va) {
-		gvt_vgpu_err("BAR is not enabled\n");
-		return -ENXIO;
-	}
-
-	if (is_read)
-		memcpy(pdata, vgpu->gm.aperture_va + offset, size);
-	else
-		memcpy(vgpu->gm.aperture_va + offset, pdata, size);
-	return 0;
-}
-
 static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		void *p_data, unsigned int bytes, bool read)
 {
@@ -144,11 +112,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 	}
 	mutex_lock(&gvt->lock);
 
-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
-		goto out;
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
@@ -222,11 +185,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 
 	mutex_lock(&gvt->lock);
 
-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
-		goto out;
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 8420d1fc3ddbe922db517fb95e9ff4c1186d62f5..fa75a2eead9070fbfd1894b659b37f84eed9e11a 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -299,21 +299,13 @@ int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
 {
 
 	int i, ret = 0;
-	unsigned long pfn;
 
 	gvt_dbg_core("emulate opregion from kernel\n");
 
 	switch (intel_gvt_host.hypervisor_type) {
 	case INTEL_GVT_HYPERVISOR_KVM:
-		pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
-		vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT,
-						INTEL_GVT_OPREGION_SIZE,
-						MEMREMAP_WB);
-		if (!vgpu_opregion(vgpu)->va_gopregion) {
-			gvt_vgpu_err("failed to map guest opregion\n");
-			ret = -EFAULT;
-		}
-		vgpu_opregion(vgpu)->mapped = true;
+		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
 		break;
 	case INTEL_GVT_HYPERVISOR_XEN:
 		/**
@@ -352,10 +344,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
 		if (vgpu_opregion(vgpu)->mapped)
 			map_vgpu_opregion(vgpu, false);
 	} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
-		if (vgpu_opregion(vgpu)->mapped) {
-			memunmap(vgpu_opregion(vgpu)->va_gopregion);
-			vgpu_opregion(vgpu)->va_gopregion = NULL;
-		}
+		/* Guest opregion is released by VFIO */
 	}
 	free_pages((unsigned long)vgpu_opregion(vgpu)->va,
 		   get_order(INTEL_GVT_OPREGION_SIZE));
@@ -480,19 +469,45 @@ static bool querying_capabilities(u32 scic)
  */
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 {
-	u32 *scic, *parm;
+	u32 scic, parm;
 	u32 func, subfunc;
+	u64 scic_pa = 0, parm_pa = 0;
+	int ret;
 
 	switch (intel_gvt_host.hypervisor_type) {
 	case INTEL_GVT_HYPERVISOR_XEN:
-		scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
-		parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+		scic = *(u32 *)(vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_SCIC);
+		parm = *(u32 *)(vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_PARM);
 		break;
 	case INTEL_GVT_HYPERVISOR_KVM:
-		scic = vgpu_opregion(vgpu)->va_gopregion +
-						INTEL_GVT_OPREGION_SCIC;
-		parm = vgpu_opregion(vgpu)->va_gopregion +
-						INTEL_GVT_OPREGION_PARM;
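+		/*
+		 * SCIC and PARM live in the first guest OpRegion page; with
+		 * no host mapping of the guest OpRegion kept around, access
+		 * them through their guest physical addresses.
+		 */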
+		scic_pa = ((u64)vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+					INTEL_GVT_OPREGION_SCIC;
+		parm_pa = ((u64)vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+					INTEL_GVT_OPREGION_PARM;
+
+		ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
+						    &scic, sizeof(scic));
+		if (ret) {
+			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %zu\n",
+				ret, scic_pa, sizeof(scic));
+			return ret;
+		}
+
+		ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
+						    &parm, sizeof(parm));
+		if (ret) {
+			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %zu\n",
+				ret, parm_pa, sizeof(parm));
+			return ret;
+		}
+
 		break;
 	default:
 		gvt_vgpu_err("not supported hypervisor\n");
@@ -510,9 +525,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 		return 0;
 	}
 
-	func = GVT_OPREGION_FUNC(*scic);
-	subfunc = GVT_OPREGION_SUBFUNC(*scic);
-	if (!querying_capabilities(*scic)) {
+	func = GVT_OPREGION_FUNC(scic);
+	subfunc = GVT_OPREGION_SUBFUNC(scic);
+	if (!querying_capabilities(scic)) {
 		gvt_vgpu_err("requesting runtime service: func \"%s\","
 				" subfunc \"%s\"\n",
 				opregion_func_name(func),
@@ -521,11 +536,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 		 * emulate exit status of function call, '0' means
 		 * "failure, generic, unsupported or unknown cause"
 		 */
-		*scic &= ~OPREGION_SCIC_EXIT_MASK;
-		return 0;
+		scic &= ~OPREGION_SCIC_EXIT_MASK;
+		goto out;
+	}
+
+	scic = 0;
+	parm = 0;
+
+out:
+	switch (intel_gvt_host.hypervisor_type) {
+	case INTEL_GVT_HYPERVISOR_XEN:
+		*(u32 *)(vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_SCIC) = scic;
+		*(u32 *)(vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_PARM) = parm;
+		break;
+	case INTEL_GVT_HYPERVISOR_KVM:
+		ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
+						    &scic, sizeof(scic));
+		if (ret) {
+			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %zu\n",
+				ret, scic_pa, sizeof(scic));
+			return ret;
+		}
+
+		ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
+						    &parm, sizeof(parm));
+		if (ret) {
+			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %zu\n",
+				ret, parm_pa, sizeof(parm));
+			return ret;
+		}
+
+		break;
+	default:
+		gvt_vgpu_err("not supported hypervisor\n");
+		return -EINVAL;
 	}
 
-	*scic = 0;
-	*parm = 0;
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index d031f6486ce3fa19b602a7535f25c17efa82ba6a..cc1ce361cd76a365c5036f82c2618e0b5681028e 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -50,6 +50,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 struct vgpu_sched_data {
 	struct list_head lru_list;
 	struct intel_vgpu *vgpu;
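+	/* true while queued for scheduling; guards duplicate start/stop */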
+	bool active;
 
 	ktime_t sched_in_time;
 	ktime_t sched_out_time;
@@ -332,6 +334,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 	if (!hrtimer_active(&sched_data->timer))
 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
 			sched_data->period), HRTIMER_MODE_ABS);
+	vgpu_data->active = true;
 }
 
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
@@ -339,6 +342,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
 	list_del_init(&vgpu_data->lru_list);
+	vgpu_data->active = false;
 }
 
 static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
@@ -374,9 +378,12 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 
 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
-	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
-	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	if (!vgpu_data->active) {
+		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	}
 }
 
 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
@@ -389,6 +396,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
 	int ring_id;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+
+	if (!vgpu_data->active)
+		return;
 
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0056638b0c16dc2090e826aaa5b9b30c5aed13bb..b55b3580ca1dd00402374354f3d12918817de378 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -991,7 +991,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 
-	intel_vgpu_select_submission_ops(vgpu, 0);
+	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 	i915_gem_context_put(s->shadow_ctx);
 	kmem_cache_destroy(s->workloads);
 }
@@ -1079,6 +1079,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
  *
  */
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1091,21 +1092,22 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
 		return -EINVAL;
 
-	if (s->active) {
-		s->ops->clean(vgpu);
-		s->active = false;
-		gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
-				vgpu->id, s->ops->name);
-	}
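+	/* De-selecting the ops (interface 0) is only valid device-wide */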
+	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+		return -EINVAL;
+
+	if (s->active)
+		s->ops->clean(vgpu, engine_mask);
 
 	if (interface == 0) {
 		s->ops = NULL;
 		s->virtual_submission_interface = 0;
-		gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+		s->active = false;
+		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
 		return 0;
 	}
 
-	ret = ops[interface]->init(vgpu);
+	ret = ops[interface]->init(vgpu, engine_mask);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3de77dfa7c59f27d6daaa1249cb81f5e803c7561..ff175a98b19ed41c6cdd8405a8872d4bdb6003dd 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -141,6 +141,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface);
 
 extern const struct intel_vgpu_submission_ops
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index a8784fa91289c82d442f6b90cb13fb4f837fe7b3..b87b19d8443c64a1826e44dca789e938d91a0dd7 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -520,8 +520,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	intel_vgpu_reset_submission(vgpu, resetting_eng);
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
-		intel_vgpu_select_submission_ops(vgpu, 0);
-
+		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {
 			intel_vgpu_reset_gtt(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index ccb5ba043b63a344e8bdf09766a8ce8dd32693ba..95478db9998b51a410b927d654990967c74c5fdc 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1032,7 +1032,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
 	const struct drm_i915_reg_table *table = engine->reg_tables;
 	int count = engine->reg_table_count;
 
-	do {
+	for (; count > 0; ++table, --count) {
 		if (!table->master || is_master) {
 			const struct drm_i915_reg_descriptor *reg;
 
@@ -1040,7 +1040,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
 			if (reg != NULL)
 				return reg;
 		}
-	} while (table++, --count);
+	}
 
 	return NULL;
 }
@@ -1212,6 +1212,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 					continue;
 			}
 
+			if (desc->bits[i].offset >= length) {
+				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
+						 *cmd, engine->name);
+				return false;
+			}
+
 			dword = cmd[desc->bits[i].offset] &
 				desc->bits[i].mask;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index caebd5825279e4564b9ed46b3ab7537938a6e271..a42deebedb0f12784155bb7499fef44a2c31cf28 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3717,7 +3717,12 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 					    struct intel_display_error_state *error);
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
+				    u32 val, int timeout_us);
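+/* Existing callers keep the original 500us timeout */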
+#define sandybridge_pcode_write(dev_priv, mbox, val)	\
+	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
+
 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
 		      u32 reply_mask, u32 reply, int timeout_base_ms);
 
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 51108ffc28d1d0eb7a986f6eb3e188563897c206..f7f771749e4809dcedbea023bea54e4be4ac1537 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1107,6 +1107,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
 }
 
 static const u8 cnp_ddc_pin_map[] = {
+	[0] = 0, /* N/A */
 	[DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
 	[DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
 	[DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
@@ -1115,9 +1116,14 @@ static const u8 cnp_ddc_pin_map[] = {
 
 static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
 {
-	if (HAS_PCH_CNP(dev_priv) &&
-	    vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
-		return cnp_ddc_pin_map[vbt_pin];
+	if (HAS_PCH_CNP(dev_priv)) {
+		if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
+			return cnp_ddc_pin_map[vbt_pin];
+		} else {
+			DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
+			return 0;
+		}
+	}
 
 	return vbt_pin;
 }
@@ -1323,11 +1329,13 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 		expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
 	} else if (bdb->version == 195) {
 		expected_size = 37;
-	} else if (bdb->version <= 197) {
+	} else if (bdb->version <= 215) {
 		expected_size = 38;
+	} else if (bdb->version <= 216) {
+		expected_size = 39;
 	} else {
-		expected_size = 38;
-		BUILD_BUG_ON(sizeof(*child) < 38);
+		expected_size = sizeof(*child);
+		BUILD_BUG_ON(sizeof(*child) < 39);
 		DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
 				 bdb->version, expected_size);
 	}
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index d77e2bec1e29ded804c05ef1df9f262839438454..5dc118f26b51b7b63c8849e53b6a295c11328c80 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1370,10 +1370,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 		break;
 	}
 
-	/* Inform power controller of upcoming frequency change */
+	/*
+	 * Inform power controller of upcoming frequency change. BSpec
+	 * requires us to wait up to 150usec, but that leads to timeouts;
+	 * the 2ms used here is based on experiment.
+	 */
 	mutex_lock(&dev_priv->pcu_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      0x80000000);
+	ret = sandybridge_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      0x80000000, 2000);
 	mutex_unlock(&dev_priv->pcu_lock);
 
 	if (ret) {
@@ -1404,8 +1409,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 	I915_WRITE(CDCLK_CTL, val);
 
 	mutex_lock(&dev_priv->pcu_lock);
-	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
-				      cdclk_state->voltage_level);
+	/*
+	 * The timeout isn't specified, the 2ms used here is based on
+	 * The timeout isn't specified; the 2ms used here is based on
+	 * FIXME: Waiting for the request completion could be delayed until
+	 * the next PCODE request based on BSpec.
+	 */
+	ret = sandybridge_pcode_write_timeout(dev_priv,
+					      HSW_PCODE_DE_WRITE_FREQ_REQ,
+					      cdclk_state->voltage_level, 2000);
 	mutex_unlock(&dev_priv->pcu_lock);
 
 	if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1db79a860b96eea8485b808668278c29152bbf81..1a6e699e19e004dfaa037d4c7b775e0090942295 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -9149,8 +9149,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 	return 0;
 }
 
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
-			    u32 mbox, u32 val)
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
+				    u32 mbox, u32 val, int timeout_us)
 {
 	int status;
 
@@ -9173,7 +9173,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 
 	if (__intel_wait_for_register_fw(dev_priv,
 					 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
-					 500, 0, NULL)) {
+					 timeout_us, 0, NULL)) {
 		DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
 			  val, mbox, __builtin_return_address(0));
 		return -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index e3d7745a91518a6c031d03ffb0e9de551cfb11bc..98dff6058d3c92a50dddc0d0634879d9efdcbdbd 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -412,6 +412,8 @@ struct child_device_config {
 	u16 dp_gpio_pin_num;					/* 195 */
 	u8 dp_iboost_level:4;					/* 196 */
 	u8 hdmi_iboost_level:4;					/* 196 */
+	u8 dp_max_link_rate:2;					/* 216 CNL+ */
+	u8 dp_max_link_rate_reserved:6;				/* 216 */
 } __packed;
 
 struct bdb_general_definitions {