diff --git a/oneflow/python/framework/tensor.py b/oneflow/python/framework/tensor.py
index e7aa04f307d1cb5c3aedcc026ebc15bf24cf3ef4..0a9bb6bb109ea520603ad47997ff9521417f6a2c 100644
--- a/oneflow/python/framework/tensor.py
+++ b/oneflow/python/framework/tensor.py
@@ -259,17 +259,26 @@ class Tensor:
     @_auto_determine
     def grad(self, new_grad):
         def check_grad(grad, new_grad):
-            assert grad.shape == new_grad.shape, "Shape of new grad is not equal"
-            assert grad.device == new_grad.device, "Device of new grad is not equal"
-            assert grad.dtype == new_grad.dtype, "Data type of new grad is not equal"
-            assert type(grad) == type(new_grad), "Type of new grad is not equal"
+            assert (
+                grad.shape == new_grad.shape
+            ), f"Shapes of grads are not equal: {grad.shape} vs {new_grad.shape}"
+            assert (
+                grad.device == new_grad.device
+            ), f"Devices of grads are not equal: {grad.device} vs {new_grad.device}"
+            assert (
+                grad.dtype == new_grad.dtype
+            ), f"Data types of grads are not equal: {grad.dtype} vs {new_grad.dtype}"
 
         if self._local_or_consistent_tensor is not None:
             if new_grad is None:
                 self._local_or_consistent_tensor.set_grad(None)
             else:
-                new_grad_detach = new_grad.detach()._local_or_consistent_tensor
-                check_grad(self._local_or_consistent_tensor.grad, new_grad_detach)
+                if isinstance(new_grad, Tensor):
+                    if not new_grad.is_determined:
+                        new_grad.determine()
+                    new_grad = new_grad._local_or_consistent_tensor
+                new_grad_detach = new_grad.detach()
+                check_grad(self.grad, new_grad_detach)
                 self._local_or_consistent_tensor.set_grad(new_grad_detach)
 
     @property
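The setter above now accepts a plain `Tensor` on the right-hand side, determining it first if needed before detaching and validating it. A minimal usage sketch, assuming an eager-mode build and that `x.grad` has already been populated by a backward pass (the API is PyTorch-style, matching the tests below):

```python
import numpy as np
import oneflow as flow

x = flow.Tensor(np.ones((2, 3)), dtype=flow.float32, requires_grad=True)
# ... run a forward and backward pass here so that x.grad is populated ...

# With this patch the right-hand side may be an ordinary (even undetermined)
# Tensor; the setter determines it, detaches it, and checks shape/device/dtype.
x.grad = x.grad * 2
```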
diff --git a/oneflow/python/nn/parameter.py b/oneflow/python/nn/parameter.py
index 8ca2201e4d3fc85c976367f126a45dc3cfac1835..5b066758548d25ef978c8d1410b4b0d4740fdd97 100644
--- a/oneflow/python/nn/parameter.py
+++ b/oneflow/python/nn/parameter.py
@@ -26,6 +26,8 @@ class Parameter(Tensor):
         # TODO: uncomment the following two lines when consistent <-> local conversion is ready
         # data.set_is_consistent(True)
         # data.set_placement(flow.placement("gpu", ["0:0"], None))
+        if not isinstance(data, Tensor):
+            data = Tensor(data)
         self._data = data
         self._data.requires_grad = requires_grad
 
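With the coercion added above, `Parameter` no longer requires a pre-built `Tensor`; anything the `Tensor` constructor accepts (e.g. a NumPy array, as used throughout the tests) now works. A hedged sketch, assuming `Parameter` is exposed as `flow.nn.Parameter`; `requires_grad` is passed explicitly to avoid assuming its default:

```python
import numpy as np
import oneflow as flow

# The ndarray is wrapped into a Tensor inside __init__ before requires_grad is set.
w = flow.nn.Parameter(np.random.randn(4, 4), requires_grad=True)
```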
diff --git a/oneflow/python/test/tensor/test_tensor.py b/oneflow/python/test/tensor/test_tensor.py
index 5596eab2e59a3b87fd569e7eb61bd84bc1b5a03e..fabf56045191584691a3adb7d2cc465938a51098 100644
--- a/oneflow/python/test/tensor/test_tensor.py
+++ b/oneflow/python/test/tensor/test_tensor.py
@@ -763,22 +763,6 @@ class TestTensor(flow.unittest.TestCase):
         test_case.assertEqual(z.is_leaf, True)
         test_case.assertEqual(z.grad_fn, None)
 
-    @unittest.skipIf(
-        not flow.unittest.env.eager_execution_enabled(),
-        "numpy doesn't work in lazy mode",
-    )
-    def test_tensor_clone(test_case):
-        shape = (2, 3, 4, 5)
-        x = flow.Tensor(
-            np.random.randn(*shape), dtype=flow.float32, requires_grad=True,
-        )
-        y = x.clone()
-        test_case.assertTrue(np.allclose(y.numpy(), x.numpy(), 1e-4, 1e-4))
-        test_case.assertEqual(y.requires_grad, True)
-        test_case.assertEqual(y.is_leaf, False)
-        # Cannot print Copy grad function
-        test_case.assertTrue(y.grad_fn != None)
-
     @unittest.skipIf(
         not flow.unittest.env.eager_execution_enabled(),
         "numpy doesn't work in lazy mode",
@@ -1132,6 +1116,15 @@ class TestTensor(flow.unittest.TestCase):
                 1e-5,
             )
         )
+        of_input.grad = of_input.grad * 2
+        test_case.assertTrue(
+            np.allclose(
+                of_input.grad.detach().numpy(),
+                2 * np.full(np_input.shape, rand_init + rand_scale),
+                1e-5,
+                1e-5,
+            )
+        )
 
 
 if __name__ == "__main__":
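For reference, a hedged sketch of the failure mode the sharpened assertions in `check_grad` now report. The setup mirrors the test above; the exact message text follows the f-strings added in tensor.py, though its precise formatting (e.g. the shape repr) is an assumption:

```python
import numpy as np
import oneflow as flow

x = flow.Tensor(np.ones((2, 3)), dtype=flow.float32, requires_grad=True)
# ... forward + backward so that x.grad exists ...
try:
    x.grad = flow.Tensor(np.ones((3, 2)))  # deliberately mismatched shape
except AssertionError as e:
    print(e)  # e.g. "Shapes of grads are not equal: (2, 3) vs (3, 2)"
```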