Unverified commit dab6ba23, authored by Yurui Li, committed by GitHub

Support creating CPU-only tensors (#4863)


* support create cpu tensor

* add empty op

* remove skipped tensor test case

* remove TODO

Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
parent 986becca
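For orientation before the diff: the user-visible effect of this commit is that an eager tensor can now be allocated without touching a CUDA device. The sketch below is assembled from the test changes shown later in this diff and is only illustrative; in particular, the import line is an assumption about how the package is imported, not something this commit shows.

import oneflow as flow

shape = (2, 3, 4, 5)

# A plain construction no longer requires a CUDA device; with no inputs the
# eager interpreter falls back to the default device, which is ("cpu", 0).
x = flow.Tensor(*shape)
assert not x.is_cuda

# The device can still be chosen explicitly, on either side.
x_cpu = flow.Tensor(*shape, device=flow.device("cpu"))
assert not x_cpu.is_cuda

x_cuda = flow.Tensor(*shape, device=flow.device("cuda"))
assert x_cuda.is_cuda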
@@ -111,6 +111,17 @@ Maybe<one::UserOpExpr> OnesOp(const Shape& shape, const DataType& dtype, const std::string& name) {
   }
 }
+Maybe<one::UserOpExpr> EmptyOp(const Shape& shape, const DataType& dtype) {
+  return EmptyOp(shape, dtype, UniqueOpName("empty"));
+}
+Maybe<one::UserOpExpr> EmptyOp(const Shape& shape, const DataType& dtype, const std::string& name) {
+  return one::OpBuilder("empty", name)
+      .Output("out")
+      .Attr<DataType>("dtype", dtype)
+      .Attr<Shape>("shape", shape)
+      .Build();
+}
 Maybe<one::UserOpExpr> IdentityOp() { return IdentityOp(UniqueOpName("identity")); }
 Maybe<one::UserOpExpr> IdentityOp(const std::string& name) {
   return one::OpBuilder("identity", name).Input("in").Output("out").Build();
@@ -34,6 +34,9 @@ Maybe<one::UserOpExpr> ZerosOp(const Shape& shape, const DataType& dtype, const std::string& name);
 Maybe<one::UserOpExpr> ZeroLikeOp();
 Maybe<one::UserOpExpr> ZeroLikeOp(const std::string& name);
+Maybe<one::UserOpExpr> EmptyOp(const Shape& shape, const DataType& dtype);
+Maybe<one::UserOpExpr> EmptyOp(const Shape& shape, const DataType& dtype, const std::string& name);
 Maybe<one::UserOpExpr> OnesLikeOp();
 Maybe<one::UserOpExpr> OnesLikeOp(const std::string& name);
@@ -39,17 +39,12 @@ Maybe<const Device> GetDefaultDevice() { return Device::New("cpu", 0); }
 }  // namespace
 Maybe<void> NaiveInterpret(const UserOpExpr& user_op_expr, const TensorTuple& inputs,
+                           const std::shared_ptr<const Device>& default_device,
                            const std::shared_ptr<EagerBlobObjectList>& output_eager_blob_objects,
                            const AttrMap& attrs,
                            std::vector<std::shared_ptr<const Device>>* out_devices) {
   std::shared_ptr<EagerBlobObjectList> input_eager_blob_objects =
       std::make_shared<EagerBlobObjectList>(inputs.size());
-  std::shared_ptr<const Device> default_device;
-  if (inputs.empty()) {
-    default_device = JUST(GetDefaultDevice());
-  } else {
-    default_device = inputs.at(0)->device();
-  }
   for (int i = 0; i < inputs.size(); i++) {
     if (i > 0) { CHECK_OR_RETURN(*default_device == *inputs.at(i)->device()); }
     input_eager_blob_objects->at(i) = JUST(inputs.at(i)->eager_blob_object());
@@ -118,14 +113,29 @@ Maybe<void> NaiveInterpret(const UserOpExpr& user_op_expr, const TensorTuple& inputs,
   return Maybe<void>::Ok();
 }
-Maybe<vm::EagerBlobObject> GenerateAllocatedEagerBlobObject(DataType data_type,
-                                                            const Shape& shape) {
-  const auto zeros_expr = JUST(op_expr_helper::ZerosOp(shape, data_type));
+Maybe<void> NaiveInterpret(const UserOpExpr& user_op_expr, const TensorTuple& inputs,
+                           const std::shared_ptr<EagerBlobObjectList>& output_eager_blob_objects,
+                           const AttrMap& attrs,
+                           std::vector<std::shared_ptr<const Device>>* out_devices) {
+  std::shared_ptr<const Device> default_device;
+  if (inputs.empty()) {
+    default_device = JUST(GetDefaultDevice());
+  } else {
+    default_device = inputs.at(0)->device();
+  }
+  return NaiveInterpret(user_op_expr, inputs, default_device, output_eager_blob_objects, attrs,
+                        out_devices);
+}
+Maybe<vm::EagerBlobObject> GenerateAllocatedEagerBlobObject(
+    DataType data_type, const Shape& shape, const std::shared_ptr<const Device>& device) {
+  const auto empty_expr = JUST(op_expr_helper::EmptyOp(shape, data_type));
   std::shared_ptr<TensorTuple> inputs = std::make_shared<TensorTuple>();
   std::shared_ptr<EagerBlobObjectList> output_eager_blob_objects =
       std::make_shared<EagerBlobObjectList>(1);
   std::vector<std::shared_ptr<const Device>> out_devices(1);
-  JUST(NaiveInterpret(*zeros_expr, *inputs, output_eager_blob_objects, AttrMap{}, &out_devices));
+  JUST(NaiveInterpret(*empty_expr, *inputs, device, output_eager_blob_objects, AttrMap{},
+                      &out_devices));
   return output_eager_blob_objects->at(0);
 }
@@ -16,7 +16,11 @@ limitations under the License.
 #include "oneflow/core/eager/eager_blob_object.h"
 namespace oneflow {
+class Device;
 namespace one {
-Maybe<vm::EagerBlobObject> GenerateAllocatedEagerBlobObject(DataType data_type, const Shape& shape);
+Maybe<vm::EagerBlobObject> GenerateAllocatedEagerBlobObject(
+    DataType data_type, const Shape& shape, const std::shared_ptr<const Device>& device);
 }
 }  // namespace oneflow
@@ -32,7 +32,7 @@ std::shared_ptr<MirroredTensor> MirroredTensor::MakeTensor(
     impl = std::make_shared<LazyMirroredTensorImpl>(shape, dtype, device, requires_grad, is_leaf);
   } else {
     const auto eager_blob_object =
-        CHECK_JUST(GenerateAllocatedEagerBlobObject(dtype->data_type(), *shape));
+        CHECK_JUST(GenerateAllocatedEagerBlobObject(dtype->data_type(), *shape, device));
     impl = std::make_shared<EagerMirroredTensorImpl>(eager_blob_object, device, requires_grad,
                                                      is_leaf);
   }
@@ -136,8 +136,8 @@ class TestTensor(flow.unittest.TestCase):
         shape = (2, 3, 4, 5)
         x = flow.Tensor(*shape)
         test_case.assertTrue(not x.is_cuda)
-        # x = flow.Tensor(*shape, device=flow.device("cuda"))
-        # test_case.assertTrue(x.is_cuda)
+        x = flow.Tensor(*shape, device=flow.device("cuda"))
+        test_case.assertTrue(x.is_cuda)
         x = flow.Tensor(*shape, device=flow.device("cpu"))
         test_case.assertTrue(not x.is_cuda)
@@ -162,9 +162,8 @@ class TestTensor(flow.unittest.TestCase):
         with flow.no_grad():
             m = x + y
-        # TODO: fix this autograd test case
-        # test_case.assertTrue(m.is_leaf)
-        # test_case.assertFalse(m.requires_grad)
+        test_case.assertTrue(m.is_leaf)
+        test_case.assertFalse(m.requires_grad)
         v = flow.Tensor(*shape, requires_grad=True)
         z.retain_grad()
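The assertions re-enabled in the last hunk follow from standard autograd semantics: an op executed under no_grad is not recorded, so its output is a leaf tensor that does not require gradients. A minimal standalone sketch of that behavior (same import assumption as the earlier example):

import oneflow as flow

x = flow.Tensor(2, 3, requires_grad=True)
y = flow.Tensor(2, 3, requires_grad=True)

with flow.no_grad():
    m = x + y  # not tracked by autograd

# Because no graph was recorded for this op, m is a leaf and needs no grad.
assert m.is_leaf
assert not m.requires_grad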