Skip to content
Snippets Groups Projects
Unverified Commit 9356c9d7 authored by cheng cheng's avatar cheng cheng Committed by GitHub
Browse files

Remove IOConf (#5419)


Co-authored-by: oneflow-ci-bot <69100618+oneflow-ci-bot@users.noreply.github.com>
parent 915a9884
No related branches found
No related tags found
No related merge requests found
......@@ -4,11 +4,6 @@ package oneflow;
import "oneflow/core/job/job.proto";
import "oneflow/core/job/resource.proto";
// I/O configuration flags. NOTE(review): this message is deleted by this
// commit — both fields are moved into `Resource` (as fields 41 and 42, same
// names and defaults) and all readers are redirected to
// Global<ResourceDesc, ForSession>::Get()->resource().
message IOConf {
// Use the v2 model-I/O job builder (MakeModelIoV2Jobs) instead of the legacy one.
optional bool enable_model_io_v2 = 5 [default = false];
// Enable the legacy model-I/O path; gates the whole model-I/O job construction.
optional bool enable_legacy_model_io = 6 [default = false];
}
// Profiler configuration, installed as Global<const ProfilerConf> per session.
message ProfilerConf {
// When true (and this process is master), a Profiler global is created to
// collect actor activity events.
optional bool collect_act_event = 1 [default = false];
}
......@@ -37,7 +32,6 @@ message InterJobReuseMemStrategy {
message ConfigProto {
required Resource resource = 1;
required IOConf io_conf = 2;
optional ProfilerConf profiler_conf = 3;
repeated string load_lib_path = 4;
required int64 session_id = 5;
......
......@@ -1124,8 +1124,9 @@ Maybe<void> CompileJobsAndMergePlans(const PbRpf<Job>& job_confs, Plan& plan) {
CHECK(!job_desc.Bool("__is_user_function__"));
jobs.emplace_back(new Job(*job));
};
if (Global<const IOConf>::Get()->enable_legacy_model_io()) {
if (Global<const IOConf>::Get()->enable_model_io_v2()) {
if (Global<ResourceDesc, ForSession>::Get()->resource().enable_legacy_model_io()) {
if (Global<ResourceDesc, ForSession>::Get()->resource().enable_model_io_v2()) {
MakeModelIoV2Jobs(jobs, var_op_name2parallel_blob_conf, AppendJob);
} else {
MakeModelIoJobs(jobs, var_op_name2parallel_blob_conf, AppendJob);
......
......@@ -50,12 +50,18 @@ message Resource {
optional bool enable_thread_local_cache = 16 [default = true];
optional int64 thread_local_cache_max_size = 17 [default = 67108864]; // 64M
optional bool enable_debug_mode = 18 [default = false];
optional CollectiveBoxingConf collective_boxing_conf = 19;
optional bool enable_tensor_float_32_compute = 20 [default = true];
optional bool enable_mem_chain_merge = 21 [default = true];
optional CollectiveBoxingConf collective_boxing_conf = 19;
// NOTE(chengcheng) to reuse nccl memory and speed up
optional bool nccl_use_compute_stream = 30 [default = false];
optional bool disable_group_boxing_by_dst_parallel = 31 [default = false];
optional CudnnConfig cudnn_conf = 32;
// io_conf
optional bool enable_model_io_v2 = 41 [default = false];
optional bool enable_legacy_model_io = 42 [default = false];
}
......@@ -27,11 +27,9 @@ int64_t NewSessionId() {
}
ConfigProtoContext::ConfigProtoContext(const ConfigProto& config_proto)
: session_id_(config_proto.session_id()) {
Global<const IOConf>::SessionNew(session_id_, config_proto.io_conf());
}
: session_id_(config_proto.session_id()) {}
ConfigProtoContext::~ConfigProtoContext() { Global<const IOConf>::SessionDelete(session_id_); }
ConfigProtoContext::~ConfigProtoContext() {}
LogicalConfigProtoContext::LogicalConfigProtoContext(const std::string& config_proto_str) {
ConfigProto config_proto;
......
......@@ -99,8 +99,6 @@ Maybe<void> SessionGlobalObjectsScope::Init(const ConfigProto& config_proto) {
DumpVersionInfo();
Global<ResourceDesc, ForSession>::New(config_proto.resource(),
GlobalProcessCtx::NumOfProcessPerNode());
Global<const IOConf>::New(config_proto.io_conf());
Global<const IOConf>::SessionNew(config_proto.session_id(), config_proto.io_conf());
Global<const ProfilerConf>::New(config_proto.profiler_conf());
Global<IDMgr>::New();
if (GlobalProcessCtx::IsThisProcessMaster()
......@@ -130,7 +128,6 @@ Maybe<void> SessionGlobalObjectsScope::EagerInit(const ConfigProto& config_proto
Global<ResourceDesc, ForSession>::Delete();
DumpVersionInfo();
Global<ResourceDesc, ForSession>::New(config_proto.resource());
Global<const IOConf>::New(config_proto.io_conf());
Global<const ProfilerConf>::New(config_proto.profiler_conf());
if (GlobalProcessCtx::IsThisProcessMaster()
&& Global<const ProfilerConf>::Get()->collect_act_event()) {
......@@ -153,8 +150,6 @@ SessionGlobalObjectsScope::~SessionGlobalObjectsScope() {
if (Global<Profiler>::Get() != nullptr) { Global<Profiler>::Delete(); }
Global<IDMgr>::Delete();
Global<const ProfilerConf>::Delete();
Global<const IOConf>::Delete();
Global<const IOConf>::SessionDelete(session_id_);
Global<ResourceDesc, ForSession>::Delete();
Global<ResourceDesc, ForSession>::New(Global<ResourceDesc, ForEnv>::Get()->resource(),
GlobalProcessCtx::NumOfProcessPerNode());
......
......@@ -302,7 +302,7 @@ def enable_debug_mode(val):
@oneflow_export("config.legacy_model_io_enabled")
def api_legacy_model_io_enabled():
sess = session_ctx.GetDefaultSession()
return sess.config_proto.io_conf.enable_legacy_model_io
return sess.config_proto.resource.enable_legacy_model_io
@oneflow_export("config.enable_legacy_model_io")
......@@ -319,7 +319,7 @@ def api_enable_legacy_model_io(val: bool = True):
def enable_legacy_model_io(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.io_conf.enable_legacy_model_io = val
sess.config_proto.resource.enable_legacy_model_io = val
@oneflow_export("config.enable_model_io_v2")
......@@ -336,7 +336,7 @@ def api_enable_model_io_v2(val):
def enable_model_io_v2(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.io_conf.enable_model_io_v2 = val
sess.config_proto.resource.enable_model_io_v2 = val
@oneflow_export("config.collect_act_event")
......
......@@ -512,7 +512,6 @@ def _GetDefaultConfigProto():
else:
config_proto.resource.cpu_device_num = 1
config_proto.resource.gpu_device_num = 0
config_proto.io_conf.SetInParent()
config_proto.session_id = session_ctx.GetDefaultSession().id
return config_proto
......
......@@ -230,7 +230,7 @@ class InferenceSession(object):
"not supported device tag {}".format(self.option_.device_tag)
)
self.config_proto_.io_conf.enable_legacy_model_io = True
self.config_proto_.resource.enable_legacy_model_io = True
def set_checkpoint_path(self, checkpoint_path):
self._check_status(self.SessionStatus.OPEN)
......
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment