diff --git a/pkg/vm/driver/test/storage_test.go b/pkg/vm/driver/test/storage_test.go
index 1ab4c0ffc4cf2ff976ff6e3e6da4cbc5901467a6..640d3b85a50a725cb992d753213dd89d8b20124b 100644
--- a/pkg/vm/driver/test/storage_test.go
+++ b/pkg/vm/driver/test/storage_test.go
@@ -30,7 +30,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/common/codec"
 	"matrixone/pkg/vm/engine/aoe/common/helper"
 	"matrixone/pkg/vm/engine/aoe/storage"
-	md "matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
+	"matrixone/pkg/vm/engine/aoe/storage/adaptor"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"sync"
 	"testing"
@@ -55,7 +55,7 @@ const (
 var tableInfo *aoe.TableInfo
 
 func init() {
-	tableInfo = md.MockTableInfo(colCnt)
+	tableInfo = adaptor.MockTableInfo(colCnt)
 	tableInfo.Id = 100
 }
 
diff --git a/pkg/vm/engine/aoe/cmd/read/main.go b/pkg/vm/engine/aoe/cmd/read/main.go
index e5c0bcc7216f6e14b98fac84881619f99ace415b..2f6ea6fbf17e360905f1216778e226250dd5a3c4 100644
--- a/pkg/vm/engine/aoe/cmd/read/main.go
+++ b/pkg/vm/engine/aoe/cmd/read/main.go
@@ -25,7 +25,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/common"
 	"matrixone/pkg/vm/engine/aoe/storage/db"
 	"matrixone/pkg/vm/engine/aoe/storage/dbi"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	w "matrixone/pkg/vm/engine/aoe/storage/worker"
 	"matrixone/pkg/vm/mmu/host"
diff --git a/pkg/vm/engine/aoe/engine/engine_test.go b/pkg/vm/engine/aoe/engine/engine_test.go
index afdac4f04a9101ddf556ec8e2d391479230238b4..a633b39103916288e517377b72e48554a2e50773 100644
--- a/pkg/vm/engine/aoe/engine/engine_test.go
+++ b/pkg/vm/engine/aoe/engine/engine_test.go
@@ -30,7 +30,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/common/codec"
 	"matrixone/pkg/vm/engine/aoe/common/helper"
 	"matrixone/pkg/vm/engine/aoe/storage"
-	md "matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
+	"matrixone/pkg/vm/engine/aoe/storage/adaptor"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"sync"
 
@@ -164,7 +164,7 @@ func TestAOEEngine(t *testing.T) {
 	tbls := db.Relations()
 	require.Equal(t, 0, len(tbls))
 
-	mockTbl := md.MockTableInfo(colCnt)
+	mockTbl := adaptor.MockTableInfo(colCnt)
 	mockTbl.Name = fmt.Sprintf("%s%d", tableName, time.Now().Unix())
 	_, _, _, _, comment, defs, pdef, _ := helper.UnTransfer(*mockTbl)
 
@@ -313,7 +313,7 @@ func testTableDDL(t *testing.T, c []*catalog2.Catalog) {
 	require.Nil(t, tbs)
 
 	colCnt := 4
-	t1 := md.MockTableInfo(colCnt)
+	t1 := adaptor.MockTableInfo(colCnt)
 	t1.Name = "t1"
 
 	tid, err := c[0].CreateTable(1, dbid, *t1)
@@ -325,7 +325,7 @@ func testTableDDL(t *testing.T, c []*catalog2.Catalog) {
 	require.NotNil(t, tb)
 	require.Equal(t, aoe.StatePublic, tb.State)
 
-	t2 := md.MockTableInfo(colCnt)
+	t2 := adaptor.MockTableInfo(colCnt)
 	t2.Name = "t2"
 	_, err = c[0].CreateTable(2, dbid, *t2)
 	require.NoError(t, err)
diff --git a/pkg/vm/engine/aoe/storage/adaptor/meta.go b/pkg/vm/engine/aoe/storage/adaptor/meta.go
index c1dc86e1e80110c2453643ce5f45724e23ee305a..1544291aea2f94eade303c6df4824d0c56128986 100644
--- a/pkg/vm/engine/aoe/storage/adaptor/meta.go
+++ b/pkg/vm/engine/aoe/storage/adaptor/meta.go
@@ -19,7 +19,7 @@ import (
 	"matrixone/pkg/container/types"
 	"matrixone/pkg/vm/engine/aoe"
 	"matrixone/pkg/vm/engine/aoe/storage/dbi"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 )
 
 func MockTableInfo(colCnt int) *aoe.TableInfo {
diff --git a/pkg/vm/engine/aoe/storage/checkpointer.go b/pkg/vm/engine/aoe/storage/checkpointer.go
deleted file mode 100644
index 4454c300e883f9938c9dbf83a73b3c86ac764b80..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/checkpointer.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package storage
-
-import (
-	"errors"
-	"matrixone/pkg/logutil"
-	"matrixone/pkg/vm/engine/aoe/storage/common"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
-	"os"
-	"path/filepath"
-)
-
-var (
-	ErrAlreadyExist = errors.New("ckp already done")
-)
-
-type checkpointerFactory struct {
-	dir string
-}
-
-func NewCheckpointerFactory(dir string) *checkpointerFactory {
-	factory := &checkpointerFactory{
-		dir: dir,
-	}
-	return factory
-}
-
-func (f *checkpointerFactory) Create() *checkpointer {
-	ck := &checkpointer{
-		factory: f,
-	}
-	return ck
-}
-
-type checkpointer struct {
-	factory *checkpointerFactory
-	tmpfile string
-}
-
-func (ck *checkpointer) PreCommit(res metadata.Resource) error {
-	if res == nil {
-		logutil.Error("nil res")
-		return errors.New("nil res")
-	}
-	var fname string
-	switch res.GetResourceType() {
-	case metadata.ResInfo:
-		fname = common.MakeInfoCkpFileName(ck.factory.dir, res.GetFileName(), true)
-	case metadata.ResTable:
-		fname = common.MakeTableCkpFileName(ck.factory.dir, res.GetFileName(), res.GetTableId(), true)
-	default:
-		panic("not supported")
-	}
-	// log.Infof("PreCommit CheckPoint: %s", fname)
-	if _, err := os.Stat(fname); err == nil {
-		return ErrAlreadyExist
-	}
-	dir := filepath.Dir(fname)
-	if _, err := os.Stat(dir); os.IsNotExist(err) {
-		err = os.MkdirAll(dir, 0755)
-		if err != nil {
-			return err
-		}
-	}
-	w, err := os.Create(fname)
-	if err != nil {
-		return err
-	}
-	defer w.Close()
-	err = res.Serialize(w)
-	if err != nil {
-		return err
-	}
-	ck.tmpfile = fname
-	return nil
-}
-
-func (ck *checkpointer) Commit(res metadata.Resource) error {
-	if len(ck.tmpfile) == 0 {
-		return errors.New("Cannot Commit checkpoint, should do PreCommit before")
-	}
-	fname, err := common.FilenameFromTmpfile(ck.tmpfile)
-	if err != nil {
-		return err
-	}
-	// log.Infof("Commit CheckPoint: %s", fname)
-	err = os.Rename(ck.tmpfile, fname)
-	return err
-}
-
-func (ck *checkpointer) Load() error {
-	// TODO
-	return nil
-}
diff --git a/pkg/vm/engine/aoe/storage/db/blk_test.go b/pkg/vm/engine/aoe/storage/db/blk_test.go
index bfbffe33cc3928db8c11c34b77f09a1186151b67..5e603db1ebb64c093800c88444086c2f22744193 100644
--- a/pkg/vm/engine/aoe/storage/db/blk_test.go
+++ b/pkg/vm/engine/aoe/storage/db/blk_test.go
@@ -21,7 +21,7 @@ import (
 	bmgr "matrixone/pkg/vm/engine/aoe/storage/buffer/manager"
 	ldio "matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"os"
 	"sync"
 	"sync/atomic"
diff --git a/pkg/vm/engine/aoe/storage/db/db.go b/pkg/vm/engine/aoe/storage/db/db.go
index 82f312a38c89917472f52f1e0d4ddd82f22321c1..6069aac5dd6113dc8bddbd0c70146f13c9f6b2c9 100644
--- a/pkg/vm/engine/aoe/storage/db/db.go
+++ b/pkg/vm/engine/aoe/storage/db/db.go
@@ -33,7 +33,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/handle"
 	tiface "matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
 	mtif "matrixone/pkg/vm/engine/aoe/storage/memtable/v1/base"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	bb "matrixone/pkg/vm/engine/aoe/storage/mutation/buffer/base"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 	"matrixone/pkg/vm/engine/aoe/storage/wal"
diff --git a/pkg/vm/engine/aoe/storage/db/db_test.go b/pkg/vm/engine/aoe/storage/db/db_test.go
index 71291f9f2c3af22f7bb448301eda0e6ffffa86f0..a19eb6dbfd6a39c21d14f17ccfb36e96eef6b94c 100644
--- a/pkg/vm/engine/aoe/storage/db/db_test.go
+++ b/pkg/vm/engine/aoe/storage/db/db_test.go
@@ -26,7 +26,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/dbi"
 	"matrixone/pkg/vm/engine/aoe/storage/internal/invariants"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"matrixone/pkg/vm/engine/aoe/storage/testutils/config"
 	"os"
diff --git a/pkg/vm/engine/aoe/storage/db/factories.go b/pkg/vm/engine/aoe/storage/db/factories.go
index 38c548c5a7bca5aaeed0ecbb594c47a4c0d40c84..ca4a6283535baa8b6a3c4fa1bc024489acaf75c2 100644
--- a/pkg/vm/engine/aoe/storage/db/factories.go
+++ b/pkg/vm/engine/aoe/storage/db/factories.go
@@ -18,7 +18,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/container/batch"
 	"matrixone/pkg/vm/engine/aoe/storage/db/sched"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	bb "matrixone/pkg/vm/engine/aoe/storage/mutation/buffer/base"
 )
 
diff --git a/pkg/vm/engine/aoe/storage/db/factories/base/types.go b/pkg/vm/engine/aoe/storage/db/factories/base/types.go
index 6f06986281456eb004c9e6541b65821dbdfd02f1..c737f8bedac5353d0b71563af6dc3730ac9e0f2d 100644
--- a/pkg/vm/engine/aoe/storage/db/factories/base/types.go
+++ b/pkg/vm/engine/aoe/storage/db/factories/base/types.go
@@ -17,7 +17,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
 	imem "matrixone/pkg/vm/engine/aoe/storage/memtable/v1/base"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	mb "matrixone/pkg/vm/engine/aoe/storage/mutation/base"
 	bb "matrixone/pkg/vm/engine/aoe/storage/mutation/buffer/base"
 )
diff --git a/pkg/vm/engine/aoe/storage/db/factories/factories_test.go b/pkg/vm/engine/aoe/storage/db/factories/factories_test.go
index 9518eb52460f721e88bab41a3b77653477f83410..708b490e6fb43bd93f7fa6d433d2af43a59016ce 100644
--- a/pkg/vm/engine/aoe/storage/db/factories/factories_test.go
+++ b/pkg/vm/engine/aoe/storage/db/factories/factories_test.go
@@ -20,7 +20,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	ldio "matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"matrixone/pkg/vm/engine/aoe/storage/mutation"
 	mb "matrixone/pkg/vm/engine/aoe/storage/mutation/base"
diff --git a/pkg/vm/engine/aoe/storage/db/factories/mutblock.go b/pkg/vm/engine/aoe/storage/db/factories/mutblock.go
index fdd34f90f9fe8737cb9aa4593c4b51500148863d..f49743e6f3062f310390a99629d53af1dcd980f8 100644
--- a/pkg/vm/engine/aoe/storage/db/factories/mutblock.go
+++ b/pkg/vm/engine/aoe/storage/db/factories/mutblock.go
@@ -17,7 +17,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mutation"
 	mb "matrixone/pkg/vm/engine/aoe/storage/mutation/base"
 	bb "matrixone/pkg/vm/engine/aoe/storage/mutation/buffer/base"
diff --git a/pkg/vm/engine/aoe/storage/db/filter_test.go b/pkg/vm/engine/aoe/storage/db/filter_test.go
index 4ed0e0449282371fd8868461fc126fbdf0854704..7d759579604afab4ba9753376e8c78eace828674 100644
--- a/pkg/vm/engine/aoe/storage/db/filter_test.go
+++ b/pkg/vm/engine/aoe/storage/db/filter_test.go
@@ -23,7 +23,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/index"
 	table2 "matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"os"
 	"strconv"
diff --git a/pkg/vm/engine/aoe/storage/db/open_test.go b/pkg/vm/engine/aoe/storage/db/open_test.go
index 6be72c3e68d08c71687ad41ea7c00ba9ba5dcb65..458e13678b85746fbf34baad42e257de8d555464 100644
--- a/pkg/vm/engine/aoe/storage/db/open_test.go
+++ b/pkg/vm/engine/aoe/storage/db/open_test.go
@@ -21,7 +21,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/common"
 	"matrixone/pkg/vm/engine/aoe/storage/dbi"
 	"matrixone/pkg/vm/engine/aoe/storage/internal/invariants"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"os"
 	"path"
diff --git a/pkg/vm/engine/aoe/storage/db/relation.go b/pkg/vm/engine/aoe/storage/db/relation.go
index 53ee33c59e18f534a3b2596d8e58f7adb77d1e68..d84c25a3b3f9aa82f6c49c806abb1b630be9d98b 100644
--- a/pkg/vm/engine/aoe/storage/db/relation.go
+++ b/pkg/vm/engine/aoe/storage/db/relation.go
@@ -18,7 +18,7 @@ import (
 	"matrixone/pkg/vm/engine"
 	"matrixone/pkg/vm/engine/aoe/storage/dbi"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	md "matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	md "matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/metadata"
 	"matrixone/pkg/vm/process"
 	"sync"
diff --git a/pkg/vm/engine/aoe/storage/db/replay.go b/pkg/vm/engine/aoe/storage/db/replay.go
index cd9844d750a672592a8911873d9bb1f93a687b17..370344bf06583c45ffafc24a3e95e7d59b3ec4f7 100644
--- a/pkg/vm/engine/aoe/storage/db/replay.go
+++ b/pkg/vm/engine/aoe/storage/db/replay.go
@@ -22,7 +22,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/common"
 	dbsched "matrixone/pkg/vm/engine/aoe/storage/db/sched"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"os"
 	"path"
 	"sort"
diff --git a/pkg/vm/engine/aoe/storage/db/replay_test.go b/pkg/vm/engine/aoe/storage/db/replay_test.go
index deb355082ec92b19e9b2d9bb827ebc3fce923792..9f13d22a7fe136cb9d4567cc49889511d712aec0 100644
--- a/pkg/vm/engine/aoe/storage/db/replay_test.go
+++ b/pkg/vm/engine/aoe/storage/db/replay_test.go
@@ -19,7 +19,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/common"
 	"matrixone/pkg/vm/engine/aoe/storage/dbi"
 	"matrixone/pkg/vm/engine/aoe/storage/internal/invariants"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"os"
 	"sort"
diff --git a/pkg/vm/engine/aoe/storage/db/sched/commitblk.go b/pkg/vm/engine/aoe/storage/db/sched/commitblk.go
index 406e0300af6d0a7b8eae762b1ae6e6cff446e204..0d1769a1da1c3cc683fedbaa111a7db3500067ce 100644
--- a/pkg/vm/engine/aoe/storage/db/sched/commitblk.go
+++ b/pkg/vm/engine/aoe/storage/db/sched/commitblk.go
@@ -15,7 +15,7 @@
 package sched
 
 import (
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 )
 
diff --git a/pkg/vm/engine/aoe/storage/db/sched/flushblk.go b/pkg/vm/engine/aoe/storage/db/sched/flushblk.go
index b7a57fbfb87b88098604fee4d825cde9339a1cd2..42174ae17df59bbecb86fffe48d42d513be093a3 100644
--- a/pkg/vm/engine/aoe/storage/db/sched/flushblk.go
+++ b/pkg/vm/engine/aoe/storage/db/sched/flushblk.go
@@ -17,7 +17,7 @@ package sched
 import (
 	"matrixone/pkg/logutil"
 	imem "matrixone/pkg/vm/engine/aoe/storage/memtable/v1/base"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 )
 
diff --git a/pkg/vm/engine/aoe/storage/db/sched/flushmemblk.go b/pkg/vm/engine/aoe/storage/db/sched/flushmemblk.go
index a39182c10dee90f099a89b3189ff22e6c5fc3496..49e3e31730e932f84e26b423bf8c5f33334589cd 100644
--- a/pkg/vm/engine/aoe/storage/db/sched/flushmemblk.go
+++ b/pkg/vm/engine/aoe/storage/db/sched/flushmemblk.go
@@ -18,7 +18,7 @@ import (
 	"matrixone/pkg/logutil"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	mb "matrixone/pkg/vm/engine/aoe/storage/mutation/base"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 )
diff --git a/pkg/vm/engine/aoe/storage/db/sched/flushtblk.go b/pkg/vm/engine/aoe/storage/db/sched/flushtblk.go
index 07dbc4fdd71f5c49ac99adbe948427c89bbe063f..9db14043aecb1ae2a3ce1c85d136806bc37fbf87 100644
--- a/pkg/vm/engine/aoe/storage/db/sched/flushtblk.go
+++ b/pkg/vm/engine/aoe/storage/db/sched/flushtblk.go
@@ -16,7 +16,7 @@ package sched
 import (
 	"matrixone/pkg/vm/engine/aoe/storage/container/batch"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mutation/buffer/base"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 	// "matrixone/pkg/logutil"
diff --git a/pkg/vm/engine/aoe/storage/db/sched/scheduler.go b/pkg/vm/engine/aoe/storage/db/sched/scheduler.go
index 06ac151e9737109e11d228b964785c3bc4e5b60c..e85f43450568cc8d42567beee4feb32bba3112f2 100644
--- a/pkg/vm/engine/aoe/storage/db/sched/scheduler.go
+++ b/pkg/vm/engine/aoe/storage/db/sched/scheduler.go
@@ -18,7 +18,7 @@ import (
 	"matrixone/pkg/logutil"
 	"matrixone/pkg/vm/engine/aoe/storage"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 	"sync"
 )
diff --git a/pkg/vm/engine/aoe/storage/db/sched/types_test.go b/pkg/vm/engine/aoe/storage/db/sched/types_test.go
index 09024175f735e7f7246406978af5cfd8ef5a8730..003600913bce9ceedad2536189c5751b4352a742 100644
--- a/pkg/vm/engine/aoe/storage/db/sched/types_test.go
+++ b/pkg/vm/engine/aoe/storage/db/sched/types_test.go
@@ -20,7 +20,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	ldio "matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"os"
 	"sync"
 	"testing"
diff --git a/pkg/vm/engine/aoe/storage/db/sched/upgradeblk.go b/pkg/vm/engine/aoe/storage/db/sched/upgradeblk.go
index 984c3c6b2a9f032e9230bdc57e81fff13a3aaadd..16c4c6bdc56f874dfe798ae49203efbc2124d3e2 100644
--- a/pkg/vm/engine/aoe/storage/db/sched/upgradeblk.go
+++ b/pkg/vm/engine/aoe/storage/db/sched/upgradeblk.go
@@ -16,7 +16,7 @@ package sched
 
 import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 )
 
diff --git a/pkg/vm/engine/aoe/storage/db/seg_test.go b/pkg/vm/engine/aoe/storage/db/seg_test.go
index 2a22c124b754dd1f567680937a2cc80de05b3e55..648222419264fa0a8885516318c0401a54a6fdb8 100644
--- a/pkg/vm/engine/aoe/storage/db/seg_test.go
+++ b/pkg/vm/engine/aoe/storage/db/seg_test.go
@@ -20,7 +20,7 @@ import (
 	bmgr "matrixone/pkg/vm/engine/aoe/storage/buffer/manager"
 	ldio "matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"os"
 	"sync"
 	"sync/atomic"
diff --git a/pkg/vm/engine/aoe/storage/event/types.go b/pkg/vm/engine/aoe/storage/event/types.go
index 035e441daa0fdfcc20aa2186af111aae07e6d535..f896112f773d9899c2daf04bc3c6c6e5f369e0a7 100644
--- a/pkg/vm/engine/aoe/storage/event/types.go
+++ b/pkg/vm/engine/aoe/storage/event/types.go
@@ -17,7 +17,6 @@ package event
 import (
 	"matrixone/pkg/logutil"
 	imem "matrixone/pkg/vm/engine/aoe/storage/memtable/v1/base"
-	md "matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 )
 
 type EventListener struct {
@@ -25,8 +24,6 @@ type EventListener struct {
 	MemTableFullCB    func(imem.IMemTable)
 	FlushBlockBeginCB func(imem.IMemTable)
 	FlushBlockEndCB   func(imem.IMemTable)
-	CheckpointStartCB func(*md.MetaInfo)
-	CheckpointEndCB   func(*md.MetaInfo)
 }
 
 func (l *EventListener) FillDefaults() {
@@ -47,12 +44,4 @@ func (l *EventListener) FillDefaults() {
 	if l.FlushBlockEndCB == nil {
 		l.FlushBlockEndCB = func(table imem.IMemTable) {}
 	}
-
-	if l.CheckpointStartCB == nil {
-		l.CheckpointStartCB = func(info *md.MetaInfo) {}
-	}
-
-	if l.CheckpointEndCB == nil {
-		l.CheckpointEndCB = func(info *md.MetaInfo) {}
-	}
 }
diff --git a/pkg/vm/engine/aoe/storage/events/memdata/createsegblk.go b/pkg/vm/engine/aoe/storage/events/memdata/createsegblk.go
index 2f9d100ade7c660c0f9f8d67747ea57e08bad20a..1e6330834f52537d9844bb049fb72c3ed1ee61e3 100644
--- a/pkg/vm/engine/aoe/storage/events/memdata/createsegblk.go
+++ b/pkg/vm/engine/aoe/storage/events/memdata/createsegblk.go
@@ -16,7 +16,7 @@ package memdata
 
 import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 )
 
diff --git a/pkg/vm/engine/aoe/storage/events/memdata/types.go b/pkg/vm/engine/aoe/storage/events/memdata/types.go
index 5249ab5e6e017b2fa50cb02a943df3e7be8b7aad..b87d0b3537743a26c17b0748e5bfcce5d0aa5849 100644
--- a/pkg/vm/engine/aoe/storage/events/memdata/types.go
+++ b/pkg/vm/engine/aoe/storage/events/memdata/types.go
@@ -20,7 +20,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
 	mtif "matrixone/pkg/vm/engine/aoe/storage/memtable/v1/base"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/ops"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 )
diff --git a/pkg/vm/engine/aoe/storage/events/meta/createblk.go b/pkg/vm/engine/aoe/storage/events/meta/createblk.go
index b696442e87f9246e06ca944b21ba6b4beeeba6c2..69df9e2721bc1de7a9d4c383c08dce4044266797 100644
--- a/pkg/vm/engine/aoe/storage/events/meta/createblk.go
+++ b/pkg/vm/engine/aoe/storage/events/meta/createblk.go
@@ -18,7 +18,7 @@ import (
 	dbsched "matrixone/pkg/vm/engine/aoe/storage/db/sched"
 	"matrixone/pkg/vm/engine/aoe/storage/events/memdata"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 )
 
diff --git a/pkg/vm/engine/aoe/storage/events/meta/droptable.go b/pkg/vm/engine/aoe/storage/events/meta/droptable.go
index f57bb6f8cddcd419ef95c59a83843a447520a77e..bd2b1a67c680b3ed8bbc324c3f53ebfbbb0e7c1e 100644
--- a/pkg/vm/engine/aoe/storage/events/meta/droptable.go
+++ b/pkg/vm/engine/aoe/storage/events/meta/droptable.go
@@ -22,7 +22,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/dbi"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
 	mtif "matrixone/pkg/vm/engine/aoe/storage/memtable/v1/base"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 )
 
diff --git a/pkg/vm/engine/aoe/storage/events/meta/types_test.go b/pkg/vm/engine/aoe/storage/events/meta/types_test.go
index 33e0bf56e589bc8338f2d82d2fc5099a9771e12c..333ad5c6394ff481ce0798a8d21c64b5f4761026 100644
--- a/pkg/vm/engine/aoe/storage/events/meta/types_test.go
+++ b/pkg/vm/engine/aoe/storage/events/meta/types_test.go
@@ -16,7 +16,7 @@ package meta
 
 import (
 	"matrixone/pkg/vm/engine/aoe/storage/db/sched"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/testutils/config"
 	"os"
 	"testing"
diff --git a/pkg/vm/engine/aoe/storage/layout/dataio/blk.go b/pkg/vm/engine/aoe/storage/layout/dataio/blk.go
index 6dafe1b7a0f48812c06724302c763290985d7cb4..9393ca9471a5521b96a1f67806e106d1ac1bcf6e 100644
--- a/pkg/vm/engine/aoe/storage/layout/dataio/blk.go
+++ b/pkg/vm/engine/aoe/storage/layout/dataio/blk.go
@@ -23,7 +23,7 @@ import (
 	"matrixone/pkg/prefetch"
 	"matrixone/pkg/vm/engine/aoe/storage/common"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"os"
 	"path/filepath"
 )
diff --git a/pkg/vm/engine/aoe/storage/layout/dataio/blk_test.go b/pkg/vm/engine/aoe/storage/layout/dataio/blk_test.go
index 578ab7252a9bc1f9c085266640a654a2c65041a1..0dbdf814a7ac28f164f514678a51694cc777aff6 100644
--- a/pkg/vm/engine/aoe/storage/layout/dataio/blk_test.go
+++ b/pkg/vm/engine/aoe/storage/layout/dataio/blk_test.go
@@ -28,7 +28,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/container/vector"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/index"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"os"
 	"path/filepath"
diff --git a/pkg/vm/engine/aoe/storage/layout/dataio/blkwriter.go b/pkg/vm/engine/aoe/storage/layout/dataio/blkwriter.go
index e4949883ff4444aa99f3ac514526a6a0113b4afd..140132a24fd3f11f6eed7abbe6ffc69456f44709 100644
--- a/pkg/vm/engine/aoe/storage/layout/dataio/blkwriter.go
+++ b/pkg/vm/engine/aoe/storage/layout/dataio/blkwriter.go
@@ -24,7 +24,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/common"
 	"matrixone/pkg/vm/engine/aoe/storage/container/batch"
 	"matrixone/pkg/vm/engine/aoe/storage/container/vector"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"os"
 	"path/filepath"
 
diff --git a/pkg/vm/engine/aoe/storage/layout/dataio/segwriter.go b/pkg/vm/engine/aoe/storage/layout/dataio/segwriter.go
index b0c15169d548a79a325623cbace477bb52482db2..a12b5992e7f36a1723342d13f07258f1977f3a29 100644
--- a/pkg/vm/engine/aoe/storage/layout/dataio/segwriter.go
+++ b/pkg/vm/engine/aoe/storage/layout/dataio/segwriter.go
@@ -24,7 +24,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/mergesort"
 	"matrixone/pkg/vm/engine/aoe/storage/common"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/index"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"os"
 	"path/filepath"
 
diff --git a/pkg/vm/engine/aoe/storage/layout/dataio/sortedseg.go b/pkg/vm/engine/aoe/storage/layout/dataio/sortedseg.go
index 7a564e68dc44ff85a88f15a5686aafa4e8dce837..ffc452078a06d9a8bb31c1dedbc10d8daa2097a6 100644
--- a/pkg/vm/engine/aoe/storage/layout/dataio/sortedseg.go
+++ b/pkg/vm/engine/aoe/storage/layout/dataio/sortedseg.go
@@ -26,7 +26,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/common"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/index"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"os"
 	"path/filepath"
 )
diff --git a/pkg/vm/engine/aoe/storage/layout/dataio/tblk.go b/pkg/vm/engine/aoe/storage/layout/dataio/tblk.go
index a1c6b1504e9245e811c088301f94a5b01800bf8a..c8788a9ce7c6e0ba45bee6e6b22f7e8a3dd48c09 100644
--- a/pkg/vm/engine/aoe/storage/layout/dataio/tblk.go
+++ b/pkg/vm/engine/aoe/storage/layout/dataio/tblk.go
@@ -22,7 +22,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/container/batch"
 	"matrixone/pkg/vm/engine/aoe/storage/container/vector"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"os"
 	"path/filepath"
 	"sync"
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/baseblk.go b/pkg/vm/engine/aoe/storage/layout/table/v1/baseblk.go
index 7e2db3fc9d5a509e57b9ba3118ee5ae5beb8c8aa..8b683247dc24d7f32e490d435c098b8e8aab7252 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/baseblk.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/baseblk.go
@@ -20,7 +20,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/index"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 )
 
 type sllnode = common.SLLNode
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/blk.go b/pkg/vm/engine/aoe/storage/layout/table/v1/blk.go
index fa5f030de70db11a92f7a7c15c3a180c37c15a49..56e292a39477d28548be37a1a45a15fad296c26d 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/blk.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/blk.go
@@ -27,7 +27,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/col"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/wrapper"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 )
 
 type block struct {
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/col/blk.go b/pkg/vm/engine/aoe/storage/layout/table/v1/col/blk.go
index a3326b74460cc3b9501ad2a456bd52d5d24e0b3a..6a329fdba7ef95620e2fc02697f023314b67a17c 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/col/blk.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/col/blk.go
@@ -24,7 +24,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/index"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"sync"
 	"sync/atomic"
 )
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/col/stdblk.go b/pkg/vm/engine/aoe/storage/layout/table/v1/col/stdblk.go
index 6126c4de218d2cbea6b01ac130223681496f7c59..eb8ef24be87ca1de82bf76f37cf3d47d1e472c5c 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/col/stdblk.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/col/stdblk.go
@@ -22,7 +22,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/dbi"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"time"
 )
 
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/data.go b/pkg/vm/engine/aoe/storage/layout/table/v1/data.go
index afd374cc7d28dc589314060f166f54f4da8e28ac..9b1c8bdaae74b370e62bf0cd177d1ba60ef831e8 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/data.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/data.go
@@ -24,7 +24,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/index"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"sync"
 	"sync/atomic"
 	"unsafe"
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/data_test.go b/pkg/vm/engine/aoe/storage/layout/table/v1/data_test.go
index 217f30bce24514b396247f7d9baac895306a9904..c952aab3c49abe17730d534ecf0b85a9272e585d 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/data_test.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/data_test.go
@@ -19,7 +19,7 @@ import (
 	bmgr "matrixone/pkg/vm/engine/aoe/storage/buffer/manager"
 	"matrixone/pkg/vm/engine/aoe/storage/common"
 	ldio "matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"os"
 	"sync"
 	"testing"
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/factory.go b/pkg/vm/engine/aoe/storage/layout/table/v1/factory.go
index d3a2c504aafd566812c3118015f8f2c5b560470a..4ce32df6aa9e77a803160da8519eea912add8f6d 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/factory.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/factory.go
@@ -17,7 +17,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/db/factories/base"
 	fb "matrixone/pkg/vm/engine/aoe/storage/db/factories/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 )
 
 type altBlockFactory struct {
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/handle/types_test.go b/pkg/vm/engine/aoe/storage/layout/table/v1/handle/types_test.go
index 1e39e57e6214bec3454010843dbe615f63e23bde..5ab52c7bf097388508f11b372854dc49e6569804 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/handle/types_test.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/handle/types_test.go
@@ -19,7 +19,7 @@ import (
 	bmgr "matrixone/pkg/vm/engine/aoe/storage/buffer/manager"
 	ldio "matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"os"
 	"sync"
 	"testing"
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/iface/types.go b/pkg/vm/engine/aoe/storage/layout/table/v1/iface/types.go
index 72c72ece46e13d428594c8fb19f683e4f4e7d607..049b6aa3cc41620f890218d6d7ff7ff271eca666 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/iface/types.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/iface/types.go
@@ -25,7 +25,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/dbi"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/index"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	mb "matrixone/pkg/vm/engine/aoe/storage/mutation/base"
 	bb "matrixone/pkg/vm/engine/aoe/storage/mutation/buffer/base"
 )
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/seg.go b/pkg/vm/engine/aoe/storage/layout/table/v1/seg.go
index f4dbc3f30df0df954273bdb06e088f30435c5c52..d919bf5924161bbb3e239c6ce46cb19b678e832a 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/seg.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/seg.go
@@ -22,7 +22,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/index"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"sync"
 	"sync/atomic"
 )
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/tblk.go b/pkg/vm/engine/aoe/storage/layout/table/v1/tblk.go
index 8d42b43a52b1dcd00ef445e08a4f3a49e5aedfb0..83f40541e28ea58d92139aa2cabfc47dc31a74f9 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/tblk.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/tblk.go
@@ -24,7 +24,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/dbi"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/wrapper"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	mb "matrixone/pkg/vm/engine/aoe/storage/mutation/base"
 	bb "matrixone/pkg/vm/engine/aoe/storage/mutation/buffer/base"
 	"runtime"
diff --git a/pkg/vm/engine/aoe/storage/layout/table/v1/tblk_test.go b/pkg/vm/engine/aoe/storage/layout/table/v1/tblk_test.go
index 08e34495ec36db6dc35dbb8794a1e7c5a4c6235b..9b8a3edb280414a1e195bd2af2f42979a944c4f4 100644
--- a/pkg/vm/engine/aoe/storage/layout/table/v1/tblk_test.go
+++ b/pkg/vm/engine/aoe/storage/layout/table/v1/tblk_test.go
@@ -23,7 +23,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/db/factories"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	ldio "matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"matrixone/pkg/vm/engine/aoe/storage/mutation"
 	mb "matrixone/pkg/vm/engine/aoe/storage/mutation/base"
diff --git a/pkg/vm/engine/aoe/storage/memtable/v1/base/types.go b/pkg/vm/engine/aoe/storage/memtable/v1/base/types.go
index 39c308f3a4dbd1382e89ec997b7e7cbdc2790124..f6208db1c9c986aa9ad8ac072f77bdaaa02d3d3a 100644
--- a/pkg/vm/engine/aoe/storage/memtable/v1/base/types.go
+++ b/pkg/vm/engine/aoe/storage/memtable/v1/base/types.go
@@ -19,7 +19,7 @@ import (
 	"matrixone/pkg/container/batch"
 	"matrixone/pkg/vm/engine/aoe/storage/buffer/node/iface"
 	"matrixone/pkg/vm/engine/aoe/storage/common"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"sync"
 )
 
diff --git a/pkg/vm/engine/aoe/storage/memtable/v1/collection.go b/pkg/vm/engine/aoe/storage/memtable/v1/collection.go
index b1a3ee3876e38978262302894a32ca1c77290910..2330030163ce14d53f40c53bc227d9767090e515 100644
--- a/pkg/vm/engine/aoe/storage/memtable/v1/collection.go
+++ b/pkg/vm/engine/aoe/storage/memtable/v1/collection.go
@@ -24,7 +24,7 @@ import (
 	me "matrixone/pkg/vm/engine/aoe/storage/events/meta"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
 	imem "matrixone/pkg/vm/engine/aoe/storage/memtable/v1/base"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"sync"
 )
 
diff --git a/pkg/vm/engine/aoe/storage/memtable/v1/mc_test.go b/pkg/vm/engine/aoe/storage/memtable/v1/mc_test.go
index 1ffe5b25e99939d8a266e72a036fe46d1cedfd45..2f5f1d22728f641f8d771a0dce9de3be86933748 100644
--- a/pkg/vm/engine/aoe/storage/memtable/v1/mc_test.go
+++ b/pkg/vm/engine/aoe/storage/memtable/v1/mc_test.go
@@ -23,7 +23,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/events/meta"
 	ldio "matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"matrixone/pkg/vm/engine/aoe/storage/mutation/buffer"
 	"matrixone/pkg/vm/engine/aoe/storage/testutils/config"
diff --git a/pkg/vm/engine/aoe/storage/memtable/v1/memtable.go b/pkg/vm/engine/aoe/storage/memtable/v1/memtable.go
index 598887246edeaa51e2c5b39a5c8dd8261edb67e1..6726adf06f977ef61cc3aa17eda5749205fdbe7d 100644
--- a/pkg/vm/engine/aoe/storage/memtable/v1/memtable.go
+++ b/pkg/vm/engine/aoe/storage/memtable/v1/memtable.go
@@ -22,7 +22,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/container/batch"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
 	imem "matrixone/pkg/vm/engine/aoe/storage/memtable/v1/base"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/wal/shard"
 	"sync"
 )
diff --git a/pkg/vm/engine/aoe/storage/memtable/v1/mt_test.go b/pkg/vm/engine/aoe/storage/memtable/v1/mt_test.go
index 7d798bb4416a3aaac37776617c0fe73e5565c613..812f69ab167d3a1425b526718b2cb881216edcef 100644
--- a/pkg/vm/engine/aoe/storage/memtable/v1/mt_test.go
+++ b/pkg/vm/engine/aoe/storage/memtable/v1/mt_test.go
@@ -19,7 +19,7 @@ import (
 	dbsched "matrixone/pkg/vm/engine/aoe/storage/db/sched"
 	ldio "matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"matrixone/pkg/vm/engine/aoe/storage/testutils/config"
 	"os"
diff --git a/pkg/vm/engine/aoe/storage/memtable/v1/mutcollection.go b/pkg/vm/engine/aoe/storage/memtable/v1/mutcollection.go
index fa4385414b39e35bf261bc08d59413494fba80c6..882ba9c95d57512231e1cf2be9e111a6a4b6f726 100644
--- a/pkg/vm/engine/aoe/storage/memtable/v1/mutcollection.go
+++ b/pkg/vm/engine/aoe/storage/memtable/v1/mutcollection.go
@@ -23,7 +23,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/base"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
 	imem "matrixone/pkg/vm/engine/aoe/storage/memtable/v1/base"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	mb "matrixone/pkg/vm/engine/aoe/storage/mutation/base"
 	"sync"
 )
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/base.go b/pkg/vm/engine/aoe/storage/metadata/v1/base.go
index f9e51ca3d101ca4075d7b21f2d4bf826a4557fc3..1199d9d2eea8edac2b802a543c7f6358110dba56 100644
--- a/pkg/vm/engine/aoe/storage/metadata/v1/base.go
+++ b/pkg/vm/engine/aoe/storage/metadata/v1/base.go
@@ -10,107 +10,177 @@
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
-// limitations under the License.
 
 package metadata
 
 import (
-	"errors"
 	"fmt"
-	"sync/atomic"
-	"time"
+	"matrixone/pkg/vm/engine/aoe/storage/common"
+	"sync"
 )
 
-func NowMicro() int64 {
-	return time.Now().UnixNano() / 1000
+type PPLevel uint8
+
+const (
+	PPL0 PPLevel = iota
+	PPL1
+	PPL2
+)
+
+type BaseEntry struct {
+	sync.RWMutex
+	Id         uint64
+	CommitInfo *CommitInfo
 }
 
-// NewTimeStamp generates a new timestamp created on current time.
-func NewTimeStamp() *TimeStamp {
-	ts := &TimeStamp{
-		CreatedOn: NowMicro(),
+func (e *BaseEntry) GetFirstCommit() *CommitInfo {
+	prev := e.CommitInfo
+	curr := prev.GetNext()
+	for curr != nil {
+		prev = curr.(*CommitInfo)
+		curr = curr.GetNext()
 	}
-	return ts
+	return prev
 }
 
-// Delete deletes ts and set the deleting time to t.
-func (ts *TimeStamp) Delete(t int64) error {
-	val := atomic.LoadInt64(&(ts.DeletedOn))
-	if val != 0 {
-		return errors.New("already deleted")
-	}
-	ok := atomic.CompareAndSwapInt64(&(ts.DeletedOn), val, t)
-	if !ok {
-		return errors.New("already deleted")
-	}
-	return nil
+func (e *BaseEntry) GetCommit() *CommitInfo {
+	e.RLock()
+	defer e.RUnlock()
+	return e.CommitInfo
 }
 
-// IsDeleted checks if ts was deleted on t.
-func (ts *TimeStamp) IsDeleted(t int64) bool {
-	delon := atomic.LoadInt64(&(ts.DeletedOn))
-	if delon != 0 {
-		if delon <= t {
-			return true
-		}
-	}
-	return false
+// Should be guarded
+func (e *BaseEntry) IsFull() bool {
+	return e.CommitInfo.Op == OpUpgradeFull
 }
 
-// IsCreated checks if ts was created on t.
-func (ts *TimeStamp) IsCreated(t int64) bool {
-	return ts.CreatedOn < t
+// Should be guarded
+func (e *BaseEntry) IsClose() bool {
+	return e.CommitInfo.Op == OpUpgradeClose
 }
 
-// Select returns true if ts has been created but not deleted on t.
-func (ts *TimeStamp) Select(t int64) bool {
-	if ts.IsDeleted(t) {
-		return false
-	}
-	return ts.IsCreated(t)
+// Should be guarded
+func (e *BaseEntry) IsSorted() bool {
+	return e.CommitInfo.Op == OpUpgradeSorted
+}
+
+func (e *BaseEntry) onNewCommit(info *CommitInfo) {
+	info.SetNext(e.CommitInfo)
+	e.CommitInfo = info
 }
 
-func (ts *TimeStamp) String() string {
-	s := fmt.Sprintf("ts(%d,%d,%d)", ts.CreatedOn, ts.UpdatedOn, ts.DeletedOn)
+func (e *BaseEntry) PString(level PPLevel) string {
+	s := fmt.Sprintf("Id=%d,%s", e.Id, e.CommitInfo.PString(level))
 	return s
 }
 
-func (state *BoundSate) GetBoundState() BoundSate {
-	return *state
+func (e *BaseEntry) GetAppliedIndex() (uint64, bool) {
+	curr := e.CommitInfo
+	id, ok := curr.GetAppliedIndex()
+	if ok {
+		return id, ok
+	}
+	next := curr.GetNext()
+	for next != nil {
+		id, ok = next.(*CommitInfo).GetAppliedIndex()
+		if ok {
+			return id, ok
+		}
+		next = next.GetNext()
+	}
+	return id, ok
 }
 
-func (state *BoundSate) Detach() error {
-	if *state == Detached || *state == Standalone {
-		return errors.New(fmt.Sprintf("detatched or stalone already: %d", *state))
+// Guarded by entry mutex
+func (e *BaseEntry) HasCommittedLocked() bool {
+	return !IsTransientCommitId(e.CommitInfo.CommitId)
+}
+
+func (e *BaseEntry) HasCommitted() bool {
+	e.RLock()
+	defer e.RUnlock()
+	return !IsTransientCommitId(e.CommitInfo.CommitId)
+}
+
+func (e *BaseEntry) CanUse(tranId uint64) bool {
+	e.RLock()
+	defer e.RUnlock()
+	if e.HasCommittedLocked() && e.CommitInfo.TranId > tranId {
+		return true
 	}
-	*state = Detached
-	return nil
+	return tranId == e.CommitInfo.TranId
+}
+
+func (e *BaseEntry) onCommitted(id uint64) *BaseEntry {
+	if e.CommitInfo.CommitId > id {
+		return nil
+	}
+	be := *e
+	return &be
 }
 
-func (state *BoundSate) Attach() error {
-	if *state == Attached {
-		return errors.New("already attached")
+func (e *BaseEntry) UseCommitted(id uint64) *BaseEntry {
+	e.RLock()
+	defer e.RUnlock()
+	// if e.HasCommittedLocked() {
+	// 	return e.onCommitted(id)
+	// }
+	var curr common.ISSLLNode
+	curr = e.CommitInfo
+	for curr != nil {
+		info := curr.(*CommitInfo)
+		// if info.IsHardDeleted() {
+		// 	return nil
+		// }
+		if !IsTransientCommitId(info.CommitId) && info.CommitId <= id {
+			cInfo := *info
+			return &BaseEntry{
+				Id:         e.Id,
+				CommitInfo: &cInfo,
+			}
+		}
+		curr = curr.GetNext()
 	}
-	*state = Attached
 	return nil
 }
 
-func (seq *Sequence) GetSegmentID() uint64 {
-	return atomic.AddUint64(&(seq.NextSegmentID), uint64(1))
+// Guarded by e.Lock()
+func (e *BaseEntry) IsSoftDeletedLocked() bool {
+	return e.CommitInfo.IsSoftDeleted()
+}
+
+func (e *BaseEntry) IsDeletedLocked() bool {
+	return e.IsSoftDeletedLocked() || e.IsHardDeletedLocked()
 }
 
-func (seq *Sequence) GetBlockID() uint64 {
-	return atomic.AddUint64(&(seq.NextBlockID), uint64(1))
+func (e *BaseEntry) IsDeleted() bool {
+	e.RLock()
+	defer e.RUnlock()
+	return e.IsSoftDeletedLocked() || e.IsHardDeletedLocked()
 }
 
-func (seq *Sequence) GetTableID() uint64 {
-	return atomic.AddUint64(&(seq.NextTableID), uint64(1))
+func (e *BaseEntry) IsSoftDeleted() bool {
+	e.RLock()
+	defer e.RUnlock()
+	return e.CommitInfo.IsSoftDeleted()
 }
 
-func (seq *Sequence) GetPartitionID() uint64 {
-	return atomic.AddUint64(&(seq.NextPartitionID), uint64(1))
+func (e *BaseEntry) IsHardDeletedLocked() bool {
+	return e.CommitInfo.IsHardDeleted()
 }
 
-func (seq *Sequence) GetIndexID() uint64 {
-	return atomic.AddUint64(&(seq.NextIndexID), uint64(1))
+func (e *BaseEntry) IsHardDeleted() bool {
+	e.RLock()
+	defer e.RUnlock()
+	return e.CommitInfo.IsHardDeleted()
+}
+
+func (e *BaseEntry) CommitLocked(id uint64) {
+	if IsTransientCommitId(id) {
+		panic(fmt.Sprintf("Cannot commit transient id %d", id))
+	}
+	if e.HasCommittedLocked() {
+		panic(fmt.Sprintf("Cannot commit committed entry"))
+	}
+	e.CommitInfo.CommitId = id
 }
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/base_test.go b/pkg/vm/engine/aoe/storage/metadata/v1/base_test.go
deleted file mode 100644
index 3e0f28425eb94d016f6f525e1e773c1fec4c2746..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v1/base_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metadata
-
-import (
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestBase(t *testing.T) {
-	ts1 := NewTimeStamp()
-	time.Sleep(time.Microsecond)
-	assert.False(t, ts1.IsDeleted(NowMicro()))
-	assert.True(t, ts1.IsCreated(NowMicro()))
-	assert.True(t, ts1.Select(NowMicro()))
-	assert.Nil(t, ts1.Delete(NowMicro()))
-	assert.True(t, ts1.IsDeleted(NowMicro()))
-	assert.False(t, ts1.Select(NowMicro()))
-	assert.NotNil(t, ts1.Delete(NowMicro()))
-	t.Log(ts1.String())
-	//assert.Equal(t, )
-}
-
-func TestBoundState(t *testing.T) {
-	bs := Standalone
-	assert.Equal(t, Standalone, bs.GetBoundState())
-	assert.Nil(t, bs.Attach())
-	assert.NotNil(t, bs.Attach())
-	assert.Nil(t, bs.Detach())
-	assert.NotNil(t, bs.Detach())
-}
-
-func TestSequence(t *testing.T) {
-	seq1 := Sequence{
-		NextBlockID:     0,
-		NextSegmentID:   0,
-		NextPartitionID: 0,
-		NextTableID:     0,
-		NextIndexID:     0,
-	}
-	assert.Equal(t, uint64(1), seq1.GetBlockID())
-	assert.Equal(t, uint64(1), seq1.GetIndexID())
-	assert.Equal(t, uint64(1), seq1.GetPartitionID())
-	assert.Equal(t, uint64(1), seq1.GetSegmentID())
-	assert.Equal(t, uint64(1), seq1.GetTableID())
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/block.go b/pkg/vm/engine/aoe/storage/metadata/v1/block.go
index 0e541bd21476023ac33b0428a7ebfc7904d8e414..e6606df46df540f3d924ba08fc0199985251942b 100644
--- a/pkg/vm/engine/aoe/storage/metadata/v1/block.go
+++ b/pkg/vm/engine/aoe/storage/metadata/v1/block.go
@@ -10,7 +10,6 @@
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
-// limitations under the License.
 
 package metadata
 
@@ -18,218 +17,282 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"matrixone/pkg/container/types"
 	"matrixone/pkg/vm/engine/aoe/storage/common"
+	"matrixone/pkg/vm/engine/aoe/storage/logstore"
+	"runtime"
+	"sync"
 	"sync/atomic"
 )
 
-func EstimateColumnBlockSize(colIdx int, meta *Block) uint64 {
-	switch meta.Segment.Table.Schema.ColDefs[colIdx].Type.Oid {
-	case types.T_json, types.T_char, types.T_varchar:
-		return meta.Segment.Table.Conf.BlockMaxRows * 2 * 4
-	default:
-		return meta.Segment.Table.Conf.BlockMaxRows * uint64(meta.Segment.Table.Schema.ColDefs[colIdx].Type.Size)
-	}
+var (
+	UpgradeInfullBlockErr = errors.New("aoe: upgrade infull block")
+)
+
+type blockLogEntry struct {
+	BaseEntry
+	Catalog   *Catalog `json:"-"`
+	TableId   uint64
+	SegmentId uint64
 }
 
-func EstimateBlockSize(meta *Block) uint64 {
-	size := uint64(0)
-	for colIdx, _ := range meta.Segment.Table.Schema.ColDefs {
-		size += EstimateColumnBlockSize(colIdx, meta)
-	}
-	return size
+func (e *blockLogEntry) Marshal() ([]byte, error) {
+	return json.Marshal(e)
 }
 
-func NewBlock(id uint64, segment *Segment) *Block {
-	blk := &Block{
-		ID:          id,
-		TimeStamp:   *NewTimeStamp(),
-		MaxRowCount: segment.Table.Conf.BlockMaxRows,
-		Segment:     segment,
-	}
-	return blk
+func (e *blockLogEntry) Unmarshal(buf []byte) error {
+	return json.Unmarshal(buf, e)
 }
 
-// GetReplayIndex returns the clone of replay index for this block if exists.
-func (blk *Block) GetReplayIndex() *LogIndex {
-	if blk.Index == nil {
-		return nil
+func (e *blockLogEntry) ToEntry() *Block {
+	entry := &Block{
+		BaseEntry: e.BaseEntry,
 	}
-	ctx := &LogIndex{
-		ID:       blk.Index.ID,
-		Start:    blk.Index.Start,
-		Count:    blk.Index.Count,
-		Capacity: blk.Index.Capacity,
-	}
-	return ctx
+	table := e.Catalog.TableSet[e.TableId]
+	entry.Segment = table.GetSegment(e.SegmentId, MinUncommitId)
+	return entry
 }
 
-// GetAppliedIndex returns ID of the applied index (previous index or the current
-// index if it's applied) and true if exists, otherwise returns 0 and false.
-func (blk *Block) GetAppliedIndex() (uint64, bool) {
-	blk.RLock()
-	defer blk.RUnlock()
-	if blk.Index != nil && blk.Index.IsBatchApplied() {
-		return blk.Index.ID.Id, true
-	}
+type Block struct {
+	BaseEntry
+	Segment     *Segment `json:"-"`
+	Count       uint64
+	SegmentedId uint64
+}
 
-	if blk.PrevIndex != nil && blk.PrevIndex.IsBatchApplied() {
-		return blk.PrevIndex.ID.Id, true
+func newBlockEntry(segment *Segment, tranId uint64, exIndex *ExternalIndex) *Block {
+	e := &Block{
+		Segment: segment,
+		BaseEntry: BaseEntry{
+			Id: segment.Table.Catalog.NextBlockId(),
+			CommitInfo: &CommitInfo{
+				CommitId:      tranId,
+				TranId:        tranId,
+				SSLLNode:      *common.NewSSLLNode(),
+				Op:            OpCreate,
+				ExternalIndex: exIndex,
+			},
+		},
 	}
+	return e
+}
 
-	return 0, false
+func newCommittedBlockEntry(segment *Segment, base *BaseEntry) *Block {
+	e := &Block{
+		Segment:   segment,
+		BaseEntry: *base,
+	}
+	return e
 }
 
-func (blk *Block) GetID() uint64 {
-	return blk.ID
+func (e *Block) View() (view *Block) {
+	e.RLock()
+	view = &Block{
+		BaseEntry:   BaseEntry{Id: e.Id, CommitInfo: e.CommitInfo},
+		Segment:     e.Segment,
+		Count:       e.Count,
+		SegmentedId: e.SegmentedId,
+	}
+	e.RUnlock()
+	return
 }
 
-func (blk *Block) TryUpgrade() bool {
-	blk.Lock()
-	defer blk.Unlock()
-	if blk.Count == blk.MaxRowCount {
-		blk.DataState = FULL
+// Safe
+func (e *Block) Less(o *Block) bool {
+	if e == nil {
 		return true
 	}
-	return false
+	return e.Id < o.Id
+}
+
+func (e *Block) rebuild(segment *Segment) {
+	e.Segment = segment
 }
 
-func (blk *Block) GetSegmentID() uint64 {
-	return blk.Segment.ID
+// Safe
+func (e *Block) AsCommonID() *common.ID {
+	return &common.ID{
+		TableID:   e.Segment.Table.Id,
+		SegmentID: e.Segment.Id,
+		BlockID:   e.Id,
+	}
 }
 
-func (blk *Block) GetCount() uint64 {
-	return atomic.LoadUint64(&blk.Count)
+// Not safe
+// One writer, multi-readers
+func (e *Block) SetSegmentedId(id uint64) error {
+	atomic.StoreUint64(&e.SegmentedId, id)
+	return nil
 }
 
-func (blk *Block) AddCount(n uint64) (uint64, error) {
-	curCnt := blk.GetCount()
-	if curCnt+n > blk.Segment.Table.Conf.BlockMaxRows {
-		return 0, errors.New(fmt.Sprintf("block row count %d > block max rows %d", curCnt+n, blk.Segment.Table.Conf.BlockMaxRows))
+// Safe
+func (e *Block) GetAppliedIndex(rwmtx *sync.RWMutex) (uint64, bool) {
+	if rwmtx == nil {
+		e.RLock()
+		defer e.RUnlock()
 	}
-	for !atomic.CompareAndSwapUint64(&blk.Count, curCnt, curCnt+n) {
-		curCnt = blk.GetCount()
-		if curCnt+n > blk.Segment.Table.Conf.BlockMaxRows {
-			return 0, errors.New(fmt.Sprintf("block row count %d > block max rows %d", curCnt+n, blk.Segment.Table.Conf.BlockMaxRows))
+	if !e.IsFull() {
+		id := atomic.LoadUint64(&e.SegmentedId)
+		if id == 0 {
+			return id, false
 		}
+		return id, true
 	}
-	return curCnt + n, nil
+	return e.BaseEntry.GetAppliedIndex()
 }
 
-// SetIndex changes the current index to previous index if exists, and
-// sets the current index to idx.
-func (blk *Block) SetIndex(idx LogIndex) error {
-	blk.Lock()
-	defer blk.Unlock()
-	if blk.Index != nil {
-		if !blk.Index.IsApplied() {
-			return errors.New(fmt.Sprintf("block already has applied index: %s", blk.Index.ID.String()))
-		}
-		blk.PrevIndex = blk.Index
-		blk.Index = &idx
-	} else {
-		if blk.PrevIndex != nil {
-			return errors.New(fmt.Sprintf("block has no index but has prev index: %s", blk.PrevIndex.ID.String()))
-		}
-		blk.Index = &idx
-	}
-	return nil
+// Not safe
+func (e *Block) HasMaxRows() bool {
+	return e.Count == e.Segment.Table.Schema.BlockMaxRows
 }
 
-func (blk *Block) String() string {
-	s := fmt.Sprintf("Blk(%d-%d-%d)(DataState=%d)", blk.Segment.Table.ID, blk.Segment.ID, blk.ID, blk.DataState)
-	if blk.IsDeleted(NowMicro()) {
-		s += "[D]"
-	}
-	if blk.Count == blk.MaxRowCount {
-		s += "[F]"
+// Not safe
+func (e *Block) SetIndex(idx LogIndex) error {
+	return e.CommitInfo.SetIndex(idx)
+}
+
+// Not safe
+// TODO: should be safe
+func (e *Block) GetCount() uint64 {
+	if e.IsFull() {
+		return e.Segment.Table.Schema.BlockMaxRows
 	}
-	return s
+	return atomic.LoadUint64(&e.Count)
 }
 
-func (blk *Block) IsFull() bool {
-	return blk.Count == blk.MaxRowCount
+// Not safe
+// TODO: should be safe
+func (e *Block) AddCount(n uint64) (uint64, error) {
+	curCnt := e.GetCount()
+	if curCnt+n > e.Segment.Table.Schema.BlockMaxRows {
+		return 0, errors.New(fmt.Sprintf("block row count %d > block max rows %d", curCnt+n, e.Segment.Table.Schema.BlockMaxRows))
+	}
+	for !atomic.CompareAndSwapUint64(&e.Count, curCnt, curCnt+n) {
+		runtime.Gosched()
+		curCnt = e.GetCount()
+		if curCnt+n > e.Segment.Table.Schema.BlockMaxRows {
+			return 0, errors.New(fmt.Sprintf("block row count %d > block max rows %d", curCnt+n, e.Segment.Table.Schema.BlockMaxRows))
+		}
+	}
+	return curCnt + n, nil
 }
 
-// SetCount sets blk row count to count, changing its
-// DataState if needed.
-func (blk *Block) SetCount(count uint64) error {
-	blk.Lock()
-	defer blk.Unlock()
-	if count > blk.MaxRowCount {
+// TODO: remove it. Should not needed
+func (e *Block) SetCount(count uint64) error {
+	if count > e.Segment.Table.Schema.BlockMaxRows {
 		return errors.New("SetCount exceeds max limit")
 	}
-	if count < blk.Count {
+	if count < e.Count {
 		return errors.New("SetCount cannot set smaller count")
 	}
-	blk.Count = count
-	if count < blk.MaxRowCount {
-		blk.DataState = PARTIAL
-	} else {
-		blk.DataState = FULL
-	}
+	e.Count = count
 	return nil
 }
 
-// Update upgrades the current blk to target block if possible.
-func (blk *Block) Update(target *Block) error {
-	blk.Lock()
-	defer blk.Unlock()
-	if blk.ID != target.ID || blk.Segment.ID != target.Segment.ID || blk.Segment.Table.ID != target.Segment.Table.ID {
-		return errors.New("block, segment, table id not matched")
+// Safe
+func (e *Block) CommittedView(id uint64) *Block {
+	baseEntry := e.UseCommitted(id)
+	if baseEntry == nil {
+		return nil
 	}
-
-	if blk.MaxRowCount != target.MaxRowCount {
-		return errors.New("update block MaxRowCount not matched")
+	return &Block{
+		BaseEntry: *baseEntry,
 	}
+}
+
+// Safe
+func (e *Block) SimpleUpgrade(exIndice []*ExternalIndex) error {
+	ctx := newUpgradeBlockCtx(e, exIndice)
+	return e.Segment.Table.Catalog.onCommitRequest(ctx)
+	// return e.Upgrade(e.Segment.Table.Catalog.NextUncommitId(), exIndice, true)
+}
 
-	if blk.DataState > target.DataState {
-		return errors.New(fmt.Sprintf("Cannot Update block from DataState %d to %d", blk.DataState, target.DataState))
+// func (e *Block) Upgrade(tranId uint64, exIndice []*ExternalIndex, autoCommit bool) error {
+func (e *Block) prepareUpgrade(ctx *upgradeBlockCtx) (LogEntry, error) {
+	if e.GetCount() != e.Segment.Table.Schema.BlockMaxRows {
+		return nil, UpgradeInfullBlockErr
+	}
+	tranId := e.Segment.Table.Catalog.NextUncommitId()
+	e.Lock()
+	defer e.Unlock()
+	var newOp OpT
+	switch e.CommitInfo.Op {
+	case OpCreate:
+		newOp = OpUpgradeFull
+	default:
+		return nil, UpgradeNotNeededErr
+	}
+	cInfo := &CommitInfo{
+		TranId:   tranId,
+		CommitId: tranId,
+		Op:       newOp,
 	}
+	if ctx.exIndice != nil {
+		cInfo.ExternalIndex = ctx.exIndice[0]
+		if len(ctx.exIndice) > 1 {
+			cInfo.PrevIndex = ctx.exIndice[1]
+		}
+	} else {
+		cInfo.ExternalIndex = e.CommitInfo.ExternalIndex
+		id, ok := e.BaseEntry.GetAppliedIndex()
+		if ok {
+			cInfo.AppliedIndex = &ExternalIndex{
+				Id: SimpleBatchId(id),
+			}
+		}
+	}
+	e.onNewCommit(cInfo)
+	logEntry := e.Segment.Catalog.prepareCommitEntry(e, ETUpgradeBlock, e)
+	return logEntry, nil
+}
 
-	if blk.Count > target.Count {
-		return errors.New(fmt.Sprintf("Cannot Update block from Count %d to %d", blk.Count, target.Count))
+func (e *Block) toLogEntry() *blockLogEntry {
+	return &blockLogEntry{
+		BaseEntry: e.BaseEntry,
+		Catalog:   e.Segment.Catalog,
+		TableId:   e.Segment.Table.Id,
+		SegmentId: e.Segment.Id,
 	}
-	target.copyNoLock(blk)
-	blk.Segment.Table.UpdateVersion()
+}
 
-	return nil
+func (e *Block) Marshal() ([]byte, error) {
+	return json.Marshal(e)
 }
 
-// AsCommonID generates the unique commonID for the block.
-func (blk *Block) AsCommonID() *common.ID {
-	return &common.ID{
-		TableID:   blk.Segment.Table.ID,
-		SegmentID: blk.Segment.ID,
-		BlockID:   blk.ID,
-	}
+func (e *Block) Unmarshal(buf []byte) error {
+	return json.Unmarshal(buf, e)
 }
 
-func (blk *Block) Marshal() ([]byte, error) {
-	return json.Marshal(blk)
+// Not safe
+func (e *Block) PString(level PPLevel) string {
+	s := fmt.Sprintf("<Block %s>", e.BaseEntry.PString(level))
+	return s
 }
 
-func (blk *Block) Copy() *Block {
-	blk.RLock()
-	defer blk.RUnlock()
-	var newBlk *Block
-	newBlk = blk.copyNoLock(newBlk)
-	return newBlk
+// Not safe
+func (e *Block) String() string {
+	buf, _ := e.Marshal()
+	return string(buf)
 }
 
-func (blk *Block) copyNoLock(newBlk *Block) *Block {
-	if newBlk == nil {
-		newBlk = NewBlock(blk.ID, blk.Segment)
+// Not safe
+func (e *Block) ToLogEntry(eType LogEntryType) LogEntry {
+	switch eType {
+	case ETCreateBlock:
+		break
+	case ETUpgradeBlock:
+		break
+	case ETDropBlock:
+		if !e.IsSoftDeletedLocked() {
+			panic("logic error")
+		}
+		break
+	default:
+		panic("not supported")
 	}
-	newBlk.Segment = blk.Segment
-	newBlk.ID = blk.ID
-	newBlk.TimeStamp = blk.TimeStamp
-	newBlk.MaxRowCount = blk.MaxRowCount
-	newBlk.BoundSate = blk.BoundSate
-	newBlk.Count = blk.Count
-	newBlk.Index = blk.Index
-	newBlk.PrevIndex = blk.PrevIndex
-	newBlk.DataState = blk.DataState
-
-	return newBlk
+	entry := e.toLogEntry()
+	buf, _ := entry.Marshal()
+	logEntry := logstore.NewAsyncBaseEntry()
+	logEntry.Meta.SetType(eType)
+	logEntry.Unmarshal(buf)
+	return logEntry
 }
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/block_test.go b/pkg/vm/engine/aoe/storage/metadata/v1/block_test.go
deleted file mode 100644
index 957274890296a8e7c761c034ca71a90262d0f7c7..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v1/block_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metadata
-
-import (
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/panjf2000/ants/v2"
-	"github.com/stretchr/testify/assert"
-)
-
-func TestBlock(t *testing.T) {
-	mu := &sync.RWMutex{}
-	ts1 := NowMicro()
-	time.Sleep(time.Microsecond)
-	info1 := MockInfo(mu, 2, 2)
-	info2 := MockInfo(mu, 1000, 4)
-	schema1 := MockSchema(2)
-	schema2 := MockVarCharSchema(2)
-
-	tbl1 := NewTable(NextGlobalSeqNum(), info1, schema1, 1)
-	seg1 := NewSegment(tbl1, info1.Sequence.GetSegmentID())
-	blk1 := NewBlock(info1.Sequence.GetBlockID(), seg1)
-	time.Sleep(time.Duration(1) * time.Microsecond)
-	ts2 := NowMicro()
-
-	tbl2 := NewTable(NextGlobalSeqNum(), info2, schema2, 2)
-	seg2 := NewSegment(tbl2, info2.Sequence.GetSegmentID())
-	blk2 := NewBlock(info2.Sequence.GetBlockID(), seg2)
-	time.Sleep(time.Duration(1) * time.Microsecond)
-	ts3 := NowMicro()
-	assert.Equal(t, "Blk(1-1-1)(DataState=0)", blk1.String())
-
-	assert.False(t, blk1.Select(ts1))
-	assert.True(t, blk1.Select(ts2))
-	assert.Nil(t, blk1.Delete(ts3))
-
-	time.Sleep(time.Duration(1) * time.Microsecond)
-	ts4 := NowMicro()
-	assert.False(t, blk1.IsFull())
-	assert.Nil(t, blk1.SetCount(1))
-	assert.False(t, blk1.TryUpgrade())
-	_, err := blk1.AddCount(1)
-	assert.Nil(t, err)
-	_, err = blk1.AddCount(1)
-	assert.NotNil(t, err)
-	assert.True(t, blk1.TryUpgrade())
-	assert.True(t, blk1.IsFull())
-	assert.Equal(t, "Blk(1-1-1)(DataState=2)[D][F]", blk1.String())
-	assert.True(t, blk1.IsDeleted(ts4))
-	assert.False(t, blk1.Select(ts4))
-	assert.True(t, blk1.Select(ts2))
-
-	assert.Equal(t, uint64(1), blk2.GetID())
-	assert.Equal(t, uint64(1), blk2.GetSegmentID())
-	assert.Equal(t, Standalone, blk2.GetBoundState())
-
-	assert.Equal(t, blk1.MaxRowCount*4*2, EstimateBlockSize(blk1))
-
-	assert.Nil(t, blk2.GetReplayIndex())
-	_, has := blk2.GetAppliedIndex()
-	assert.False(t, has)
-
-	blk3 := blk2.Copy()
-	assert.NotNil(t, blk1.Update(blk3))
-	blk3.Segment.Table.ID = blk1.Segment.Table.ID
-	assert.NotNil(t, blk1.Update(blk3))
-	blk3.MaxRowCount = blk1.MaxRowCount
-	assert.NotNil(t, blk1.Update(blk3))
-	blk3.DataState = blk1.DataState
-	assert.NotNil(t, blk1.Update(blk3))
-	assert.Nil(t, blk3.SetCount(blk1.GetCount()))
-	assert.Nil(t, blk1.Update(blk3))
-
-	assert.Nil(t, blk2.SetCount(1))
-	assert.Equal(t, blk2.DataState, PARTIAL)
-
-	assert.Equal(t, blk2.ID, blk2.copyNoLock(nil).ID)
-	assert.NotNil(t, blk2.SetCount(0))
-	assert.NotNil(t, blk2.SetCount(88888))
-	assert.Nil(t, blk2.SetCount(1000))
-	assert.Equal(t, blk2.DataState, FULL)
-
-	assert.Equal(t, blk2.ID, blk2.AsCommonID().BlockID)
-	n, _ := blk1.Marshal()
-	t.Log(string(n))
-	assert.Equal(t, 149, len(n))
-
-	assert.Equal(t, blk2.MaxRowCount*8*2, EstimateBlockSize(blk2))
-
-	blk4 := NewBlock(info2.Sequence.GetBlockID(), seg2)
-	pool, _ := ants.NewPool(20)
-	var wg sync.WaitGroup
-	for i := 0; i < 1000; i++ {
-		wg.Add(1)
-		pool.Submit(func() {
-			blk4.AddCount(1)
-			wg.Done()
-		})
-	}
-	wg.Wait()
-	assert.Equal(t, uint64(1000), blk4.GetCount())
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/catalog.go b/pkg/vm/engine/aoe/storage/metadata/v1/catalog.go
similarity index 100%
rename from pkg/vm/engine/aoe/storage/metadata/v2/catalog.go
rename to pkg/vm/engine/aoe/storage/metadata/v1/catalog.go
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/commit.go b/pkg/vm/engine/aoe/storage/metadata/v1/commit.go
similarity index 100%
rename from pkg/vm/engine/aoe/storage/metadata/v2/commit.go
rename to pkg/vm/engine/aoe/storage/metadata/v1/commit.go
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/ctx.go b/pkg/vm/engine/aoe/storage/metadata/v1/ctx.go
similarity index 100%
rename from pkg/vm/engine/aoe/storage/metadata/v2/ctx.go
rename to pkg/vm/engine/aoe/storage/metadata/v1/ctx.go
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/exindex.go b/pkg/vm/engine/aoe/storage/metadata/v1/exindex.go
similarity index 100%
rename from pkg/vm/engine/aoe/storage/metadata/v2/exindex.go
rename to pkg/vm/engine/aoe/storage/metadata/v1/exindex.go
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/info.go b/pkg/vm/engine/aoe/storage/metadata/v1/info.go
deleted file mode 100644
index 472d19944f1dfed4ebe334e1a368e98366cf2175..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v1/info.go
+++ /dev/null
@@ -1,375 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metadata
-
-import (
-	"encoding/json"
-	dump "encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"matrixone/pkg/vm/engine/aoe"
-	"matrixone/pkg/vm/engine/aoe/storage/dbi"
-	"sync"
-	"sync/atomic"
-
-	"github.com/google/btree"
-)
-
-func NewMetaInfo(mu *sync.RWMutex, conf *Configuration) *MetaInfo {
-	info := &MetaInfo{
-		RWMutex:   mu,
-		Tables:    make(map[uint64]*Table),
-		Conf:      conf,
-		TableIds:  make(map[uint64]bool),
-		NameMap:   make(map[string]uint64),
-		NameTree:  btree.New(2),
-		Tombstone: make(map[uint64]bool),
-	}
-	return info
-}
-
-func MockInfo(mu *sync.RWMutex, blkRows, blks uint64) *MetaInfo {
-	info := NewMetaInfo(mu, &Configuration{
-		BlockMaxRows:     blkRows,
-		SegmentMaxBlocks: blks,
-	})
-	return info
-}
-
-// SoftDeleteTable deletes the given table in meta space and marks it, but not deletes physically.
-func (info *MetaInfo) SoftDeleteTable(name string, logIndex uint64) (id uint64, err error) {
-	id, ok := info.NameMap[name]
-	if !ok {
-		return id, errors.New(fmt.Sprintf("Table %s not existed", name))
-	}
-	ts := NowMicro()
-	info.Lock()
-	defer info.Unlock()
-	delete(info.NameMap, name)
-	info.Tombstone[id] = true
-	table := info.Tables[id]
-	info.NameTree.Delete(table)
-	if err := table.Delete(ts); err != nil {
-		return 0, err
-	}
-	table.LogHistory.DeletedIndex = logIndex
-	atomic.AddUint64(&info.CheckPoint, uint64(1))
-	table.UpdateVersion()
-	return id, nil
-}
-
-func (info *MetaInfo) ReferenceTableByName(name string) (tbl *Table, err error) {
-	info.RLock()
-	defer info.RUnlock()
-	id, ok := info.NameMap[name]
-	if !ok {
-		return nil, errors.New(fmt.Sprintf("specified table %s not found in info", name))
-	}
-	return info.Tables[id], nil
-}
-
-func (info *MetaInfo) ReferenceTable(tableId uint64) (tbl *Table, err error) {
-	info.RLock()
-	defer info.RUnlock()
-	tbl, ok := info.Tables[tableId]
-	if !ok {
-		return nil, errors.New(fmt.Sprintf("specified table %d not found in info", tableId))
-	}
-	return tbl, nil
-}
-
-func (info *MetaInfo) ReferenceBlock(tableId, segmentId, blockId uint64) (blk *Block, err error) {
-	info.RLock()
-	tbl, ok := info.Tables[tableId]
-	if !ok {
-		info.RUnlock()
-		return nil, errors.New(fmt.Sprintf("specified table %d not found in info", tableId))
-	}
-	info.RUnlock()
-	blk, err = tbl.ReferenceBlock(segmentId, blockId)
-
-	return blk, err
-}
-
-func (info *MetaInfo) TableSegmentIDs(tableID uint64, args ...int64) (ids map[uint64]uint64, err error) {
-	info.RLock()
-	tbl, ok := info.Tables[tableID]
-	info.RUnlock()
-	if !ok {
-		return ids, errors.New(fmt.Sprintf("Specified table %d not found", tableID))
-	}
-	var ts int64
-	if len(args) == 0 {
-		ts = NowMicro()
-	} else {
-		ts = args[0]
-	}
-	ids = tbl.SegmentIDs(ts)
-	return ids, err
-}
-
-func (info *MetaInfo) UpdateCheckpointTime(ts int64) {
-	curr := atomic.LoadInt64(&info.CkpTime)
-	for curr < ts {
-		if atomic.CompareAndSwapInt64(&info.CkpTime, curr, ts) {
-			return
-		}
-		curr = atomic.LoadInt64(&info.CkpTime)
-	}
-}
-
-func (info *MetaInfo) GetCheckpointTime() int64 {
-	return atomic.LoadInt64(&info.CkpTime)
-}
-
-func (info *MetaInfo) GetTablesByNamePrefix(prefix string) (tbls []*Table) {
-	ts := NowMicro()
-	upperBound := []byte(prefix)
-	upperBound = append(upperBound, byte(255))
-	info.RLock()
-	defer info.RUnlock()
-	info.NameTree.AscendRange(
-		&Table{Schema: &Schema{Name: prefix}},
-		&Table{Schema: &Schema{Name: string(upperBound)}},
-		func(item btree.Item) bool {
-			t := item.(*Table)
-			if !t.Select(ts) {
-				return false
-			}
-			tbls = append(tbls, t)
-			return true
-		})
-	return tbls
-}
-
-func (info *MetaInfo) TableNames(args ...int64) []string {
-	var ts int64
-	if len(args) == 0 {
-		ts = NowMicro()
-	} else {
-		ts = args[0]
-	}
-	names := make([]string, 0)
-	info.RLock()
-	defer info.RUnlock()
-	for _, t := range info.Tables {
-		if !t.Select(ts) {
-			continue
-		}
-		names = append(names, t.Schema.Name)
-	}
-	return names
-}
-
-func (info *MetaInfo) TableIDs(args ...int64) map[uint64]uint64 {
-	var ts int64
-	if len(args) == 0 {
-		ts = NowMicro()
-	} else {
-		ts = args[0]
-	}
-	ids := make(map[uint64]uint64)
-	info.RLock()
-	defer info.RUnlock()
-	for _, t := range info.Tables {
-		if !t.Select(ts) {
-			continue
-		}
-		ids[t.GetID()] = t.GetID()
-	}
-	return ids
-}
-
-func (info *MetaInfo) CreateTable(logIdx uint64, schema *Schema) (tbl *Table, err error) {
-	if !schema.Valid() {
-		return nil, errors.New("invalid schema")
-	}
-	tbl = NewTable(logIdx, info, schema, info.Sequence.GetTableID())
-	return tbl, err
-}
-
-func (info *MetaInfo) UpdateCheckpoint(id uint64) error {
-	if !atomic.CompareAndSwapUint64(&info.CheckPoint, id-1, id) {
-		return errors.New(fmt.Sprintf("Cannot update checkpoint from %d to %d", info.CheckPoint, id))
-	}
-	return nil
-}
-
-func (info *MetaInfo) String() string {
-	s := fmt.Sprintf("Info(ck=%d)", info.CheckPoint)
-	s += "["
-	for i, t := range info.Tables {
-		if i != 0 {
-			s += "\n"
-		}
-		s += t.String()
-	}
-	if len(info.Tables) > 0 {
-		s += "\n"
-	}
-	s += "]"
-	return s
-}
-
-// RegisterTable add an existing table meta into meta info space.
-func (info *MetaInfo) RegisterTable(tbl *Table) error {
-	info.Lock()
-	defer info.Unlock()
-
-	_, ok := info.Tables[tbl.ID]
-	if ok {
-		return errors.New(fmt.Sprintf("Duplicate table %d found in info", tbl.ID))
-	}
-	_, ok = info.NameMap[tbl.Schema.Name]
-	if ok {
-		return errors.New(fmt.Sprintf("Duplicate table %s found in info", tbl.Schema.Name))
-	}
-	err := tbl.Attach()
-	if err != nil {
-		return err
-	}
-
-	info.Tables[tbl.ID] = tbl
-	info.NameMap[tbl.Schema.Name] = tbl.ID
-	info.NameTree.ReplaceOrInsert(tbl)
-	info.TableIds[tbl.ID] = true
-	atomic.AddUint64(&info.CheckPoint, uint64(1))
-	return nil
-}
-
-// CreateTableFromTableInfo creates a new table meta with the given table info.
-func (info *MetaInfo) CreateTableFromTableInfo(tinfo *aoe.TableInfo, ctx dbi.TableOpCtx) (*Table, error) {
-	schema := &Schema{
-		Name:      tinfo.Name,
-		ColDefs:   make([]*ColDef, 0),
-		Indices:   make([]*IndexInfo, 0),
-		NameIdMap: make(map[string]int),
-	}
-	for idx, colInfo := range tinfo.Columns {
-		newInfo := &ColDef{
-			Name: colInfo.Name,
-			Idx:  idx,
-			Type: colInfo.Type,
-		}
-		schema.NameIdMap[newInfo.Name] = len(schema.ColDefs)
-		schema.ColDefs = append(schema.ColDefs, newInfo)
-	}
-	for _, indexInfo := range tinfo.Indices {
-		newInfo := &IndexInfo{
-			ID:      info.Sequence.GetIndexID(),
-			Type:    IndexType(indexInfo.Type),
-			Columns: make([]uint16, 0),
-		}
-		for _, col := range indexInfo.Columns {
-			newInfo.Columns = append(newInfo.Columns, uint16(col))
-		}
-		schema.Indices = append(schema.Indices, newInfo)
-	}
-	tbl, err := info.CreateTable(ctx.OpIndex, schema)
-	if err != nil {
-		return nil, err
-	}
-	err = info.RegisterTable(tbl)
-	if err != nil {
-		return nil, err
-	}
-	return tbl, nil
-}
-
-func (info *MetaInfo) GetLastFileName() string {
-	return fmt.Sprintf("%d", info.CheckPoint-1)
-}
-
-func (info *MetaInfo) GetFileName() string {
-	return fmt.Sprintf("%d", info.CheckPoint)
-}
-
-func (info *MetaInfo) GetResourceType() ResourceType {
-	return ResInfo
-}
-
-func (info *MetaInfo) Unmarshal(buf []byte) error {
-	type Alias MetaInfo
-	v := &struct {
-		*Alias
-		Tables map[uint64]GenericTableWrapper
-	}{
-		Alias: (*Alias)(info),
-	}
-	err := json.Unmarshal(buf, v)
-	if err != nil {
-		return err
-	}
-	info.Tables = make(map[uint64]*Table)
-	for _, wrapped := range v.Tables {
-		info.Tables[wrapped.ID] = &Table{ID: wrapped.ID, TimeStamp: wrapped.TimeStamp}
-	}
-	return nil
-}
-
-func (info *MetaInfo) MarshalJSON() ([]byte, error) {
-	tables := make(map[uint64]GenericTableWrapper)
-	for _, tbl := range info.Tables {
-		tables[tbl.ID] = GenericTableWrapper{
-			ID:         tbl.ID,
-			TimeStamp:  tbl.TimeStamp,
-			LogHistory: tbl.LogHistory,
-		}
-	}
-	type Alias MetaInfo
-	return json.Marshal(&struct {
-		Tables map[uint64]GenericTableWrapper
-		*Alias
-	}{
-		Tables: tables,
-		Alias:  (*Alias)(info),
-	})
-}
-
-func (info *MetaInfo) ReadFrom(r io.Reader) (int64, error) {
-	decoder := dump.NewDecoder(r)
-	err := decoder.Decode(info)
-	return decoder.InputOffset(), err
-}
-
-func (info *MetaInfo) GetTableId() uint64 {
-	panic("logic error")
-}
-
-func (info *MetaInfo) Copy(ctx CopyCtx) *MetaInfo {
-	if ctx.Ts == 0 {
-		ctx.Ts = NowMicro()
-	}
-	newInfo := NewMetaInfo(info.RWMutex, info.Conf)
-	newInfo.CheckPoint = info.CheckPoint
-	newInfo.CkpTime = ctx.Ts
-	for k, v := range info.Tables {
-		var tbl *Table
-		if !v.Select(ctx.Ts) {
-			tbl = v.LiteCopy()
-		} else {
-			tbl = v.Copy(ctx)
-		}
-
-		newInfo.Tables[k] = tbl
-	}
-
-	return newInfo
-}
-
-func (info *MetaInfo) Serialize(w io.Writer) error {
-	return dump.NewEncoder(w).Encode(info)
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/info_test.go b/pkg/vm/engine/aoe/storage/metadata/v1/info_test.go
deleted file mode 100644
index d9adc6384aaaaa523388e58527b1536174d83114..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v1/info_test.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metadata
-
-import (
-	"matrixone/pkg/container/types"
-	"matrixone/pkg/vm/engine/aoe"
-	"matrixone/pkg/vm/engine/aoe/storage/dbi"
-	"os"
-	"strings"
-	"sync"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestInfo(t *testing.T) {
-	dir := "/tmp/tbl"
-	os.RemoveAll(dir)
-	mu := &sync.RWMutex{}
-	info := MockInfo(mu, blockRowCount, segmentBlockCount)
-	assert.NotNil(t, info)
-	info1 := NewMetaInfo(mu, &Configuration{
-		BlockMaxRows:     2,
-		SegmentMaxBlocks: 2,
-	})
-	schema1 := MockSchema(2)
-	schema1.Name = "tbl1"
-	schema2 := MockVarCharSchema(2)
-	schema2.Name = "tbl2"
-	tbl1, err := info1.CreateTable(NextGlobalSeqNum(), schema1)
-	assert.Nil(t, err)
-	_, err = info1.ReferenceTable(tbl1.ID)
-	assert.NotNil(t, err)
-	tbl2, err := info1.CreateTable(NextGlobalSeqNum(), schema2)
-	assert.Nil(t, err)
-	_, err = info1.CreateTable(NextGlobalSeqNum(), nil)
-	assert.NotNil(t, err)
-	assert.Equal(t, tbl1.GetBoundState(), Standalone)
-	err = info1.RegisterTable(tbl1)
-	assert.Nil(t, err)
-	assert.NotNil(t, info1.RegisterTable(tbl1))
-	delete(info1.Tables, tbl1.ID)
-	assert.NotNil(t, info1.RegisterTable(tbl1))
-	delete(info1.NameMap, "tbl1")
-	assert.NotNil(t, info1.RegisterTable(tbl1))
-	info1.Tables[tbl1.ID] = tbl1
-	info1.NameMap["tbl1"] = tbl1.ID
-	assert.Equal(t, tbl1.GetBoundState(), Attached)
-	err = info1.RegisterTable(tbl2)
-	assert.Nil(t, err)
-	assert.Equal(t, 2, len(info1.TableNames()))
-	assert.Equal(t, map[uint64]uint64{tbl1.ID: tbl1.ID, tbl2.ID: tbl2.ID}, info1.TableIDs())
-	assert.Equal(t, 2, len(info1.GetTablesByNamePrefix("tbl")))
-	assert.Panics(t, func() {
-		info1.GetTableId()
-	})
-
-	_, err = info1.SoftDeleteTable("xxxx", NextGlobalSeqNum())
-	assert.NotNil(t, err)
-	_, err = info1.SoftDeleteTable("tbl1", NextGlobalSeqNum())
-	assert.Nil(t, err)
-	_, err = info1.SoftDeleteTable("tbl1", NextGlobalSeqNum())
-	assert.NotNil(t, err)
-	info1.NameMap["tbl1"] = tbl1.ID
-	_, err = info1.SoftDeleteTable("tbl1", NextGlobalSeqNum())
-	assert.NotNil(t, err)
-	_, err = info1.ReferenceTableByName("tbl1")
-	assert.NotNil(t, err)
-	_, err = info1.ReferenceTable(tbl1.ID)
-	assert.Nil(t, err)
-	_, err = info1.ReferenceTable(tbl2.ID)
-	assert.Nil(t, err)
-	_, err = info1.ReferenceTableByName("tbl2")
-	assert.Nil(t, err)
-
-	_, err = info1.ReferenceBlock(3, 0, 0)
-	assert.NotNil(t, err)
-	_, err = info1.ReferenceBlock(1, 0, 0)
-	assert.NotNil(t, err)
-	_, err = info1.TableSegmentIDs(3)
-	assert.NotNil(t, err)
-	_, err = info1.TableSegmentIDs(1)
-	assert.Nil(t, err)
-	_, err = info1.TableSegmentIDs(1, NowMicro())
-	assert.Nil(t, err)
-
-	assert.Equal(t, 1, len(info1.GetTablesByNamePrefix("tbl")))
-	assert.Equal(t, 1, len(info1.TableNames()))
-	assert.Equal(t, 1, len(info1.TableNames(NowMicro())))
-	assert.Equal(t, 1, len(info1.TableIDs()))
-	assert.Equal(t, 1, len(info1.TableIDs(NowMicro())))
-
-	assert.Equal(t, 4, len(strings.Split(info1.String(), "\n")))
-	assert.Equal(t, "2", info1.GetLastFileName())
-	assert.Equal(t, "3", info1.GetFileName())
-	assert.Equal(t, ResInfo, info1.GetResourceType())
-
-	//buf := bytes.NewBuffer(make([]byte, 10000))
-	//bytes.NewReader()
-	f, _ := os.Create("/tmp/tbl")
-	assert.Nil(t, info1.Serialize(f))
-	assert.Nil(t, f.Close())
-	assert.Equal(t, 1, len(info1.Copy(CopyCtx{
-		Ts:       NowMicro(),
-		Attached: false,
-	}).TableNames()))
-	assert.Equal(t, 1, len(info1.Copy(CopyCtx{
-		Attached: false,
-	}).TableNames()))
-	var info3 MetaInfo
-	f, _ = os.Open("/tmp/tbl")
-	_, err = info3.ReadFrom(f)
-	assert.Nil(t, err)
-	assert.Nil(t, f.Close())
-
-	json, err := info3.MarshalJSON()
-	assert.Nil(t, err)
-	buf := make([]byte, 296)
-	f, _ = os.Open("/tmp/tbl")
-	n, err := f.Read(buf)
-	assert.Nil(t, err)
-	assert.Nil(t, f.Close())
-	assert.Equal(t, n, 296)
-	assert.Nil(t, err)
-	assert.Equal(t, json, buf)
-	var info4 MetaInfo
-	assert.Nil(t, info4.Unmarshal(buf))
-	assert.Equal(t, len(info4.Tables), len(info3.Tables))
-	assert.NotNil(t, info4.Unmarshal(nil))
-
-	assert.NotNil(t, info1.UpdateCheckpoint(info1.CheckPoint-1))
-	assert.Nil(t, info1.UpdateCheckpoint(info1.CheckPoint+1))
-	info1.UpdateCheckpointTime(NowMicro())
-	info1.UpdateCheckpointTime(NowMicro() - 10)
-	_, err = info1.CreateTableFromTableInfo(&aoe.TableInfo{}, dbi.TableOpCtx{})
-	assert.NotNil(t, err)
-	_, err = info1.CreateTableFromTableInfo(&aoe.TableInfo{Name: "tbl1", Columns: []aoe.ColumnInfo{{Name: "xxxx", Type: types.Type{}}}}, dbi.TableOpCtx{})
-	assert.Nil(t, err)
-	_, err = info1.CreateTableFromTableInfo(&aoe.TableInfo{Name: "tbl1", Columns: []aoe.ColumnInfo{{Name: "xxxx", Type: types.Type{}}}}, dbi.TableOpCtx{})
-	assert.NotNil(t, err)
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/logentry.go b/pkg/vm/engine/aoe/storage/metadata/v1/logentry.go
similarity index 100%
rename from pkg/vm/engine/aoe/storage/metadata/v2/logentry.go
rename to pkg/vm/engine/aoe/storage/metadata/v1/logentry.go
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/logindex.go b/pkg/vm/engine/aoe/storage/metadata/v1/logindex.go
deleted file mode 100644
index 4251b4339ebd1cfb91503c98c066793de56f3a2d..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v1/logindex.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metadata
-
-import (
-	"bytes"
-	"fmt"
-	"matrixone/pkg/encoding"
-)
-
-func MockLogBatchId(id uint64) LogBatchId {
-	return LogBatchId{
-		Id:     id,
-		Offset: uint32(0),
-		Size:   uint32(1),
-	}
-}
-
-func (id *LogBatchId) String() string {
-	return fmt.Sprintf("(%d,%d,%d)", id.Id, id.Offset, id.Size)
-}
-
-func (id *LogBatchId) IsEnd() bool {
-	return id.Offset == id.Size-1
-}
-
-func (idx *LogIndex) IsSameBatch(o *LogIndex) bool {
-	return idx.ID.Id == o.ID.Id
-}
-
-func (idx *LogIndex) String() string {
-	return fmt.Sprintf("(%s,%d,%d,%d)", idx.ID.String(), idx.Start, idx.Count, idx.Capacity)
-}
-
-func (idx *LogIndex) IsApplied() bool {
-	return idx.Capacity == idx.Start+idx.Count
-}
-
-func (idx *LogIndex) IsBatchApplied() bool {
-	return idx.Capacity == idx.Start+idx.Count && idx.ID.IsEnd()
-}
-
-func (idx *LogIndex) Marshal() ([]byte, error) {
-	var buf bytes.Buffer
-	buf.Write(encoding.EncodeUint64(idx.ID.Id))
-	buf.Write(encoding.EncodeUint32(uint32(idx.ID.Offset)))
-	buf.Write(encoding.EncodeUint32(uint32(idx.ID.Size)))
-	buf.Write(encoding.EncodeUint64(idx.Count))
-	buf.Write(encoding.EncodeUint64(idx.Start))
-	buf.Write(encoding.EncodeUint64(idx.Capacity))
-	return buf.Bytes(), nil
-}
-
-func (idx *LogIndex) UnMarshall(data []byte) error {
-	if len(data) == 0 {
-		return nil
-	}
-	buf := data
-	idx.ID.Id = encoding.DecodeUint64(buf[:8])
-	buf = buf[8:]
-	idx.ID.Offset = encoding.DecodeUint32(buf[:4])
-	buf = buf[4:]
-	idx.ID.Size = encoding.DecodeUint32(buf[:4])
-	buf = buf[4:]
-	idx.Count = encoding.DecodeUint64(buf[:8])
-	buf = buf[8:]
-	idx.Start = encoding.DecodeUint64(buf[:8])
-	buf = buf[8:]
-	idx.Capacity = encoding.DecodeUint64(buf[:8])
-	buf = buf[8:]
-	return nil
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/logindex_test.go b/pkg/vm/engine/aoe/storage/metadata/v1/logindex_test.go
deleted file mode 100644
index 83fa3cd5fe8c3ea67572e78154cfbecc1207b337..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v1/logindex_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metadata
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestLogIndex(t *testing.T) {
-	idx := LogIndex{
-		ID:       MockLogBatchId(1),
-		Start:    0,
-		Count:    0,
-		Capacity: 4,
-	}
-	assert.False(t, idx.IsApplied())
-	idx.Count = 4
-	assert.True(t, idx.IsApplied())
-	m, err := idx.Marshal()
-	assert.Nil(t, err)
-	var idx1 LogIndex
-	assert.Nil(t, idx1.UnMarshall(make([]byte, 0)))
-	assert.Nil(t, idx1.UnMarshall(m))
-	assert.Equal(t, idx.String(), "((1,0,1),0,4,4)")
-
-	size := uint32(4)
-	batchId := MockLogBatchId(uint64(2))
-	batchId.Size = size
-
-	for offset := uint32(0); offset < size-1; offset++ {
-		batchId.Offset = offset
-		assert.False(t, batchId.IsEnd())
-	}
-	batchId.Offset = size - 1
-	assert.True(t, batchId.IsEnd())
-}
-
-func TestBlockAppliedIndex(t *testing.T) {
-	blk := Block{}
-	id, ok := blk.GetAppliedIndex()
-	assert.False(t, ok)
-
-	idx := LogIndex{
-		ID:       MockLogBatchId(1),
-		Start:    0,
-		Count:    2,
-		Capacity: 2,
-	}
-	err := blk.SetIndex(idx)
-	assert.Nil(t, err)
-	id, ok = blk.GetAppliedIndex()
-	assert.True(t, ok)
-	assert.Equal(t, idx.ID.Id, id)
-
-	idx.ID.Id = uint64(2)
-	err = blk.SetIndex(idx)
-	assert.Nil(t, err)
-	id, ok = blk.GetAppliedIndex()
-	assert.True(t, ok)
-	assert.Equal(t, idx.ID.Id, id)
-
-	applied := id
-	idx.ID.Id = uint64(3)
-	idx.ID.Size = 2
-	err = blk.SetIndex(idx)
-	assert.Nil(t, err)
-	id, ok = blk.GetAppliedIndex()
-	assert.True(t, ok)
-	assert.Equal(t, applied, id)
-
-	idx.ID.Id = uint64(3)
-	idx.ID.Offset = 1
-	err = blk.SetIndex(idx)
-	assert.Nil(t, err)
-	id, ok = blk.GetAppliedIndex()
-	assert.True(t, ok)
-	assert.Equal(t, idx.ID.Id, id)
-	assert.Equal(t, blk.GetReplayIndex().ID, idx.ID)
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/metadata_test.go b/pkg/vm/engine/aoe/storage/metadata/v1/metadata_test.go
similarity index 100%
rename from pkg/vm/engine/aoe/storage/metadata/v2/metadata_test.go
rename to pkg/vm/engine/aoe/storage/metadata/v1/metadata_test.go
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/node.go b/pkg/vm/engine/aoe/storage/metadata/v1/node.go
similarity index 100%
rename from pkg/vm/engine/aoe/storage/metadata/v2/node.go
rename to pkg/vm/engine/aoe/storage/metadata/v1/node.go
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/replayer.go b/pkg/vm/engine/aoe/storage/metadata/v1/replayer.go
similarity index 100%
rename from pkg/vm/engine/aoe/storage/metadata/v2/replayer.go
rename to pkg/vm/engine/aoe/storage/metadata/v1/replayer.go
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/schema.go b/pkg/vm/engine/aoe/storage/metadata/v1/schema.go
index bbc862971c4374a401bb091310fa52ab202aa288..bcb63c690cc421f74bf7b6c9c15c5c2f835a78d3 100644
--- a/pkg/vm/engine/aoe/storage/metadata/v1/schema.go
+++ b/pkg/vm/engine/aoe/storage/metadata/v1/schema.go
@@ -10,16 +10,57 @@
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
-// limitations under the License.
 
 package metadata
 
 import (
+	"encoding/json"
 	"fmt"
 	"matrixone/pkg/container/types"
-	"matrixone/pkg/vm/engine/aoe"
 )
 
+type IndexT uint16
+
+const (
+	ZoneMap IndexT = iota
+	NumBsi
+	FixStrBsi
+)
+
+type IndexInfo struct {
+	Id      uint64
+	Type    IndexT
+	Columns []uint16
+}
+
+type ColDef struct {
+	Name string
+	Idx  int
+	Type types.Type
+}
+
+type Schema struct {
+	Name             string
+	Indices          []*IndexInfo
+	ColDefs          []*ColDef
+	NameIndex        map[string]int
+	BlockMaxRows     uint64
+	SegmentMaxBlocks uint64
+}
+
+func (s *Schema) String() string {
+	buf, _ := json.Marshal(s)
+	return string(buf)
+}
+
+func (s *Schema) Types() []types.Type {
+	ts := make([]types.Type, len(s.ColDefs))
+	for i, colDef := range s.ColDefs {
+		ts[i] = colDef.Type
+	}
+	return ts
+}
+
 func (s *Schema) Valid() bool {
 	if s == nil {
 		return false
@@ -42,40 +83,34 @@ func (s *Schema) Valid() bool {
 	return true
 }
 
-func (s *Schema) Types() (ts []types.Type) {
-	for _, colDef := range s.ColDefs {
-		ts = append(ts, colDef.Type)
-	}
-	return ts
-}
-
 // GetColIdx returns column index for the given column name
 // if found, otherwise returns -1.
 func (s *Schema) GetColIdx(attr string) int {
-	idx, ok := s.NameIdMap[attr]
+	idx, ok := s.NameIndex[attr]
 	if !ok {
 		return -1
 	}
 	return idx
 }
 
-func (s *Schema) Clone() *Schema {
-	newSchema := &Schema{
-		ColDefs:   make([]*ColDef, 0),
+func MockSchema(colCnt int) *Schema {
+	schema := &Schema{
+		ColDefs:   make([]*ColDef, colCnt),
 		Indices:   make([]*IndexInfo, 0),
-		NameIdMap: make(map[string]int),
-		Name:      s.Name,
-	}
-	for _, colDef := range s.ColDefs {
-		newColDef := *colDef
-		newSchema.NameIdMap[colDef.Name] = len(newSchema.ColDefs)
-		newSchema.ColDefs = append(newSchema.ColDefs, &newColDef)
+		NameIndex: make(map[string]int),
 	}
-	for _, indexInfo := range s.Indices {
-		newInfo := *indexInfo
-		newSchema.Indices = append(newSchema.Indices, &newInfo)
+	prefix := "mock_"
+	for i := 0; i < colCnt; i++ {
+		name := fmt.Sprintf("%s%d", prefix, i)
+		colDef := &ColDef{
+			Idx:  i,
+			Name: name,
+			Type: types.Type{Oid: types.T_int32, Size: 4, Width: 4},
+		}
+		schema.ColDefs[i] = colDef
+		schema.NameIndex[colDef.Name] = i
 	}
-	return newSchema
+	return schema
 }
 
 // MockSchemaAll if char/varchar is needed, colCnt = 14, otherwise colCnt = 12
@@ -83,7 +118,7 @@ func MockSchemaAll(colCnt int) *Schema {
 	schema := &Schema{
 		Indices:   make([]*IndexInfo, 0),
 		ColDefs:   make([]*ColDef, colCnt),
-		NameIdMap: make(map[string]int),
+		NameIndex: make(map[string]int),
 	}
 	prefix := "mock_"
 	for i := 0; i < colCnt; i++ {
@@ -93,197 +128,93 @@ func MockSchemaAll(colCnt int) *Schema {
 			Idx:  i,
 		}
 		schema.ColDefs[i] = colDef
-		schema.NameIdMap[colDef.Name] = i
+		schema.NameIndex[colDef.Name] = i
 		switch i {
 		case 0:
 			colDef.Type = types.Type{
-				Oid:       types.T_int8,
-				Size:      1,
-				Width:     8,
+				Oid:   types.T_int8,
+				Size:  1,
+				Width: 8,
 			}
 		case 1:
 			colDef.Type = types.Type{
-				Oid:       types.T_int16,
-				Size:      2,
-				Width:     16,
+				Oid:   types.T_int16,
+				Size:  2,
+				Width: 16,
 			}
 		case 2:
 			colDef.Type = types.Type{
-				Oid:       types.T_int32,
-				Size:      4,
-				Width:     32,
+				Oid:   types.T_int32,
+				Size:  4,
+				Width: 32,
 			}
 		case 3:
 			colDef.Type = types.Type{
-				Oid:       types.T_int64,
-				Size:      8,
-				Width:     64,
+				Oid:   types.T_int64,
+				Size:  8,
+				Width: 64,
 			}
 		case 4:
 			colDef.Type = types.Type{
-				Oid:       types.T_uint8,
-				Size:      1,
-				Width:     8,
+				Oid:   types.T_uint8,
+				Size:  1,
+				Width: 8,
 			}
 		case 5:
 			colDef.Type = types.Type{
-				Oid:       types.T_uint16,
-				Size:      2,
-				Width:     16,
+				Oid:   types.T_uint16,
+				Size:  2,
+				Width: 16,
 			}
 		case 6:
 			colDef.Type = types.Type{
-				Oid:       types.T_uint32,
-				Size:      4,
-				Width:     32,
+				Oid:   types.T_uint32,
+				Size:  4,
+				Width: 32,
 			}
 		case 7:
 			colDef.Type = types.Type{
-				Oid:       types.T_uint64,
-				Size:      8,
-				Width:     64,
+				Oid:   types.T_uint64,
+				Size:  8,
+				Width: 64,
 			}
 		case 8:
 			colDef.Type = types.Type{
-				Oid:       types.T_float32,
-				Size:      4,
-				Width:     32,
+				Oid:   types.T_float32,
+				Size:  4,
+				Width: 32,
 			}
 		case 9:
 			colDef.Type = types.Type{
-				Oid:       types.T_float64,
-				Size:      8,
-				Width:     64,
+				Oid:   types.T_float64,
+				Size:  8,
+				Width: 64,
 			}
 		case 10:
 			colDef.Type = types.Type{
-				Oid:       types.T_date,
-				Size:      4,
-				Width:     32,
+				Oid:   types.T_date,
+				Size:  4,
+				Width: 32,
 			}
 		case 11:
 			colDef.Type = types.Type{
-				Oid:       types.T_datetime,
-				Size:      8,
-				Width:     64,
+				Oid:   types.T_datetime,
+				Size:  8,
+				Width: 64,
 			}
 		case 12:
 			colDef.Type = types.Type{
-				Oid:       types.T_varchar,
-				Size:      24,
-				Width:     100,
+				Oid:   types.T_varchar,
+				Size:  24,
+				Width: 100,
 			}
 		case 13:
 			colDef.Type = types.Type{
-				Oid:       types.T_char,
-				Size:      24,
-				Width:     100,
+				Oid:   types.T_char,
+				Size:  24,
+				Width: 100,
 			}
 		}
 	}
 	return schema
 }
-
-func MockSchema(colCnt int) *Schema {
-	schema := &Schema{
-		ColDefs:   make([]*ColDef, colCnt),
-		Indices:   make([]*IndexInfo, 0),
-		NameIdMap: make(map[string]int),
-	}
-	prefix := "mock_"
-	for i := 0; i < colCnt; i++ {
-		name := fmt.Sprintf("%s%d", prefix, i)
-		colDef := &ColDef{
-			Idx:  i,
-			Name: name,
-			Type: types.Type{Oid: types.T_int32, Size: 4, Width: 32},
-		}
-		schema.ColDefs[i] = colDef
-		schema.NameIdMap[colDef.Name] = i
-	}
-	return schema
-}
-
-func MockVarCharSchema(colCnt int) *Schema {
-	schema := &Schema{
-		ColDefs:   make([]*ColDef, colCnt),
-		Indices:   make([]*IndexInfo, 0),
-		NameIdMap: make(map[string]int),
-	}
-	prefix := "mock_"
-	for i := 0; i < colCnt; i++ {
-		name := fmt.Sprintf("%s%d", prefix, i)
-		colDef := &ColDef{
-			Idx:  i,
-			Name: name,
-			Type: types.Type{Oid: types.T_varchar, Size: 24},
-		}
-		schema.ColDefs[i] = colDef
-		schema.NameIdMap[colDef.Name] = i
-	}
-	return schema
-}
-
-func MockDateSchema(colCnt int) *Schema {
-	schema := &Schema{
-		ColDefs:   make([]*ColDef, colCnt),
-		Indices:   make([]*IndexInfo, 0),
-		NameIdMap: make(map[string]int),
-	}
-	prefix := "mock_"
-	for i := 0; i < colCnt; i++ {
-		name := fmt.Sprintf("%s%d", prefix, i)
-		var colDef *ColDef
-		if i == 0 {
-			colDef = &ColDef{
-				Name: name,
-				Idx:  i,
-				Type: types.Type{
-					Oid:       types.T_date,
-					Size:      4,
-					Width:     32,
-					Precision: 0,
-				},
-			}
-		} else {
-			colDef = &ColDef{
-				Name: name,
-				Idx:  i,
-				Type: types.Type{
-					Oid:       types.T_datetime,
-					Size:      8,
-					Width:     64,
-					Precision: 0,
-				},
-			}
-		}
-		schema.ColDefs[i] = colDef
-		schema.NameIdMap[colDef.Name] = i
-	}
-	return schema
-}
-
-func MockTableInfo(colCnt int) *aoe.TableInfo {
-	tblInfo := &aoe.TableInfo{
-		Name:    "mocktbl",
-		Columns: make([]aoe.ColumnInfo, 0),
-		Indices: make([]aoe.IndexInfo, 0),
-	}
-	prefix := "mock_"
-	for i := 0; i < colCnt; i++ {
-		name := fmt.Sprintf("%s%d", prefix, i)
-		colInfo := aoe.ColumnInfo{
-			Name: name,
-		}
-		if i == 1 {
-			colInfo.Type = types.Type{Oid: types.T(types.T_varchar), Size: 24}
-		} else {
-			colInfo.Type = types.Type{Oid: types.T_int32, Size: 4, Width: 4}
-		}
-		indexInfo := aoe.IndexInfo{Type: uint64(ZoneMap), Columns: []uint64{uint64(i)}}
-		tblInfo.Columns = append(tblInfo.Columns, colInfo)
-		tblInfo.Indices = append(tblInfo.Indices, indexInfo)
-	}
-	return tblInfo
-}
-
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/schema_test.go b/pkg/vm/engine/aoe/storage/metadata/v1/schema_test.go
deleted file mode 100644
index 1b861ecc0232cd4b6e8cc0bb1dab8ac30bbf17a3..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v1/schema_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metadata
-
-import (
-	"matrixone/pkg/container/types"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestSchema(t *testing.T) {
-	schema0 := MockSchema(2)
-	schema1 := MockVarCharSchema(2)
-	schema2 := MockDateSchema(0)
-	assert.False(t, (*Schema)(nil).Valid())
-	assert.False(t, schema2.Valid())
-	schema2 = MockDateSchema(2)
-	assert.True(t, schema2.Valid())
-	schema2 = MockSchemaAll(14)
-	assert.Equal(t, 14, len(schema2.ColDefs))
-	assert.True(t, schema0.Valid())
-	schema1.ColDefs[0].Idx = 1
-	assert.False(t, schema1.Valid())
-	schema1.ColDefs[0].Idx = 0
-	schema1.ColDefs[0].Name = schema1.ColDefs[1].Name
-	assert.False(t, schema1.Valid())
-	typs := schema0.Types()
-	assert.True(t, typs[0].Eq(types.Type{
-		Oid:   types.T_int32,
-		Size:  4,
-		Width: 32,
-	}))
-	assert.Equal(t, -1, schema0.GetColIdx("xxxx"))
-	assert.Equal(t, 0, schema0.GetColIdx("mock_0"))
-	schema0.Indices = append(schema0.Indices, &IndexInfo{})
-	assert.Equal(t, "mock_1", schema0.Clone().ColDefs[1].Name)
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/segment.go b/pkg/vm/engine/aoe/storage/metadata/v1/segment.go
index aaf2f8c5878b0f51c6901cae3519ea9e95598781..dd56c74a9b78ccd503b41f102947e82ce1c7b3f5 100644
--- a/pkg/vm/engine/aoe/storage/metadata/v1/segment.go
+++ b/pkg/vm/engine/aoe/storage/metadata/v1/segment.go
@@ -10,7 +10,7 @@
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
 package metadata
 
@@ -18,286 +17,370 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"matrixone/pkg/logutil"
 	"matrixone/pkg/vm/engine/aoe/storage/common"
+	"matrixone/pkg/vm/engine/aoe/storage/logstore"
+	"sync"
 )
 
-func NewSegment(table *Table, id uint64) *Segment {
-	seg := &Segment{
-		ID:            id,
-		Table:         table,
-		Blocks:        make([]*Block, 0),
-		IdMap:         make(map[uint64]int),
-		TimeStamp:     *NewTimeStamp(),
-		MaxBlockCount: table.Conf.SegmentMaxBlocks,
-	}
-	return seg
+var (
+	UpgradeInfullSegmentErr = errors.New("aoe: upgrade infull segment")
+	UpgradeNotNeededErr     = errors.New("aoe: already upgraded")
+)
+
+type segmentLogEntry struct {
+	*BaseEntry
+	TableId uint64
+	Catalog *Catalog `json:"-"`
 }
 
-func (seg *Segment) GetTableID() uint64 {
-	return seg.Table.ID
+func (e *segmentLogEntry) Marshal() ([]byte, error) {
+	return json.Marshal(e)
 }
 
-func (seg *Segment) AsCommonID() *common.ID {
-	return &common.ID{
-		TableID:   seg.Table.ID,
-		SegmentID: seg.ID,
-	}
+func (e *segmentLogEntry) Unmarshal(buf []byte) error {
+	return json.Unmarshal(buf, e)
 }
 
-func (seg *Segment) GetID() uint64 {
-	return seg.ID
+type Segment struct {
+	BaseEntry
+	Table    *Table         `json:"-"`
+	Catalog  *Catalog       `json:"-"`
+	IdIndex  map[uint64]int `json:"-"`
+	BlockSet []*Block
 }
 
-// BlockIDList returns the ID list of blocks lived in args[0], which is a timestamp.
-func (seg *Segment) BlockIDList(args ...interface{}) []uint64 {
-	var ts int64
-	if len(args) == 0 {
-		ts = NowMicro()
-	} else {
-		ts = args[0].(int64)
+func newSegmentEntry(catalog *Catalog, table *Table, tranId uint64, exIndex *ExternalIndex) *Segment {
+	e := &Segment{
+		Catalog:  catalog,
+		Table:    table,
+		BlockSet: make([]*Block, 0),
+		IdIndex:  make(map[uint64]int),
+		BaseEntry: BaseEntry{
+			Id: table.Catalog.NextSegmentId(),
+			CommitInfo: &CommitInfo{
+				CommitId:      tranId,
+				TranId:        tranId,
+				SSLLNode:      *common.NewSSLLNode(),
+				Op:            OpCreate,
+				ExternalIndex: exIndex,
+			},
+		},
 	}
-	ids := make([]uint64, 0)
-	seg.RLock()
-	defer seg.RUnlock()
-	for _, blk := range seg.Blocks {
-		if !blk.Select(ts) {
-			continue
-		}
-		ids = append(ids, blk.ID)
-	}
-	return ids
+	return e
 }
 
-// BlockIDs returns the ID map of blocks lived in args[0], which is a timestamp.
-func (seg *Segment) BlockIDs(args ...int64) map[uint64]uint64 {
-	var ts int64
-	if len(args) == 0 {
-		ts = NowMicro()
-	} else {
-		ts = args[0]
+func newCommittedSegmentEntry(catalog *Catalog, table *Table, base *BaseEntry) *Segment {
+	e := &Segment{
+		Catalog:   catalog,
+		Table:     table,
+		BlockSet:  make([]*Block, 0),
+		IdIndex:   make(map[uint64]int),
+		BaseEntry: *base,
 	}
-	ids := make(map[uint64]uint64)
-	seg.RLock()
-	defer seg.RUnlock()
-	for _, blk := range seg.Blocks {
-		if !blk.Select(ts) {
-			continue
-		}
-		ids[blk.ID] = blk.ID
-	}
-	return ids
+	return e
 }
 
-func (seg *Segment) HasUncommitted() bool {
-	if seg.DataState >= CLOSED {
-		return false
-	}
-	if seg.DataState < FULL {
+func (e *Segment) LE(o *Segment) bool {
+	if e == nil {
 		return true
 	}
-	for _, blk := range seg.Blocks {
-		if blk.DataState != FULL {
-			logutil.Infof("xxx")
-			return true
-		}
-	}
-	return false
+	return e.Id <= o.Id
 }
 
-func (seg *Segment) GetActiveBlk() *Block {
-	if seg.ActiveBlk >= len(seg.Blocks) {
-		return nil
+func (e *Segment) rebuild(table *Table) {
+	e.Catalog = table.Catalog
+	e.Table = table
+	e.IdIndex = make(map[uint64]int)
+	for i, blk := range e.BlockSet {
+		e.Catalog.Sequence.TryUpdateBlockId(blk.Id)
+		blk.rebuild(e)
+		e.IdIndex[blk.Id] = i
 	}
-	return seg.Blocks[seg.ActiveBlk]
 }
 
-func (seg *Segment) NextActiveBlk() *Block {
-	var blk *Block
-	if seg.ActiveBlk >= len(seg.Blocks)-1 {
-		seg.ActiveBlk++
-		return blk
+// Safe
+func (e *Segment) AsCommonID() *common.ID {
+	return &common.ID{
+		TableID:   e.Table.Id,
+		SegmentID: e.Id,
 	}
-	blk = seg.Blocks[seg.ActiveBlk]
-	seg.ActiveBlk++
-	return blk
 }
 
-func (seg *Segment) Marshal() ([]byte, error) {
-	return json.Marshal(seg)
+// Safe
+func (e *Segment) CommittedView(id uint64) *Segment {
+	baseEntry := e.UseCommitted(id)
+	if baseEntry == nil {
+		return nil
+	}
+	view := &Segment{
+		BaseEntry: *baseEntry,
+		BlockSet:  make([]*Block, 0),
+	}
+	e.RLock()
+	blks := make([]*Block, 0, len(e.BlockSet))
+	for _, blk := range e.BlockSet {
+		blks = append(blks, blk)
+	}
+	e.RUnlock()
+	for _, blk := range blks {
+		blkView := blk.CommittedView(id)
+		if blkView == nil {
+			continue
+		}
+		view.BlockSet = append(view.BlockSet, blkView)
+	}
+	return view
 }
 
-// CreateBlock generates a new block id with its Sequence and
-// returns a new block meta with this id.
-func (seg *Segment) CreateBlock() (blk *Block, err error) {
-	blk = NewBlock(seg.Table.Info.Sequence.GetBlockID(), seg)
-	return blk, err
+func (e *Segment) Marshal() ([]byte, error) {
+	return json.Marshal(e)
 }
 
-func (seg *Segment) String() string {
-	s := fmt.Sprintf("Seg(%d-%d) [blkPos=%d][State=%d]", seg.Table.ID, seg.ID, seg.ActiveBlk, seg.DataState)
-	s += "["
-	pos := 0
-	for _, blk := range seg.Blocks {
-		if pos != 0 {
-			s += "<-->"
-		}
-		s += blk.String()
-		pos++
+func (e *Segment) toLogEntry() *segmentLogEntry {
+	return &segmentLogEntry{
+		BaseEntry: &e.BaseEntry,
+		TableId:   e.Table.Id,
 	}
-	s += "]"
-	return s
 }
 
-func (seg *Segment) cloneBlockNoLock(id uint64, ctx CopyCtx) (blk *Block, err error) {
-	idx, ok := seg.IdMap[id]
-	if !ok {
-		return nil, errors.New(fmt.Sprintf("block %d not found in segment %d", id, seg.ID))
+func (e *Segment) Unmarshal(buf []byte) error {
+	return json.Unmarshal(buf, e)
+}
+
+// Not safe
+func (e *Segment) PString(level PPLevel) string {
+	if e == nil {
+		return "null segment"
 	}
-	blk = seg.Blocks[idx].Copy()
-	if !ctx.Attached {
-		err = blk.Detach()
+	s := fmt.Sprintf("<Segment %s", e.BaseEntry.PString(level))
+	cnt := 0
+	if level > PPL0 {
+		for _, blk := range e.BlockSet {
+			cnt++
+			s = fmt.Sprintf("%s\n%s", s, blk.PString(level))
+		}
+	}
+	if cnt == 0 {
+		s = fmt.Sprintf("%s>", s)
+	} else {
+		s = fmt.Sprintf("%s\n>", s)
 	}
-	return blk, err
+	return s
 }
 
-// CloneBlock returns the clone of the block if exists, whose block id is id.
-func (seg *Segment) CloneBlock(id uint64, ctx CopyCtx) (blk *Block, err error) {
-	seg.RLock()
-	defer seg.RUnlock()
-	return seg.cloneBlockNoLock(id, ctx)
+// Not safe
+func (e *Segment) String() string {
+	buf, _ := e.Marshal()
+	return string(buf)
 }
 
-func (seg *Segment) ReferenceBlock(id uint64) (blk *Block, err error) {
-	seg.RLock()
-	defer seg.RUnlock()
-	idx, ok := seg.IdMap[id]
-	if !ok {
-		return nil, errors.New(fmt.Sprintf("block %d not found in segment %d", id, seg.ID))
+// Not safe
+func (e *Segment) ToLogEntry(eType LogEntryType) LogEntry {
+	switch eType {
+	case ETCreateSegment:
+		break
+	case ETUpgradeSegment:
+		break
+	case ETDropSegment:
+		if !e.IsSoftDeletedLocked() {
+			panic("logic error")
+		}
+		break
+	default:
+		panic("not supported")
 	}
-	return seg.Blocks[idx], nil
+	entry := e.toLogEntry()
+	buf, _ := entry.Marshal()
+	logEntry := logstore.NewAsyncBaseEntry()
+	logEntry.Meta.SetType(eType)
+	logEntry.Unmarshal(buf)
+	return logEntry
 }
 
-// RegisterBlock registers a block via an existing block meta.
-func (seg *Segment) RegisterBlock(blk *Block) error {
-	if blk.Segment.Table.ID != seg.Table.ID {
-		return errors.New(fmt.Sprintf("table id mismatch %d:%d", seg.Table.ID, blk.Segment.Table.ID))
-	}
-	if blk.GetSegmentID() != seg.GetID() {
-		return errors.New(fmt.Sprintf("segment id mismatch %d:%d", seg.GetID(), blk.GetSegmentID()))
+// Safe
+func (e *Segment) SimpleCreateBlock() *Block {
+	ctx := newCreateBlockCtx(e)
+	if err := e.Table.Catalog.onCommitRequest(ctx); err != nil {
+		return nil
 	}
-	seg.Lock()
-	defer seg.Unlock()
+	return ctx.block
+}
 
-	err := blk.Attach()
-	if err != nil {
-		return err
+// Safe
+func (e *Segment) Appendable() bool {
+	e.RLock()
+	defer e.RUnlock()
+	if e.HasMaxBlocks() {
+		return !e.BlockSet[len(e.BlockSet)-1].IsFull()
 	}
-	if len(seg.Blocks) == int(seg.MaxBlockCount) {
-		return errors.New(fmt.Sprintf("Cannot add block into full segment %d", seg.ID))
+	return true
+}
+
+func (e *Segment) prepareCreateBlock(ctx *createBlockCtx) (LogEntry, error) {
+	tranId := e.Catalog.NextUncommitId()
+	be := newBlockEntry(e, tranId, ctx.exIndex)
+	logEntry := be.ToLogEntry(ETCreateBlock)
+	e.Lock()
+	e.onNewBlock(be)
+	e.Unlock()
+	e.Table.Catalog.commitMu.Lock()
+	defer e.Table.Catalog.commitMu.Unlock()
+	e.Table.Catalog.prepareCommitLog(be, logEntry)
+	ctx.block = be
+	return logEntry, nil
+}
+
+// Safe
+func (e *Segment) GetAppliedIndex(rwmtx *sync.RWMutex) (uint64, bool) {
+	if rwmtx == nil {
+		e.RLock()
+		defer e.RUnlock()
 	}
-	_, ok := seg.IdMap[blk.ID]
-	if ok {
-		return errors.New(fmt.Sprintf("Duplicate block %d found in segment %d", blk.GetID(), seg.ID))
+	if e.IsSorted() {
+		return e.BaseEntry.GetAppliedIndex()
 	}
-	seg.IdMap[blk.GetID()] = len(seg.Blocks)
-	seg.Blocks = append(seg.Blocks, blk)
-	if len(seg.Blocks) == int(seg.MaxBlockCount) {
-		if blk.IsFull() {
-			seg.DataState = CLOSED
-		} else {
-			seg.DataState = FULL
+	return e.calcAppliedIndex()
+}
+
+// Not safe
+func (e *Segment) GetReplayIndex() *LogIndex {
+	for i := len(e.BlockSet) - 1; i >= 0; i-- {
+		blk := e.BlockSet[i]
+		if blk.CommitInfo.ExternalIndex != nil && (blk.Count > 0 || blk.IsFull()) {
+			return blk.CommitInfo.ExternalIndex
 		}
-	} else {
-		seg.DataState = PARTIAL
 	}
-	seg.Table.Lock()
-	seg.Table.UpdateVersion()
-	seg.Table.Unlock()
 	return nil
 }
 
-func (seg *Segment) TryClose() bool {
-	seg.Lock()
-	defer seg.Unlock()
-	if seg.DataState == CLOSED || seg.DataState == SORTED {
-		return true
-	}
-	if seg.DataState == FULL || len(seg.Blocks) == int(seg.MaxBlockCount) {
-		for _, blk := range seg.Blocks {
-			if !blk.IsFull() {
-				return false
-			}
+func (e *Segment) calcAppliedIndex() (id uint64, ok bool) {
+	for i := len(e.BlockSet) - 1; i >= 0; i-- {
+		blk := e.BlockSet[i]
+		id, ok = blk.GetAppliedIndex(nil)
+		if ok {
+			break
 		}
-		seg.DataState = CLOSED
-		return true
 	}
-	return false
+	return id, ok
 }
 
-func (seg *Segment) TrySorted() error {
-	seg.Lock()
-	defer seg.Unlock()
-	if seg.DataState != CLOSED {
-		return errors.New("segment not closed yet, can't be sorted")
-	}
-	seg.DataState = SORTED
-	return nil
+func (e *Segment) onNewBlock(entry *Block) {
+	e.IdIndex[entry.Id] = len(e.BlockSet)
+	e.BlockSet = append(e.BlockSet, entry)
+}
+
+// Safe
+func (e *Segment) SimpleUpgrade(exIndice []*ExternalIndex) error {
+	ctx := newUpgradeSegmentCtx(e, exIndice)
+	return e.Table.Catalog.onCommitRequest(ctx)
+	// return e.Upgrade(e.Table.Catalog.NextUncommitId(), exIndice, true)
 }
 
-func (seg *Segment) GetMaxBlkID() uint64 {
-	blkId := uint64(0)
-	for bid := range seg.IdMap {
-		if bid > blkId {
-			blkId = bid
+// Not safe
+func (e *Segment) FirstInFullBlock() *Block {
+	if len(e.BlockSet) == 0 {
+		return nil
+	}
+	var found *Block
+	for i := len(e.BlockSet) - 1; i >= 0; i-- {
+		if !e.BlockSet[i].IsFull() {
+			found = e.BlockSet[i]
+		} else {
+			break
 		}
 	}
+	return found
+}
 
-	return blkId
+// Not safe
+func (e *Segment) HasMaxBlocks() bool {
+	return e.IsSorted() || len(e.BlockSet) == int(e.Table.Schema.SegmentMaxBlocks)
 }
 
-func (seg *Segment) ReplayState() {
-	if seg.DataState >= CLOSED {
-		return
+// func (e *Segment) Upgrade(tranId uint64, exIndice []*ExternalIndex, autoCommit bool) error {
+func (e *Segment) prepareUpgrade(ctx *upgradeSegmentCtx) (LogEntry, error) {
+	tranId := e.Table.Catalog.NextUncommitId()
+	e.RLock()
+	if !e.HasMaxBlocks() {
+		e.RUnlock()
+		return nil, UpgradeInfullSegmentErr
 	}
-	if len(seg.Blocks) == 0 {
-		seg.DataState = EMPTY
-		return
+	if e.IsSorted() {
+		e.RUnlock(); return nil, UpgradeNotNeededErr
+	}
-	fullBlkCnt := 0
-	for _, blk := range seg.Blocks {
-		if blk.DataState == FULL {
-			fullBlkCnt++
+	for _, blk := range e.BlockSet {
+		if !blk.IsFull() {
+			e.RUnlock(); return nil, UpgradeInfullSegmentErr
+		}
+	}
-	if fullBlkCnt == 0 {
-		seg.DataState = EMPTY
-	} else if fullBlkCnt < int(seg.Table.Conf.SegmentMaxBlocks) {
-		seg.DataState = PARTIAL
+	e.RUnlock()
+	e.Lock()
+	defer e.Unlock()
+	var newOp OpT
+	switch e.CommitInfo.Op {
+	case OpCreate:
+		newOp = OpUpgradeSorted
+	default:
+		return nil, UpgradeNotNeededErr
+	}
+	cInfo := &CommitInfo{
+		TranId:   tranId,
+		CommitId: tranId,
+		Op:       newOp,
+	}
+	if ctx.exIndice == nil {
+		id, ok := e.calcAppliedIndex()
+		if ok {
+			cInfo.AppliedIndex = &ExternalIndex{
+				Id: SimpleBatchId(id),
+			}
+		}
 	} else {
-		seg.DataState = CLOSED
+		cInfo.ExternalIndex = ctx.exIndice[0]
+		if len(ctx.exIndice) > 1 {
+			cInfo.PrevIndex = ctx.exIndice[1]
+		}
 	}
+	e.onNewCommit(cInfo)
+	e.Table.Catalog.commitMu.Lock()
+	defer e.Table.Catalog.commitMu.Unlock()
+	logEntry := e.Table.Catalog.prepareCommitEntry(e, ETUpgradeSegment, e)
+	return logEntry, nil
 }
 
-func (seg *Segment) Copy(ctx CopyCtx) *Segment {
-	if ctx.Ts == 0 {
-		ctx.Ts = NowMicro()
-	}
-	seg.RLock()
-	defer seg.RUnlock()
-	newSeg := NewSegment(seg.Table, seg.ID)
-	newSeg.TimeStamp = seg.TimeStamp
-	newSeg.MaxBlockCount = seg.MaxBlockCount
-	newSeg.DataState = seg.DataState
-	newSeg.BoundSate = seg.BoundSate
-	for _, v := range seg.Blocks {
-		if !v.Select(ctx.Ts) {
-			continue
+// Not safe
+// One writer, multi-readers
+func (e *Segment) SimpleGetOrCreateNextBlock(from *Block) *Block {
+	if len(e.BlockSet) == 0 {
+		return e.SimpleCreateBlock()
+	}
+	var ret *Block
+	for i := len(e.BlockSet) - 1; i >= 0; i-- {
+		blk := e.BlockSet[i]
+		if !blk.IsFull() && from.Less(blk) {
+			ret = blk
+		} else {
+			break
 		}
-		blk, _ := seg.cloneBlockNoLock(v.ID, ctx)
-		newSeg.IdMap[v.GetID()] = len(newSeg.Blocks)
-		newSeg.Blocks = append(newSeg.Blocks, blk)
 	}
+	if ret != nil || e.HasMaxBlocks() {
+		return ret
+	}
+	return e.SimpleCreateBlock()
+}
 
-	return newSeg
+// Safe
+func (e *Segment) SimpleGetBlock(id uint64) *Block {
+	e.RLock()
+	defer e.RUnlock()
+	return e.GetBlock(id, MinUncommitId)
+}
+
+func (e *Segment) GetBlock(id, tranId uint64) *Block {
+	pos, ok := e.IdIndex[id]
+	if !ok {
+		return nil
+	}
+	entry := e.BlockSet[pos]
+	return entry
 }
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/segment_test.go b/pkg/vm/engine/aoe/storage/metadata/v1/segment_test.go
deleted file mode 100644
index db21e7ad1e4a30006c11864d67fb1b467df6169a..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v1/segment_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metadata
-
-import (
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestSegment(t *testing.T) {
-	mu := &sync.RWMutex{}
-	info := MockInfo(mu, blockRowCount, segmentBlockCount)
-	schema := MockSchema(2)
-	t1 := NowMicro()
-	tbl := NewTable(NextGlobalSeqNum(), info, schema)
-	seg1 := NewSegment(tbl, info.Sequence.GetSegmentID())
-	assert.Equal(t, seg1.Table.ID, seg1.GetTableID())
-	assert.Equal(t, seg1.GetID(), seg1.AsCommonID().SegmentID)
-	seg2 := NewSegment(tbl, info.Sequence.GetSegmentID())
-	seg2.ReplayState()
-	blk1 := NewBlock(info.Sequence.GetBlockID(), seg2)
-	err := seg1.RegisterBlock(blk1)
-	assert.NotNil(t, err)
-
-	for i := 0; i < int(seg1.MaxBlockCount); i++ {
-		blk1, err = seg1.CreateBlock()
-		assert.Nil(t, err)
-		err = seg1.RegisterBlock(blk1)
-		assert.Nil(t, err)
-	}
-	blk2 := NewBlock(info.Sequence.GetBlockID(), seg1)
-	err = seg1.RegisterBlock(blk2)
-	assert.NotNil(t, err)
-	seg2.Table = NewTable(NextGlobalSeqNum(), info, schema, 10)
-	assert.NotNil(t, seg2.RegisterBlock(blk2))
-	seg2.Table = tbl
-	//t.Log(err)
-
-	_, err = seg1.ReferenceBlock(blk1.ID)
-	assert.Nil(t, err)
-	_, err = seg1.ReferenceBlock(blk2.ID)
-	assert.NotNil(t, err)
-	_ = seg1.String()
-
-	ids := seg1.BlockIDs(t1)
-	assert.Equal(t, len(ids), 0)
-	// ts := NowMicro()
-	ids = seg1.BlockIDs()
-	assert.Equal(t, len(ids), int(seg1.MaxBlockCount))
-
-	list := seg1.BlockIDList(t1)
-	assert.Equal(t, len(list), 0)
-	list = seg1.BlockIDList()
-	assert.Equal(t, len(list), int(seg1.MaxBlockCount))
-
-	_, err = seg1.CloneBlock(1000, CopyCtx{})
-	assert.NotNil(t, err)
-	//for id := range seg1.IdMap {
-	//	t.Log(id)
-	//}
-	_, err = seg1.CloneBlock(blk2.ID-1, CopyCtx{})
-	assert.Nil(t, err)
-
-	for i := 0; i < int(seg2.MaxBlockCount)-1; i++ {
-		blk, err := seg2.CreateBlock()
-		assert.Nil(t, err)
-		err = seg2.RegisterBlock(blk)
-		assert.Nil(t, err)
-		assert.Nil(t, blk.SetCount(blockRowCount))
-		seg2.ReplayState()
-	}
-	assert.False(t, seg2.TryClose())
-	assert.NotNil(t, seg2.TrySorted())
-	blk3, err := seg2.CreateBlock()
-	assert.Nil(t, blk3.Attach())
-	assert.NotNil(t, seg2.RegisterBlock(blk3))
-	assert.Nil(t, blk3.Detach())
-	//t.Log(blk3.GetBoundState())
-	blk3.ID = blk3.ID - 1
-	assert.NotNil(t, seg2.RegisterBlock(blk3))
-	blk3.ID = blk3.ID + 1
-	assert.Nil(t, blk3.Detach())
-	assert.Nil(t, blk3.SetCount(blk3.MaxRowCount))
-	blk3.DataState = FULL
-	assert.True(t, seg2.HasUncommitted())
-	assert.Nil(t, seg2.RegisterBlock(blk3))
-	assert.Equal(t, seg2.DataState, seg2.Copy(CopyCtx{Ts: NowMicro()}).DataState)
-	assert.Nil(t, blk3.Delete(NowMicro()))
-	time.Sleep(time.Duration(1) * time.Microsecond)
-	assert.Equal(t, seg2.DataState, seg2.Copy(CopyCtx{}).DataState)
-	assert.True(t, seg2.TryClose())
-	assert.Nil(t, seg2.TrySorted())
-
-	assert.Equal(t, blk3.ID, seg2.GetMaxBlkID())
-	seg2.ReplayState()
-	m, err := seg2.Marshal()
-	assert.Nil(t, err)
-	assert.Equal(t, 795, len(m))
-	assert.NotNil(t, seg2.GetActiveBlk())
-	assert.NotNil(t, seg2.NextActiveBlk())
-	seg2.ActiveBlk = int(segmentBlockCount) - 1
-	assert.Nil(t, seg2.NextActiveBlk())
-	seg2.ActiveBlk = int(segmentBlockCount)
-	assert.Nil(t, seg2.GetActiveBlk())
-	assert.False(t, seg2.HasUncommitted())
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/table.go b/pkg/vm/engine/aoe/storage/metadata/v1/table.go
index dc0d0381ca06606a30a863d590da025c3cef90e2..22739e31fec7ccdbf92c44c44413637ef632276b 100644
--- a/pkg/vm/engine/aoe/storage/metadata/v1/table.go
+++ b/pkg/vm/engine/aoe/storage/metadata/v1/table.go
@@ -10,463 +10,460 @@
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
 package metadata
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
-	"io"
 	"matrixone/pkg/logutil"
-	"strconv"
-	"strings"
-	"sync/atomic"
-	"unsafe"
-
-	"github.com/google/btree"
-)
-
-var (
-	GlobalSeqNum         uint64 = 0
-	ErrParseTableCkpFile        = errors.New("parse table file name error")
+	"matrixone/pkg/vm/engine/aoe/storage/common"
+	"matrixone/pkg/vm/engine/aoe/storage/logstore"
+	"sync"
 )
 
-//func MakeTableCkpFile(tid, version uint64) string {
-//	return fmt.Sprintf("%d_v%d", tid, version)
-//}
-
-func ParseTableCkpFile(name string) (tid, version uint64, err error) {
-	strs := strings.Split(name, "_v")
-	if len(strs) != 2 {
-		return tid, version, ErrParseTableCkpFile
-	}
-	if tid, err = strconv.ParseUint(strs[0], 10, 64); err != nil {
-		return tid, version, err
-	}
-	if version, err = strconv.ParseUint(strs[1], 10, 64); err != nil {
-		return tid, version, err
-	}
-	return tid, version, err
+type tableLogEntry struct {
+	BaseEntry
+	Prev    *Table
+	Catalog *Catalog `json:"-"`
 }
 
-func NextGlobalSeqNum() uint64 {
-	return atomic.AddUint64(&GlobalSeqNum, uint64(1))
+func (e *tableLogEntry) Marshal() ([]byte, error) {
+	return json.Marshal(e)
 }
 
-func GetGlobalSeqNum() uint64 {
-	return atomic.LoadUint64(&GlobalSeqNum)
+func (e *tableLogEntry) Unmarshal(buf []byte) error {
+	return json.Unmarshal(buf, e)
 }
 
-type GenericTableWrapper struct {
-	ID uint64
-	TimeStamp
-	LogHistory
+func (e *tableLogEntry) ToEntry() *Table {
+	e.BaseEntry.CommitInfo.SetNext(e.Prev.CommitInfo)
+	e.Prev.BaseEntry = e.BaseEntry
+	return e.Prev
 }
 
-func NewTable(logIdx uint64, info *MetaInfo, schema *Schema, ids ...uint64) *Table {
-	var id uint64
-	if len(ids) == 0 {
-		id = info.Sequence.GetTableID()
-	} else {
-		id = ids[0]
-	}
-	tbl := &Table{
-		ID:         id,
-		Segments:   make([]*Segment, 0),
-		IdMap:      make(map[uint64]int),
-		TimeStamp:  *NewTimeStamp(),
-		Info:       info,
-		Conf:       info.Conf,
-		Schema:     schema,
-		Stat:       new(Statistics),
-		LogHistory: LogHistory{CreatedIndex: logIdx},
-	}
-	return tbl
-}
+// func createTableHandle(r io.Reader, meta *LogEntryMeta) (LogEntry, int64, error) {
+// 	entry := Table{}
+// 	logEntry
+// 	// entry.Unmarshal()
 
-func (tbl *Table) Marshal() ([]byte, error) {
-	return json.Marshal(tbl)
-}
+// }
 
-func (tbl *Table) Unmarshal(buf []byte) error {
-	return json.Unmarshal(buf, tbl)
+type Table struct {
+	BaseEntry
+	Schema     *Schema
+	SegmentSet []*Segment
+	IdIndex    map[uint64]int `json:"-"`
+	Catalog    *Catalog       `json:"-"`
 }
 
-func (tbl *Table) ReadFrom(r io.Reader) (int64, error) {
-	decoder := json.NewDecoder(r)
-	err := decoder.Decode(tbl)
-	return decoder.InputOffset(), err
-}
-
-func (tbl *Table) GetID() uint64 {
-	return tbl.ID
-}
-
-func (tbl *Table) GetRows() uint64 {
-	ptr := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&tbl.Stat)))
-	return (*Statistics)(ptr).Rows
-}
-
-func (tbl *Table) Less(item btree.Item) bool {
-	return tbl.Schema.Name < (item.(*Table)).Schema.Name
-}
-
-func (tbl *Table) GetReplayIndex() *LogIndex {
-	ptr := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&tbl.ReplayIndex)))
-	if ptr == nil {
+func NewTableEntry(catalog *Catalog, schema *Schema, tranId uint64, exIndex *ExternalIndex) *Table {
+	schema.BlockMaxRows = catalog.Cfg.BlockMaxRows
+	schema.SegmentMaxBlocks = catalog.Cfg.SegmentMaxBlocks
+	e := &Table{
+		BaseEntry: BaseEntry{
+			Id: catalog.NextTableId(),
+			CommitInfo: &CommitInfo{
+				TranId:        tranId,
+				CommitId:      tranId,
+				SSLLNode:      *common.NewSSLLNode(),
+				Op:            OpCreate,
+				ExternalIndex: exIndex,
+			},
+		},
+		Schema:     schema,
+		Catalog:    catalog,
+		SegmentSet: make([]*Segment, 0),
+		IdIndex:    make(map[uint64]int),
+	}
+	return e
+}
+
+func NewEmptyTableEntry(catalog *Catalog) *Table {
+	e := &Table{
+		BaseEntry: BaseEntry{
+			CommitInfo: &CommitInfo{
+				SSLLNode: *common.NewSSLLNode(),
+			},
+		},
+		SegmentSet: make([]*Segment, 0),
+		IdIndex:    make(map[uint64]int),
+		Catalog:    catalog,
+	}
+	return e
+}
+
+// Threadsafe
+// It is used to take a snapshot of table base on a commit id. It goes through
+// the version chain to find a "safe" commit version and create a view base on
+// that version.
+// v2(commitId=7) -> v1(commitId=4) -> v0(commitId=2)
+//      |                 |                  |
+//      |                 |                   -------- CommittedView [0,2]
+//      |                  --------------------------- CommittedView [4,6]
+//       --------------------------------------------- CommittedView [7,+oo)
+func (e *Table) CommittedView(id uint64) *Table {
+	// TODO: if baseEntry op is drop, should introduce an index to
+	// indicate weather to return nil
+	baseEntry := e.UseCommitted(id)
+	if baseEntry == nil {
 		return nil
 	}
-	return (*LogIndex)(ptr)
-}
-
-func (tbl *Table) ResetReplayIndex() {
-	ptr := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&tbl.ReplayIndex)))
-	if ptr == nil {
-		panic("logic error")
-	}
-	var netIndex *LogIndex
-	nptr := (*unsafe.Pointer)(unsafe.Pointer(&netIndex))
-	if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&tbl.ReplayIndex)), ptr, *nptr) {
-		panic("logic error")
+	view := &Table{
+		Schema:     e.Schema,
+		BaseEntry:  *baseEntry,
+		SegmentSet: make([]*Segment, 0),
 	}
-}
-
-func (tbl *Table) AppendStat(rows, size uint64) *Statistics {
-	ptr := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&tbl.Stat)))
-	stat := (*Statistics)(ptr)
-	newStat := new(Statistics)
-	newStat.Rows = stat.Rows + rows
-	newStat.Size = stat.Size + size
-	nptr := (*unsafe.Pointer)(unsafe.Pointer(&newStat))
-	for !atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&tbl.Stat)), ptr, *nptr) {
-		ptr = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&tbl.Stat)))
-		stat = (*Statistics)(ptr)
-		newStat.Rows = stat.Rows + rows
-		newStat.Size = stat.Size + size
+	e.RLock()
+	segs := make([]*Segment, 0, len(e.SegmentSet))
+	for _, seg := range e.SegmentSet {
+		segs = append(segs, seg)
 	}
-	return newStat
-}
-
-func (tbl *Table) GetSize() uint64 {
-	ptr := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&tbl.Stat)))
-	return (*Statistics)(ptr).Size
-}
-
-func (tbl *Table) CloneSegment(segmentId uint64, ctx CopyCtx) (seg *Segment, err error) {
-	tbl.RLock()
-	seg, err = tbl.referenceSegmentNoLock(segmentId)
-	if err != nil {
-		tbl.RUnlock()
-		return nil, err
-	}
-	tbl.RUnlock()
-	segCpy := seg.Copy(ctx)
-	if !ctx.Attached {
-		err = segCpy.Detach()
+	e.RUnlock()
+	for _, seg := range segs {
+		segView := seg.CommittedView(id)
+		if segView == nil {
+			continue
+		}
+		view.SegmentSet = append(view.SegmentSet, segView)
 	}
-	return segCpy, err
-}
-
-func (tbl *Table) ReferenceBlock(segmentId, blockId uint64) (blk *Block, err error) {
-	tbl.RLock()
-	seg, err := tbl.referenceSegmentNoLock(segmentId)
-	if err != nil {
-		tbl.RUnlock()
-		return nil, err
+	return view
+}
+
+// Not threadsafe, and not needed
+// Only used during data replay by the catalog replayer
+func (e *Table) rebuild(catalog *Catalog) {
+	e.Catalog = catalog
+	e.IdIndex = make(map[uint64]int)
+	for i, seg := range e.SegmentSet {
+		catalog.Sequence.TryUpdateSegmentId(seg.Id)
+		seg.rebuild(e)
+		e.IdIndex[seg.Id] = i
+	}
+}
+
+// Threadsafe
+// It should be applied on a table that was previously soft-deleted
+// It is always driven by engine internal scheduler. It means all the
+// table related data resources were deleted. A hard-deleted table will
+// be deleted from catalog later
+func (e *Table) HardDelete() error {
+	ctx := newDeleteTableCtx(e)
+	return e.Catalog.onCommitRequest(ctx)
+}
+
+func (e *Table) prepareHardDelete(ctx *deleteTableCtx) (LogEntry, error) {
+	cInfo := &CommitInfo{
+		CommitId: e.Catalog.NextUncommitId(),
+		Op:       OpHardDelete,
+		SSLLNode: *common.NewSSLLNode(),
+	}
+	e.Catalog.commitMu.Lock()
+	defer e.Catalog.commitMu.Unlock()
+	e.Lock()
+	defer e.Unlock()
+	if e.IsHardDeletedLocked() {
+		logutil.Warnf("HardDelete %d but already hard deleted", e.Id)
+		return nil, TableNotFoundErr
+	}
+	if !e.IsSoftDeletedLocked() {
+		panic("logic error: Cannot hard delete entry that not soft deleted")
+	}
+	e.onNewCommit(cInfo)
+	logEntry := e.Catalog.prepareCommitEntry(e, ETHardDeleteTable, e)
+	return logEntry, nil
+}
+
+// Simple* wrappes simple usage of wrapped operation
+// It is driven by external command. The engine then schedules a GC task to hard delete
+// related resources.
+func (e *Table) SimpleSoftDelete(exIndex *ExternalIndex) error {
+	ctx := newDropTableCtx(e.Schema.Name, exIndex)
+	ctx.table = e
+	return e.Catalog.onCommitRequest(ctx)
+}
+
+func (e *Table) prepareSoftDelete(ctx *dropTableCtx) (LogEntry, error) {
+	commitId := e.Catalog.NextUncommitId()
+	cInfo := &CommitInfo{
+		TranId:        commitId,
+		CommitId:      commitId,
+		ExternalIndex: ctx.exIndex,
+		Op:            OpSoftDelete,
+		SSLLNode:      *common.NewSSLLNode(),
+	}
+	e.Catalog.commitMu.Lock()
+	defer e.Catalog.commitMu.Unlock()
+	e.Lock()
+	defer e.Unlock()
+	if e.IsSoftDeletedLocked() {
+		return nil, TableNotFoundErr
+	}
+	e.onNewCommit(cInfo)
+	logEntry := e.Catalog.prepareCommitEntry(e, ETSoftDeleteTable, e)
+	return logEntry, nil
+}
+
+// Not safe
+func (e *Table) Marshal() ([]byte, error) {
+	return json.Marshal(e)
+}
+
+// Not safe
+func (e *Table) Unmarshal(buf []byte) error {
+	return json.Unmarshal(buf, e)
+}
+
+// Not safe
+func (e *Table) String() string {
+	buf, _ := e.Marshal()
+	return string(buf)
+}
+
+// Not safe
+// Usually it is used during creating a table. We need to commit the new table entry
+// to the store.
+func (e *Table) ToLogEntry(eType LogEntryType) LogEntry {
+	var buf []byte
+	switch eType {
+	case ETCreateTable:
+		buf, _ = e.Marshal()
+	case ETSoftDeleteTable:
+		if !e.IsSoftDeletedLocked() {
+			panic("logic error")
+		}
+		entry := tableLogEntry{
+			BaseEntry: e.BaseEntry,
+		}
+		buf, _ = entry.Marshal()
+	case ETHardDeleteTable:
+		if !e.IsHardDeletedLocked() {
+			panic("logic error")
+		}
+		entry := tableLogEntry{
+			BaseEntry: e.BaseEntry,
+		}
+		buf, _ = entry.Marshal()
+	default:
+		panic("not supported")
 	}
-	tbl.RUnlock()
-
-	blk, err = seg.ReferenceBlock(blockId)
-
-	return blk, err
+	logEntry := logstore.NewAsyncBaseEntry()
+	logEntry.Meta.SetType(eType)
+	logEntry.Unmarshal(buf)
+	return logEntry
 }
 
-func (tbl *Table) ReferenceSegment(segmentId uint64) (seg *Segment, err error) {
-	tbl.RLock()
-	defer tbl.RUnlock()
-	seg, err = tbl.referenceSegmentNoLock(segmentId)
-	return seg, err
-}
-
-func (tbl *Table) referenceSegmentNoLock(segmentId uint64) (seg *Segment, err error) {
-	idx, ok := tbl.IdMap[segmentId]
-	if !ok {
-		return nil, errors.New(fmt.Sprintf("specified segment %d not found in table %d", segmentId, tbl.ID))
+// Safe
+func (e *Table) SimpleGetCurrSegment() *Segment {
+	e.RLock()
+	if len(e.SegmentSet) == 0 {
+		e.RUnlock()
+		return nil
 	}
-	seg = tbl.Segments[idx]
-	return seg, nil
+	seg := e.SegmentSet[len(e.SegmentSet)-1]
+	e.RUnlock()
+	return seg
 }
 
-func (tbl *Table) GetSegmentBlockIDs(segmentId uint64, args ...int64) map[uint64]uint64 {
-	tbl.RLock()
-	seg, err := tbl.referenceSegmentNoLock(segmentId)
-	tbl.RUnlock()
-	if err != nil {
-		return make(map[uint64]uint64, 0)
+// Not safe and no need
+// Only used during data replay
+// TODO: Only compatible with v1. Remove later
+func (e *Table) GetReplayIndex() *LogIndex {
+	for i := len(e.SegmentSet) - 1; i >= 0; i-- {
+		seg := e.SegmentSet[i]
+		idx := seg.GetReplayIndex()
+		if idx != nil {
+			return idx
+		}
 	}
-	return seg.BlockIDs(args...)
+	return nil
 }
 
-func (tbl *Table) SegmentIDs(args ...int64) map[uint64]uint64 {
-	var ts int64
-	if len(args) == 0 {
-		ts = NowMicro()
-	} else {
-		ts = args[0]
-	}
-	ids := make(map[uint64]uint64)
-	tbl.RLock()
-	defer tbl.RUnlock()
-	for _, seg := range tbl.Segments {
-		if !seg.Select(ts) {
-			continue
+// Safe
+// TODO: Only compatible with v1. Remove later
+func (e *Table) GetAppliedIndex(rwmtx *sync.RWMutex) (uint64, bool) {
+	if rwmtx == nil {
+		e.RLock()
+		defer e.RUnlock()
+	}
+	if e.IsDeletedLocked() {
+		return e.BaseEntry.GetAppliedIndex()
+	}
+	var (
+		id uint64
+		ok bool
+	)
+	for i := len(e.SegmentSet) - 1; i >= 0; i-- {
+		seg := e.SegmentSet[i]
+		id, ok = seg.GetAppliedIndex(nil)
+		if ok {
+			break
 		}
-		ids[seg.ID] = seg.ID
 	}
-	return ids
-}
-
-func (tbl *Table) CreateSegment() (seg *Segment, err error) {
-	seg = NewSegment(tbl, tbl.Info.Sequence.GetSegmentID())
-	return seg, err
-}
-
-func (tbl *Table) NextActiveSegment() *Segment {
-	var seg *Segment
-	if tbl.ActiveSegment >= len(tbl.Segments) {
-		return seg
+	if !ok {
+		return e.BaseEntry.GetAppliedIndex()
 	}
-	tbl.ActiveSegment++
-	return tbl.GetActiveSegment()
+	return id, ok
 }
 
-func (tbl *Table) GetActiveSegment() *Segment {
-	if tbl.ActiveSegment >= len(tbl.Segments) {
-		return nil
+// Not safe. One writer, multi-readers
+func (e *Table) SimpleCreateBlock() (*Block, *Segment) {
+	var prevSeg *Segment
+	currSeg := e.SimpleGetCurrSegment()
+	if currSeg == nil || currSeg.HasMaxBlocks() {
+		prevSeg = currSeg
+		currSeg = e.SimpleCreateSegment()
 	}
-	seg := tbl.Segments[tbl.ActiveSegment]
-	blk := seg.GetActiveBlk()
-	if blk == nil && uint64(len(seg.Blocks)) == tbl.Info.Conf.SegmentMaxBlocks {
-		return nil
-	}
-	return seg
+	blk := currSeg.SimpleCreateBlock()
+	return blk, prevSeg
 }
 
-func (tbl *Table) GetInfullSegment() (seg *Segment, err error) {
-	tbl.RLock()
-	defer tbl.RUnlock()
-	for _, seg := range tbl.Segments {
-		if seg.DataState == EMPTY || seg.DataState == PARTIAL {
-			return seg, nil
-		}
+func (e *Table) getFirstInfullSegment(from *Segment) (*Segment, *Segment) {
+	if len(e.SegmentSet) == 0 {
+		return nil, nil
 	}
-	return nil, errors.New(fmt.Sprintf("no infull segment found in table %d", tbl.ID))
-}
-
-func (tbl *Table) String() string {
-	s := fmt.Sprintf("Tbl(%d) %d", tbl.ID, tbl.ActiveSegment)
-	s += "["
-	for i, seg := range tbl.Segments {
-		if i != 0 {
-			s += "\n"
+	var curr, next *Segment
+	for i := len(e.SegmentSet) - 1; i >= 0; i-- {
+		seg := e.SegmentSet[i]
+		if seg.Appendable() && from.LE(seg) {
+			curr, next = seg, curr
+		} else {
+			break
 		}
-		s += seg.String()
 	}
-	if len(tbl.Segments) > 0 {
-		s += "\n"
-	}
-	s += "]"
-	return s
+	return curr, next
 }
 
-func (tbl *Table) RegisterSegment(seg *Segment) error {
-	if tbl.ID != seg.GetTableID() {
-		return errors.New(fmt.Sprintf("table id mismatch %d:%d", tbl.ID, seg.GetTableID()))
+// Not safe. One writer, multi-readers
+func (e *Table) SimpleGetOrCreateNextBlock(from *Block) *Block {
+	var fromSeg *Segment
+	if from != nil {
+		fromSeg = from.Segment
 	}
-	tbl.Lock()
-	defer tbl.Unlock()
-
-	err := seg.Attach()
-	if err != nil {
-		return err
+	curr, next := e.getFirstInfullSegment(fromSeg)
+	// logutil.Infof("%s, %s", curr.PString(PPL0), fromSeg.PString(PPL1))
+	if curr == nil {
+		curr = e.SimpleCreateSegment()
 	}
-
-	_, ok := tbl.IdMap[seg.ID]
-	if ok {
-		return errors.New(fmt.Sprintf("Duplicate segment %d found in table %d", seg.GetID(), tbl.ID))
+	blk := curr.SimpleGetOrCreateNextBlock(from)
+	if blk != nil {
+		return blk
 	}
-	tbl.IdMap[seg.GetID()] = len(tbl.Segments)
-	tbl.Segments = append(tbl.Segments, seg)
-	atomic.StoreUint64(&tbl.SegmentCnt, uint64(len(tbl.Segments)))
-	tbl.UpdateVersion()
-	return nil
+	if next == nil {
+		next = e.SimpleCreateSegment()
+	}
+	return next.SimpleGetOrCreateNextBlock(nil)
 }
 
-func (tbl *Table) GetSegmentCount() uint64 {
-	return atomic.LoadUint64(&tbl.SegmentCnt)
+func (e *Table) SimpleCreateSegment() *Segment {
+	ctx := newCreateSegmentCtx(e)
+	if err := e.Catalog.onCommitRequest(ctx); err != nil {
+		return nil
+	}
+	return ctx.segment
 }
 
-func (tbl *Table) GetMaxSegIDAndBlkID() (uint64, uint64) {
-	blkid := uint64(0)
-	segid := uint64(0)
-	for _, seg := range tbl.Segments {
-		sid := seg.GetID()
-		maxBlkId := seg.GetMaxBlkID()
-		if maxBlkId > blkid {
-			blkid = maxBlkId
-		}
-		if sid > segid {
-			segid = sid
-		}
+// Safe
+func (e *Table) SimpleGetSegmentIds() []uint64 {
+	e.RLock()
+	defer e.RUnlock()
+	arrLen := len(e.SegmentSet)
+	ret := make([]uint64, arrLen)
+	for i, seg := range e.SegmentSet {
+		ret[i] = seg.Id
 	}
-
-	return segid, blkid
+	return ret
 }
 
-func (tbl *Table) UpdateVersion() {
-	atomic.AddUint64(&tbl.CheckPoint, uint64(1))
+// Safe
+func (e *Table) SimpleGetSegmentCount() int {
+	e.RLock()
+	defer e.RUnlock()
+	return len(e.SegmentSet)
 }
 
-func (tbl *Table) GetFileName() string {
-	return fmt.Sprintf("%d_v%d", tbl.ID, tbl.CheckPoint)
+func (e *Table) prepareCreateSegment(ctx *createSegmentCtx) (LogEntry, error) {
+	se := newSegmentEntry(e.Catalog, e, e.Catalog.NextUncommitId(), ctx.exIndex)
+	logEntry := se.ToLogEntry(ETCreateSegment)
+	e.Catalog.commitMu.Lock()
+	defer e.Catalog.commitMu.Unlock()
+	e.Lock()
+	e.onNewSegment(se)
+	e.Unlock()
+	e.Catalog.prepareCommitLog(se, logEntry)
+	ctx.segment = se
+	return logEntry, nil
 }
 
-func (tbl *Table) GetLastFileName() string {
-	return fmt.Sprintf("%d_v%d", tbl.ID, tbl.CheckPoint-1)
+func (e *Table) onNewSegment(entry *Segment) {
+	e.IdIndex[entry.Id] = len(e.SegmentSet)
+	e.SegmentSet = append(e.SegmentSet, entry)
 }
 
-func (tbl *Table) Serialize(w io.Writer) error {
-	bytes, err := tbl.Marshal()
-	if err != nil {
-		return err
+// Safe
+func (e *Table) SimpleGetBlock(segId, blkId uint64) (*Block, error) {
+	seg := e.SimpleGetSegment(segId)
+	if seg == nil {
+		return nil, SegmentNotFoundErr
 	}
-	_, err = w.Write(bytes)
-	return err
-}
-
-func (tbl *Table) GetResourceType() ResourceType {
-	return ResTable
-}
-
-func (tbl *Table) GetTableId() uint64 {
-	return tbl.ID
+	blk := seg.SimpleGetBlock(blkId)
+	if blk == nil {
+		return nil, BlockNotFoundErr
+	}
+	return blk, nil
 }
 
-func (tbl *Table) LiteCopy() *Table {
-	newTbl := &Table{
-		ID:         tbl.ID,
-		TimeStamp:  tbl.TimeStamp,
-		LogHistory: tbl.LogHistory,
-	}
-	return newTbl
+// Safe
+func (e *Table) SimpleGetSegment(id uint64) *Segment {
+	e.RLock()
+	defer e.RUnlock()
+	return e.GetSegment(id, MinUncommitId)
 }
 
-func (tbl *Table) Copy(ctx CopyCtx) *Table {
-	if ctx.Ts == 0 {
-		ctx.Ts = NowMicro()
-	}
-	newTbl := NewTable(tbl.CreatedIndex, tbl.Info, tbl.Schema, tbl.ID)
-	newTbl.TimeStamp = tbl.TimeStamp
-	newTbl.CheckPoint = tbl.CheckPoint
-	newTbl.BoundSate = tbl.BoundSate
-	newTbl.LogHistory = tbl.LogHistory
-	newTbl.Conf = tbl.Conf
-	for _, v := range tbl.Segments {
-		if !v.Select(ctx.Ts) {
-			continue
-		}
-		seg, _ := tbl.CloneSegment(v.ID, ctx)
-		newTbl.IdMap[seg.GetID()] = len(newTbl.Segments)
-		newTbl.Segments = append(newTbl.Segments, seg)
+func (e *Table) GetSegment(id, tranId uint64) *Segment {
+	pos, ok := e.IdIndex[id]
+	if !ok {
+		return nil
 	}
-	newTbl.SegmentCnt = uint64(len(newTbl.Segments))
-
-	return newTbl
+	entry := e.SegmentSet[pos]
+	return entry
 }
 
-func (tbl *Table) Replay() {
-	ts := NowMicro()
-	if len(tbl.Schema.Indices) > 0 {
-		if tbl.Schema.Indices[len(tbl.Schema.Indices)-1].ID > tbl.Info.Sequence.NextIndexID {
-			tbl.Info.Sequence.NextIndexID = tbl.Schema.Indices[len(tbl.Schema.Indices)-1].ID
-		}
-	}
-	maxTblSegId, maxTblBlkId := tbl.GetMaxSegIDAndBlkID()
-	if tbl.ID > tbl.Info.Sequence.NextTableID {
-		tbl.Info.Sequence.NextTableID = tbl.ID
-	}
-	if maxTblSegId > tbl.Info.Sequence.NextSegmentID {
-		tbl.Info.Sequence.NextSegmentID = maxTblSegId
-	}
-	if maxTblBlkId > tbl.Info.Sequence.NextBlockID {
-		tbl.Info.Sequence.NextBlockID = maxTblBlkId
-	}
-	if tbl.IsDeleted(ts) {
-		tbl.Info.Tombstone[tbl.ID] = true
-	} else {
-		tbl.Info.TableIds[tbl.ID] = true
-		tbl.Info.NameMap[tbl.Schema.Name] = tbl.ID
-		tbl.Info.NameTree.ReplaceOrInsert(tbl)
-	}
-	tbl.IdMap = make(map[uint64]int)
-	segFound := false
-	for idx, seg := range tbl.Segments {
-		tbl.IdMap[seg.GetID()] = idx
-		seg.Table = tbl
-		blkFound := false
-		for iblk, blk := range seg.Blocks {
-			if !blkFound {
-				if blk.DataState < FULL {
-					blkFound = true
-					seg.ActiveBlk = iblk
-				} else {
-					seg.ActiveBlk++
-				}
-			}
-			blk.Segment = seg
-		}
-		if !segFound {
-			if seg.DataState < FULL {
-				segFound = true
-				tbl.ActiveSegment = idx
-			} else if seg.DataState == FULL {
-				blk := seg.GetActiveBlk()
-				if blk != nil {
-					tbl.ActiveSegment = idx
-					segFound = true
-				}
-			} else {
-				tbl.ActiveSegment++
-			}
+// Not safe
+func (e *Table) PString(level PPLevel) string {
+	s := fmt.Sprintf("<Table[%s]>(%s)(Cnt=%d)", e.Schema.Name, e.BaseEntry.PString(level), len(e.SegmentSet))
+	if level > PPL0 && len(e.SegmentSet) > 0 {
+		s = fmt.Sprintf("%s{", s)
+		for _, seg := range e.SegmentSet {
+			s = fmt.Sprintf("%s\n%s", s, seg.PString(level))
 		}
+		s = fmt.Sprintf("%s\n}", s)
 	}
+	return s
 }
 
-func MockTable(info *MetaInfo, schema *Schema, blkCnt uint64) *Table {
+func MockTable(catalog *Catalog, schema *Schema, blkCnt uint64, idx *LogIndex) *Table {
 	if schema == nil {
 		schema = MockSchema(2)
 	}
-	tbl, _ := info.CreateTable(atomic.AddUint64(&GlobalSeqNum, uint64(1)), schema)
-	if err := info.RegisterTable(tbl); err != nil {
+	if idx == nil {
+		idx = &LogIndex{
+			Id: SimpleBatchId(common.NextGlobalSeqNum()),
+		}
+	}
+	tbl, err := catalog.SimpleCreateTable(schema, idx)
+	if err != nil {
 		panic(err)
 	}
+
 	var activeSeg *Segment
 	for i := uint64(0); i < blkCnt; i++ {
 		if activeSeg == nil {
-			activeSeg, _ = tbl.CreateSegment()
-			if err := tbl.RegisterSegment(activeSeg); err != nil {
-				panic(err)
-			}
-		}
-		blk, _ := activeSeg.CreateBlock()
-		err := activeSeg.RegisterBlock(blk)
-		if err != nil {
-			logutil.Errorf("seg blks = %d, maxBlks = %d", len(activeSeg.Blocks), activeSeg.MaxBlockCount)
-			panic(err)
+			activeSeg = tbl.SimpleCreateSegment()
 		}
-		if len(activeSeg.Blocks) == int(info.Conf.SegmentMaxBlocks) {
+		activeSeg.SimpleCreateBlock()
+		if len(activeSeg.BlockSet) == int(tbl.Schema.SegmentMaxBlocks) {
 			activeSeg = nil
 		}
 	}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/table_test.go b/pkg/vm/engine/aoe/storage/metadata/v1/table_test.go
deleted file mode 100644
index ad1b88871c7a9789b2434ea99a3b8330d592e8c7..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v1/table_test.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metadata
-
-import (
-	"bytes"
-	"encoding/json"
-	"matrixone/pkg/vm/engine/aoe/storage/dbi"
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestTable(t *testing.T) {
-	mu := &sync.RWMutex{}
-	info := MockInfo(mu, blockRowCount, segmentBlockCount)
-	tbl1 := NewTable(NextGlobalSeqNum(), info, MockSchema(2))
-	tbl1.Schema.Name = "tbl1"
-	assert.Nil(t, info.RegisterTable(tbl1))
-	tbl2 := NewTable(NextGlobalSeqNum(), info, MockSchema(2), uint64(999))
-	tbl2.Schema.Name = "tbl2"
-	assert.Nil(t, info.RegisterTable(tbl2))
-	assert.Equal(t, tbl2.GetID(), uint64(999))
-	schema := MockSchema(2)
-	schema.Name = "tbl3"
-	tbl3 := MockTable(info, schema, segmentBlockCount*10)
-	tbl4 := MockTable(info, nil, segmentBlockCount)
-	for i := 1; i < 11; i++ {
-		assert.Equal(t, uint64(i), tbl3.GetActiveSegment().GetID())
-		tbl3.NextActiveSegment()
-	}
-	assert.Equal(t, tbl4.GetTableId(), uint64(3))
-
-	assert.Nil(t, tbl1.GetReplayIndex())
-	assert.Panics(t, func() {
-		tbl1.ResetReplayIndex()
-	})
-	tbl1.ReplayIndex = &LogIndex{}
-	assert.NotNil(t, tbl1.GetReplayIndex())
-	assert.NotPanics(t, func() {
-		tbl1.ResetReplayIndex()
-	})
-	var wg sync.WaitGroup
-	for i := 0; i < 1000; i++ {
-		wg.Add(1)
-		go func() {
-			tbl1.AppendStat(1, 1)
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-	assert.Equal(t, tbl1.GetRows(), uint64(1000))
-	assert.Equal(t, tbl1.GetSize(), uint64(1000))
-
-	assert.Nil(t, tbl2.GetActiveSegment())
-	assert.Nil(t, tbl2.NextActiveSegment())
-
-	seg1, err := tbl2.CreateSegment()
-	assert.Nil(t, err)
-	assert.Nil(t, tbl2.RegisterSegment(seg1))
-	_, err = tbl2.ReferenceSegment(seg1.GetID() + 1)
-	assert.NotNil(t, err)
-	_, err = tbl2.CloneSegment(seg1.GetID()+1, CopyCtx{})
-	assert.NotNil(t, err)
-	_, err = tbl2.ReferenceBlock(seg1.GetID()+1, 0)
-	assert.NotNil(t, err)
-	assert.Equal(t, 0, len(tbl2.GetSegmentBlockIDs(seg1.GetID()+1)))
-	assert.Equal(t, 0, len(tbl2.GetSegmentBlockIDs(seg1.GetID())))
-	_, err = tbl2.ReferenceSegment(seg1.GetID())
-	assert.Nil(t, err)
-	assert.Equal(t, uint64(1), tbl2.GetSegmentCount())
-	assert.NotNil(t, tbl2.RegisterSegment(seg1))
-	assert.NotNil(t, tbl2.RegisterSegment(&Segment{Table: &Table{ID: uint64(1000)}}))
-	assert.NotNil(t, tbl2.RegisterSegment(&Segment{BoundSate: Attached, Table: &Table{ID: uint64(999)}}))
-	assert.NotNil(t, tbl2.RegisterSegment(&Segment{ID: seg1.GetID(), Table: &Table{ID: uint64(999)}}))
-	assert.Equal(t, "Tbl(999) 0[Seg(999-12) [blkPos=0][State=0][]\n]", tbl2.String())
-	ts1 := NowMicro()
-	seg2, err := tbl2.CreateSegment()
-	assert.Nil(t, err)
-	assert.Nil(t, tbl2.RegisterSegment(seg2))
-	assert.Equal(t, "Tbl(999) 0[Seg(999-12) [blkPos=0][State=0][]\nSeg(999-13) [blkPos=0][State=0][]\n]", tbl2.String())
-	time.Sleep(time.Microsecond)
-	assert.Equal(t, 2, len(tbl2.SegmentIDs()))
-	assert.Equal(t, 2, len(tbl2.SegmentIDs(NowMicro())))
-	assert.Equal(t, 1, len(tbl2.SegmentIDs(ts1)))
-	for i := 0; i < int(segmentBlockCount); i++ {
-		blk, err := seg2.CreateBlock()
-		assert.Nil(t, err)
-		assert.Nil(t, seg2.RegisterBlock(blk))
-		seg2.NextActiveBlk()
-	}
-	assert.Nil(t, tbl2.NextActiveSegment())
-	tbl2_ := tbl2.Copy(CopyCtx{})
-	assert.Equal(t, tbl2_.GetID(), tbl2.GetID())
-	tbl2_ = tbl2.Copy(CopyCtx{Ts: ts1})
-	assert.Equal(t, uint64(1), tbl2_.GetSegmentCount())
-	assert.Equal(t, tbl2.GetID(), tbl2.LiteCopy().GetID())
-	s, err := tbl2.GetInfullSegment()
-	assert.Equal(t, uint64(12), s.GetID())
-	ms, mb := tbl2.GetMaxSegIDAndBlkID()
-	assert.Equal(t, uint64(13), ms)
-	assert.Equal(t, uint64(12*4), mb)
-	tbl2.Replay() // TODO: test replay
-	assert.Equal(t, "999_v6", tbl2.GetFileName())
-	assert.Equal(t, "999_v5", tbl2.GetLastFileName())
-	assert.Nil(t, tbl2.Serialize(bytes.NewBuffer(make([]byte, 100))))
-	n, err := tbl2.Marshal()
-	assert.Nil(t, err)
-	var tbl5 Table
-	assert.Nil(t, tbl5.Unmarshal(n))
-	assert.Equal(t, tbl5.GetID(), tbl2.GetID())
-	buf := bytes.NewBuffer(n)
-	var tbl6 Table
-	_, err = tbl6.ReadFrom(buf)
-	assert.Nil(t, err)
-	assert.Equal(t, tbl5.GetID(), tbl6.GetID())
-}
-
-func TestCreateDropTable(t *testing.T) {
-	colCnt := 2
-	tblInfo := MockTableInfo(colCnt)
-
-	mu := &sync.RWMutex{}
-	info := MockInfo(mu, blockRowCount, segmentBlockCount)
-	info.Conf.Dir = "/tmp"
-	tbl, err := info.CreateTableFromTableInfo(tblInfo, dbi.TableOpCtx{TableName: tblInfo.Name, OpIndex: NextGlobalSeqNum()})
-	assert.Nil(t, err)
-	assert.Equal(t, tblInfo.Name, tbl.Schema.Name)
-
-	assert.Equal(t, len(tblInfo.Indices), len(tbl.Schema.Indices))
-	for idx, indexInfo := range tblInfo.Indices {
-		assert.Equal(t, indexInfo.Type, uint64(tbl.Schema.Indices[idx].Type))
-		for iidx := range indexInfo.Columns {
-			assert.Equal(t, indexInfo.Columns[iidx], uint64(tbl.Schema.Indices[idx].Columns[iidx]))
-		}
-	}
-	for idx, colInfo := range tblInfo.Columns {
-		assert.Equal(t, colInfo.Type, tbl.Schema.ColDefs[idx].Type)
-		assert.Equal(t, colInfo.Name, tbl.Schema.ColDefs[idx].Name)
-		assert.Equal(t, idx, tbl.Schema.ColDefs[idx].Idx)
-	}
-
-	rTbl, err := info.ReferenceTableByName(tbl.Schema.Name)
-	assert.Nil(t, err)
-	assert.NotNil(t, rTbl)
-
-	ts := NowMicro()
-	assert.False(t, rTbl.IsDeleted(ts))
-
-	tid, err := info.SoftDeleteTable(tbl.Schema.Name, NextGlobalSeqNum())
-	assert.Nil(t, err)
-	assert.Equal(t, rTbl.ID, tid)
-
-	_, err = info.SoftDeleteTable(tbl.Schema.Name, NextGlobalSeqNum())
-	assert.NotNil(t, err)
-
-	rTbl2, err := info.ReferenceTableByName(tbl.Schema.Name)
-	assert.NotNil(t, err)
-	assert.Nil(t, rTbl2)
-
-	ts = NowMicro()
-	assert.True(t, rTbl.IsDeleted(ts))
-
-	rTbl3, err := info.ReferenceTable(tid)
-	assert.Nil(t, err)
-	assert.Equal(t, rTbl3.ID, tid)
-	assert.True(t, rTbl3.IsDeleted(ts))
-
-	_, err = rTbl3.Marshal()
-	assert.Nil(t, err)
-	//t.Log(string(tblBytes))
-
-	infoBytes, err := json.Marshal(info)
-	assert.Nil(t, err)
-	//t.Log(string(infoBytes))
-
-	newInfo := new(MetaInfo)
-	err = newInfo.Unmarshal(infoBytes)
-	assert.Nil(t, err)
-	assert.Equal(t, newInfo.Tables[tid].ID, tid)
-	assert.Equal(t, newInfo.Tables[tid].TimeStamp, rTbl3.TimeStamp)
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v1/types.go b/pkg/vm/engine/aoe/storage/metadata/v1/types.go
index f5ef4b0621115e8d0f771f5c48ceaef8bdfd6a0f..0ccecbab026805480e2ecae40bdf0360b6fc25c0 100644
--- a/pkg/vm/engine/aoe/storage/metadata/v1/types.go
+++ b/pkg/vm/engine/aoe/storage/metadata/v1/types.go
@@ -10,216 +10,215 @@
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
 package metadata
 
 import (
-	"io"
+	"errors"
+	"fmt"
 	"matrixone/pkg/container/types"
 	"matrixone/pkg/vm/engine/aoe/storage/common"
-	"sync"
-
-	"github.com/google/btree"
+	"sync/atomic"
 )
 
 const (
-	MAX_SEGMENTID = common.MAX_UINT64
-	MAX_TABLEID   = common.MAX_UINT64
+	MinUncommitId = ^uint64(0) / 2
 )
 
-// for test use
-const (
-	blockRowCount     = uint64(16)
-	segmentBlockCount = uint64(4)
-)
+var uncommitId = MinUncommitId
 
-// Resource is an abstraction for two key types of resources
-// currently, MetaInfo and Table.
-type Resource interface {
-	GetResourceType() ResourceType
-	GetFileName() string
-	GetLastFileName() string
-	Serialize(io.Writer) error
-	GetTableId() uint64
+func nextUncommitId() uint64 {
+	return atomic.AddUint64(&uncommitId, uint64(1)) - 1
 }
 
-type ResourceType uint8
+func IsTransientCommitId(id uint64) bool {
+	return id >= MinUncommitId
+}
+
+type State = uint8
 
 const (
-	ResInfo ResourceType = iota
-	ResTable
+	STInited State = iota
+	STFull
+	STClosed
+	STSorted
 )
 
-// IndexType tells the type of the index in schema.
-type IndexType uint16
+type OpT uint8
 
 const (
-	ZoneMap IndexType = iota
-	NumBsi
-	FixStrBsi
+	OpReserved OpT = iota
+	OpCreate
+	OpUpgradeFull
+	OpUpgradeClose
+	OpUpgradeSorted
+	OpSoftDelete
+	OpHardDelete
 )
 
-type LogBatchId struct {
-	Id     uint64
-	Offset uint32
-	Size   uint32
+var OpNames = map[OpT]string{
+	OpCreate:        "Create",
+	OpUpgradeFull:   "UpgradeFull",
+	OpUpgradeClose:  "UpgradeClose",
+	OpUpgradeSorted: "UpgradeSorted",
+	OpSoftDelete:    "SoftDelete",
+	OpHardDelete:    "HardDelete",
+}
+
+func OpName(op OpT) string {
+	return OpNames[op]
+}
+
+type CommitInfo struct {
+	common.SSLLNode `json:"-"`
+	CommitId        uint64
+	TranId          uint64
+	Op              OpT
+	ExternalIndex   *ExternalIndex
+	PrevIndex       *ExternalIndex
+	AppliedIndex    *ExternalIndex
+}
+
+func (info *CommitInfo) IsHardDeleted() bool {
+	return info.Op == OpHardDelete
+}
+
+func (info *CommitInfo) IsSoftDeleted() bool {
+	return info.Op == OpSoftDelete
+}
+
+func (info *CommitInfo) PString(level PPLevel) string {
+	s := "CInfo: "
+	var curr, prev common.ISSLLNode
+	curr = info
+	for curr != nil {
+		if prev != nil {
+			s = fmt.Sprintf("%s -> ", s)
+		}
+		cInfo := curr.(*CommitInfo)
+		s = fmt.Sprintf("%s(%s,%d", s, OpName(cInfo.Op), cInfo.CommitId)
+		if level >= PPL1 {
+			id, _ := cInfo.GetAppliedIndex()
+			s = fmt.Sprintf("%s,%d-%s)", s, id, cInfo.ExternalIndex.String())
+		} else {
+			s = fmt.Sprintf("%s)", s)
+		}
+		// s = fmt.Sprintf("%s(%s,%d,%d)", s, OpName(info.Op), info.TranId-MinUncommitId, info.CommitId)
+		prev = curr
+		curr = curr.GetNext()
+	}
+	return s
+}
+
+// TODO: remove it. Not be used later
+func (info *CommitInfo) GetAppliedIndex() (uint64, bool) {
+	if info.AppliedIndex != nil {
+		return info.AppliedIndex.Id.Id, true
+	}
+	if info.ExternalIndex != nil && info.ExternalIndex.IsBatchApplied() {
+		return info.ExternalIndex.Id.Id, true
+	}
+
+	if info.PrevIndex != nil && info.PrevIndex.IsBatchApplied() {
+		return info.PrevIndex.Id.Id, true
+	}
+	return 0, false
+}
+
+// SetIndex changes the current index to previous index if exists, and
+// sets the current index to idx.
+func (info *CommitInfo) SetIndex(idx LogIndex) error {
+	if info.ExternalIndex != nil {
+		if !info.ExternalIndex.IsApplied() {
+			return errors.New(fmt.Sprintf("already has applied index: %d", info.ExternalIndex.Id))
+		}
+		info.PrevIndex = info.ExternalIndex
+		info.ExternalIndex = &idx
+	} else {
+		if info.PrevIndex != nil {
+			return errors.New(fmt.Sprintf("no index but has prev index: %d", info.PrevIndex.Id))
+		}
+		info.ExternalIndex = &idx
+	}
+	return nil
 }
 
-// LogIndex records some block related info.
-// Used for replay.
-type LogIndex struct {
-	ID       LogBatchId
-	Start    uint64
-	Count    uint64
-	Capacity uint64
+type Sequence struct {
+	nextTableId   uint64
+	nextSegmentId uint64
+	nextBlockId   uint64
+	nextCommitId  uint64
+	nextIndexId   uint64
 }
 
-type LogHistory struct {
-	CreatedIndex uint64
-	DeletedIndex uint64
-	AppliedIndex uint64
+func (s *Sequence) NextTableId() uint64 {
+	return atomic.AddUint64(&s.nextTableId, uint64(1))
 }
 
-// TimeStamp contains the C/U/D time of a ts.
-type TimeStamp struct {
-	CreatedOn int64
-	UpdatedOn int64
-	DeletedOn int64
+func (s *Sequence) NextSegmentId() uint64 {
+	return atomic.AddUint64(&s.nextSegmentId, uint64(1))
 }
 
-type BoundSate uint8
+func (s *Sequence) NextBlockId() uint64 {
+	return atomic.AddUint64(&s.nextBlockId, uint64(1))
+}
 
-const (
-	Standalone BoundSate = iota
-	Attached
-	Detached
-)
+func (s *Sequence) NextCommitId() uint64 {
+	return atomic.AddUint64(&s.nextCommitId, uint64(1))
+}
 
-// DataState is the general representation for Block and Segment.
-// On its changing, some operations like flush would be triggered.
-type DataState = uint8
+func (s *Sequence) NextIndexId() uint64 {
+	return atomic.AddUint64(&s.nextIndexId, uint64(1))
+}
 
-const (
-	EMPTY   DataState = iota
-	PARTIAL           // Block: 0 < Count < MaxRowCount, Segment: 0 < len(Blocks) < MaxBlockCount
-	FULL              // Block: Count == MaxRowCount, Segment: len(Blocks) == MaxBlockCount
-	CLOSED            // Segment only. Already FULL and all blocks are FULL
-	SORTED            // Segment only. Merge sorted
-)
+func (s *Sequence) NextUncommitId() uint64 {
+	return nextUncommitId()
+}
 
-// Block contains metadata for block.
-type Block struct {
-	sync.RWMutex
-	BoundSate
-	TimeStamp
-	ID          uint64
-	MaxRowCount uint64
-	Count       uint64
-	Index       *LogIndex
-	PrevIndex   *LogIndex
-	DataState   DataState
-	Segment     *Segment `json:"-"`
+func (s *Sequence) TryUpdateTableId(id uint64) {
+	if s.nextTableId < id {
+		s.nextTableId = id
+	}
 }
 
-type Sequence struct {
-	NextBlockID     uint64
-	NextSegmentID   uint64
-	NextPartitionID uint64
-	NextTableID     uint64
-	NextIndexID     uint64
-}
-
-// Segment contains metadata for segment.
-type Segment struct {
-	BoundSate
-	sync.RWMutex
-	TimeStamp
-	ID            uint64
-	MaxBlockCount uint64
-	Blocks        []*Block
-	ActiveBlk     int
-	IdMap         map[uint64]int
-	DataState     DataState
-	Table         *Table `json:"-"`
-}
-
-// ColDef defines a column in schema.
-type ColDef struct {
-	// Column name
-	Name string
-	// Column index in schema
-	Idx int
-	// Column type
-	Type types.Type
-}
-
-// Schema is in representation of a table schema.
-type Schema struct {
-	// Table name
-	Name string
-	// Indices' info
-	Indices []*IndexInfo
-	// Column definitions
-	ColDefs []*ColDef
-	// Column name -> column index mapping
-	NameIdMap map[string]int
-}
-
-// IndexInfo contains metadata for an index.
-type IndexInfo struct {
-	Type IndexType
-	// Columns that the index works on
-	Columns []uint16
-	ID      uint64
-}
-
-type Statistics struct {
-	Rows uint64
-	Size uint64
-}
-
-// Table contains metadata for a table.
-type Table struct {
-	BoundSate
-	sync.RWMutex
-	TimeStamp
-	LogHistory
-	ID            uint64
-	Segments      []*Segment
-	SegmentCnt    uint64
-	ActiveSegment int            `json:"-"`
-	IdMap         map[uint64]int `json:"-"`
-	Info          *MetaInfo      `json:"-"`
-	Stat          *Statistics    `json:"-"`
-	ReplayIndex   *LogIndex      `json:"-"`
-	Schema        *Schema
-	Conf          *Configuration
-	CheckPoint    uint64
-}
-
-// Configuration contains some basic configs for global DB.
-type Configuration struct {
-	Dir              string
-	BlockMaxRows     uint64 `toml:"block-max-rows"`
-	SegmentMaxBlocks uint64 `toml:"segment-max-blocks"`
-}
-
-// MetaInfo contains some basic metadata for global DB.
-type MetaInfo struct {
-	*sync.RWMutex
-	Sequence   Sequence       `json:"-"`
-	Conf       *Configuration `json:"-"`
-	CheckPoint uint64
-	Tables     map[uint64]*Table
-	TableIds   map[uint64]bool   `json:"-"`
-	NameMap    map[string]uint64 `json:"-"`
-	NameTree   *btree.BTree      `json:"-"`
-	Tombstone  map[uint64]bool   `json:"-"`
-	CkpTime    int64
-}
-
-type CopyCtx struct {
-	Ts       int64
-	Attached bool
+func (s *Sequence) TryUpdateCommitId(id uint64) {
+	if s.nextCommitId < id {
+		s.nextCommitId = id
+	}
+}
+
+func (s *Sequence) TryUpdateSegmentId(id uint64) {
+	if s.nextSegmentId < id {
+		s.nextSegmentId = id
+	}
+}
+
+func (s *Sequence) TryUpdateBlockId(id uint64) {
+	if s.nextBlockId < id {
+		s.nextBlockId = id
+	}
+}
+
+func (s *Sequence) TryUpdateIndexId(id uint64) {
+	if s.nextIndexId < id {
+		s.nextIndexId = id
+	}
+}
+
+func EstimateColumnBlockSize(colIdx int, meta *Block) uint64 {
+	switch meta.Segment.Table.Schema.ColDefs[colIdx].Type.Oid {
+	case types.T_json, types.T_char, types.T_varchar:
+		return meta.Segment.Table.Schema.BlockMaxRows * 2 * 4
+	default:
+		return meta.Segment.Table.Schema.BlockMaxRows * uint64(meta.Segment.Table.Schema.ColDefs[colIdx].Type.Size)
+	}
+}
+
+func EstimateBlockSize(meta *Block) uint64 {
+	size := uint64(0)
+	for colIdx := range meta.Segment.Table.Schema.ColDefs {
+		size += EstimateColumnBlockSize(colIdx, meta)
+	}
+	return size
 }
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/base.go b/pkg/vm/engine/aoe/storage/metadata/v2/base.go
deleted file mode 100644
index 1199d9d2eea8edac2b802a543c7f6358110dba56..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v2/base.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-
-package metadata
-
-import (
-	"fmt"
-	"matrixone/pkg/vm/engine/aoe/storage/common"
-	"sync"
-)
-
-type PPLevel uint8
-
-const (
-	PPL0 PPLevel = iota
-	PPL1
-	PPL2
-)
-
-type BaseEntry struct {
-	sync.RWMutex
-	Id         uint64
-	CommitInfo *CommitInfo
-}
-
-func (e *BaseEntry) GetFirstCommit() *CommitInfo {
-	prev := e.CommitInfo
-	curr := prev.GetNext()
-	for curr != nil {
-		prev = curr.(*CommitInfo)
-		curr = curr.GetNext()
-	}
-	return prev
-}
-
-func (e *BaseEntry) GetCommit() *CommitInfo {
-	e.RLock()
-	defer e.RUnlock()
-	return e.CommitInfo
-}
-
-// Should be guarded
-func (e *BaseEntry) IsFull() bool {
-	return e.CommitInfo.Op == OpUpgradeFull
-}
-
-// Should be guarded
-func (e *BaseEntry) IsClose() bool {
-	return e.CommitInfo.Op == OpUpgradeClose
-}
-
-// Should be guarded
-func (e *BaseEntry) IsSorted() bool {
-	return e.CommitInfo.Op == OpUpgradeSorted
-}
-
-func (e *BaseEntry) onNewCommit(info *CommitInfo) {
-	info.SetNext(e.CommitInfo)
-	e.CommitInfo = info
-}
-
-func (e *BaseEntry) PString(level PPLevel) string {
-	s := fmt.Sprintf("Id=%d,%s", e.Id, e.CommitInfo.PString(level))
-	return s
-}
-
-func (e *BaseEntry) GetAppliedIndex() (uint64, bool) {
-	curr := e.CommitInfo
-	id, ok := curr.GetAppliedIndex()
-	if ok {
-		return id, ok
-	}
-	next := curr.GetNext()
-	for next != nil {
-		id, ok = next.(*CommitInfo).GetAppliedIndex()
-		if ok {
-			return id, ok
-		}
-		next = next.GetNext()
-	}
-	return id, ok
-}
-
-// Guarded by entry mutex
-func (e *BaseEntry) HasCommittedLocked() bool {
-	return !IsTransientCommitId(e.CommitInfo.CommitId)
-}
-
-func (e *BaseEntry) HasCommitted() bool {
-	e.RLock()
-	defer e.RUnlock()
-	return !IsTransientCommitId(e.CommitInfo.CommitId)
-}
-
-func (e *BaseEntry) CanUse(tranId uint64) bool {
-	e.RLock()
-	defer e.RUnlock()
-	if e.HasCommittedLocked() && e.CommitInfo.TranId > tranId {
-		return true
-	}
-	return tranId == e.CommitInfo.TranId
-}
-
-func (e *BaseEntry) onCommitted(id uint64) *BaseEntry {
-	if e.CommitInfo.CommitId > id {
-		return nil
-	}
-	be := *e
-	return &be
-}
-
-func (e *BaseEntry) UseCommitted(id uint64) *BaseEntry {
-	e.RLock()
-	defer e.RUnlock()
-	// if e.HasCommittedLocked() {
-	// 	return e.onCommitted(id)
-	// }
-	var curr common.ISSLLNode
-	curr = e.CommitInfo
-	for curr != nil {
-		info := curr.(*CommitInfo)
-		// if info.IsHardDeleted() {
-		// 	return nil
-		// }
-		if !IsTransientCommitId(info.CommitId) && info.CommitId <= id {
-			cInfo := *info
-			return &BaseEntry{
-				Id:         e.Id,
-				CommitInfo: &cInfo,
-			}
-		}
-		curr = curr.GetNext()
-	}
-	return nil
-}
-
-// Guarded by e.Lock()
-func (e *BaseEntry) IsSoftDeletedLocked() bool {
-	return e.CommitInfo.IsSoftDeleted()
-}
-
-func (e *BaseEntry) IsDeletedLocked() bool {
-	return e.IsSoftDeletedLocked() || e.IsHardDeletedLocked()
-}
-
-func (e *BaseEntry) IsDeleted() bool {
-	e.RLock()
-	defer e.RUnlock()
-	return e.IsSoftDeletedLocked() || e.IsHardDeletedLocked()
-}
-
-func (e *BaseEntry) IsSoftDeleted() bool {
-	e.RLock()
-	defer e.RUnlock()
-	return e.CommitInfo.IsSoftDeleted()
-}
-
-func (e *BaseEntry) IsHardDeletedLocked() bool {
-	return e.CommitInfo.IsHardDeleted()
-}
-
-func (e *BaseEntry) IsHardDeleted() bool {
-	e.RLock()
-	defer e.RUnlock()
-	return e.CommitInfo.IsHardDeleted()
-}
-
-func (e *BaseEntry) CommitLocked(id uint64) {
-	if IsTransientCommitId(id) {
-		panic(fmt.Sprintf("Cannot commit transient id %d", id))
-	}
-	if e.HasCommittedLocked() {
-		panic(fmt.Sprintf("Cannot commit committed entry"))
-	}
-	e.CommitInfo.CommitId = id
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/block.go b/pkg/vm/engine/aoe/storage/metadata/v2/block.go
deleted file mode 100644
index e6606df46df540f3d924ba08fc0199985251942b..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v2/block.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-
-package metadata
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"matrixone/pkg/vm/engine/aoe/storage/common"
-	"matrixone/pkg/vm/engine/aoe/storage/logstore"
-	"runtime"
-	"sync"
-	"sync/atomic"
-)
-
-var (
-	UpgradeInfullBlockErr = errors.New("aoe: upgrade infull block")
-)
-
-type blockLogEntry struct {
-	BaseEntry
-	Catalog   *Catalog `json:"-"`
-	TableId   uint64
-	SegmentId uint64
-}
-
-func (e *blockLogEntry) Marshal() ([]byte, error) {
-	return json.Marshal(e)
-}
-
-func (e *blockLogEntry) Unmarshal(buf []byte) error {
-	return json.Unmarshal(buf, e)
-}
-
-func (e *blockLogEntry) ToEntry() *Block {
-	entry := &Block{
-		BaseEntry: e.BaseEntry,
-	}
-	table := e.Catalog.TableSet[e.TableId]
-	entry.Segment = table.GetSegment(e.SegmentId, MinUncommitId)
-	return entry
-}
-
-type Block struct {
-	BaseEntry
-	Segment     *Segment `json:"-"`
-	Count       uint64
-	SegmentedId uint64
-}
-
-func newBlockEntry(segment *Segment, tranId uint64, exIndex *ExternalIndex) *Block {
-	e := &Block{
-		Segment: segment,
-		BaseEntry: BaseEntry{
-			Id: segment.Table.Catalog.NextBlockId(),
-			CommitInfo: &CommitInfo{
-				CommitId:      tranId,
-				TranId:        tranId,
-				SSLLNode:      *common.NewSSLLNode(),
-				Op:            OpCreate,
-				ExternalIndex: exIndex,
-			},
-		},
-	}
-	return e
-}
-
-func newCommittedBlockEntry(segment *Segment, base *BaseEntry) *Block {
-	e := &Block{
-		Segment:   segment,
-		BaseEntry: *base,
-	}
-	return e
-}
-
-func (e *Block) View() (view *Block) {
-	e.RLock()
-	view = &Block{
-		BaseEntry:   BaseEntry{Id: e.Id, CommitInfo: e.CommitInfo},
-		Segment:     e.Segment,
-		Count:       e.Count,
-		SegmentedId: e.SegmentedId,
-	}
-	e.RUnlock()
-	return
-}
-
-// Safe
-func (e *Block) Less(o *Block) bool {
-	if e == nil {
-		return true
-	}
-	return e.Id < o.Id
-}
-
-func (e *Block) rebuild(segment *Segment) {
-	e.Segment = segment
-}
-
-// Safe
-func (e *Block) AsCommonID() *common.ID {
-	return &common.ID{
-		TableID:   e.Segment.Table.Id,
-		SegmentID: e.Segment.Id,
-		BlockID:   e.Id,
-	}
-}
-
-// Not safe
-// One writer, multi-readers
-func (e *Block) SetSegmentedId(id uint64) error {
-	atomic.StoreUint64(&e.SegmentedId, id)
-	return nil
-}
-
-// Safe
-func (e *Block) GetAppliedIndex(rwmtx *sync.RWMutex) (uint64, bool) {
-	if rwmtx == nil {
-		e.RLock()
-		defer e.RUnlock()
-	}
-	if !e.IsFull() {
-		id := atomic.LoadUint64(&e.SegmentedId)
-		if id == 0 {
-			return id, false
-		}
-		return id, true
-	}
-	return e.BaseEntry.GetAppliedIndex()
-}
-
-// Not safe
-func (e *Block) HasMaxRows() bool {
-	return e.Count == e.Segment.Table.Schema.BlockMaxRows
-}
-
-// Not safe
-func (e *Block) SetIndex(idx LogIndex) error {
-	return e.CommitInfo.SetIndex(idx)
-}
-
-// Not safe
-// TODO: should be safe
-func (e *Block) GetCount() uint64 {
-	if e.IsFull() {
-		return e.Segment.Table.Schema.BlockMaxRows
-	}
-	return atomic.LoadUint64(&e.Count)
-}
-
-// Not safe
-// TODO: should be safe
-func (e *Block) AddCount(n uint64) (uint64, error) {
-	curCnt := e.GetCount()
-	if curCnt+n > e.Segment.Table.Schema.BlockMaxRows {
-		return 0, errors.New(fmt.Sprintf("block row count %d > block max rows %d", curCnt+n, e.Segment.Table.Schema.BlockMaxRows))
-	}
-	for !atomic.CompareAndSwapUint64(&e.Count, curCnt, curCnt+n) {
-		runtime.Gosched()
-		curCnt = e.GetCount()
-		if curCnt+n > e.Segment.Table.Schema.BlockMaxRows {
-			return 0, errors.New(fmt.Sprintf("block row count %d > block max rows %d", curCnt+n, e.Segment.Table.Schema.BlockMaxRows))
-		}
-	}
-	return curCnt + n, nil
-}
-
-// TODO: remove it. Should not needed
-func (e *Block) SetCount(count uint64) error {
-	if count > e.Segment.Table.Schema.BlockMaxRows {
-		return errors.New("SetCount exceeds max limit")
-	}
-	if count < e.Count {
-		return errors.New("SetCount cannot set smaller count")
-	}
-	e.Count = count
-	return nil
-}
-
-// Safe
-func (e *Block) CommittedView(id uint64) *Block {
-	baseEntry := e.UseCommitted(id)
-	if baseEntry == nil {
-		return nil
-	}
-	return &Block{
-		BaseEntry: *baseEntry,
-	}
-}
-
-// Safe
-func (e *Block) SimpleUpgrade(exIndice []*ExternalIndex) error {
-	ctx := newUpgradeBlockCtx(e, exIndice)
-	return e.Segment.Table.Catalog.onCommitRequest(ctx)
-	// return e.Upgrade(e.Segment.Table.Catalog.NextUncommitId(), exIndice, true)
-}
-
-// func (e *Block) Upgrade(tranId uint64, exIndice []*ExternalIndex, autoCommit bool) error {
-func (e *Block) prepareUpgrade(ctx *upgradeBlockCtx) (LogEntry, error) {
-	if e.GetCount() != e.Segment.Table.Schema.BlockMaxRows {
-		return nil, UpgradeInfullBlockErr
-	}
-	tranId := e.Segment.Table.Catalog.NextUncommitId()
-	e.Lock()
-	defer e.Unlock()
-	var newOp OpT
-	switch e.CommitInfo.Op {
-	case OpCreate:
-		newOp = OpUpgradeFull
-	default:
-		return nil, UpgradeNotNeededErr
-	}
-	cInfo := &CommitInfo{
-		TranId:   tranId,
-		CommitId: tranId,
-		Op:       newOp,
-	}
-	if ctx.exIndice != nil {
-		cInfo.ExternalIndex = ctx.exIndice[0]
-		if len(ctx.exIndice) > 1 {
-			cInfo.PrevIndex = ctx.exIndice[1]
-		}
-	} else {
-		cInfo.ExternalIndex = e.CommitInfo.ExternalIndex
-		id, ok := e.BaseEntry.GetAppliedIndex()
-		if ok {
-			cInfo.AppliedIndex = &ExternalIndex{
-				Id: SimpleBatchId(id),
-			}
-		}
-	}
-	e.onNewCommit(cInfo)
-	logEntry := e.Segment.Catalog.prepareCommitEntry(e, ETUpgradeBlock, e)
-	return logEntry, nil
-}
-
-func (e *Block) toLogEntry() *blockLogEntry {
-	return &blockLogEntry{
-		BaseEntry: e.BaseEntry,
-		Catalog:   e.Segment.Catalog,
-		TableId:   e.Segment.Table.Id,
-		SegmentId: e.Segment.Id,
-	}
-}
-
-func (e *Block) Marshal() ([]byte, error) {
-	return json.Marshal(e)
-}
-
-func (e *Block) Unmarshal(buf []byte) error {
-	return json.Unmarshal(buf, e)
-}
-
-// Not safe
-func (e *Block) PString(level PPLevel) string {
-	s := fmt.Sprintf("<Block %s>", e.BaseEntry.PString(level))
-	return s
-}
-
-// Not safe
-func (e *Block) String() string {
-	buf, _ := e.Marshal()
-	return string(buf)
-}
-
-// Not safe
-func (e *Block) ToLogEntry(eType LogEntryType) LogEntry {
-	switch eType {
-	case ETCreateBlock:
-		break
-	case ETUpgradeBlock:
-		break
-	case ETDropBlock:
-		if !e.IsSoftDeletedLocked() {
-			panic("logic error")
-		}
-		break
-	default:
-		panic("not supported")
-	}
-	entry := e.toLogEntry()
-	buf, _ := entry.Marshal()
-	logEntry := logstore.NewAsyncBaseEntry()
-	logEntry.Meta.SetType(eType)
-	logEntry.Unmarshal(buf)
-	return logEntry
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/schema.go b/pkg/vm/engine/aoe/storage/metadata/v2/schema.go
deleted file mode 100644
index bcb63c690cc421f74bf7b6c9c15c5c2f835a78d3..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v2/schema.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-
-package metadata
-
-import (
-	"encoding/json"
-	"fmt"
-	"matrixone/pkg/container/types"
-)
-
-type IndexT uint16
-
-const (
-	ZoneMap IndexT = iota
-	NumBsi
-	FixStrBsi
-)
-
-type IndexInfo struct {
-	Id      uint64
-	Type    IndexT
-	Columns []uint16
-}
-
-type ColDef struct {
-	Name string
-	Idx  int
-	Type types.Type
-}
-
-type Schema struct {
-	Name             string
-	Indices          []*IndexInfo
-	ColDefs          []*ColDef
-	NameIndex        map[string]int
-	BlockMaxRows     uint64
-	SegmentMaxBlocks uint64
-}
-
-func (s *Schema) String() string {
-	buf, _ := json.Marshal(s)
-	return string(buf)
-}
-
-func (s *Schema) Types() []types.Type {
-	ts := make([]types.Type, len(s.ColDefs))
-	for i, colDef := range s.ColDefs {
-		ts[i] = colDef.Type
-	}
-	return ts
-}
-
-func (s *Schema) Valid() bool {
-	if s == nil {
-		return false
-	}
-	if len(s.ColDefs) == 0 {
-		return false
-	}
-
-	names := make(map[string]bool)
-	for idx, colDef := range s.ColDefs {
-		if idx != colDef.Idx {
-			return false
-		}
-		_, ok := names[colDef.Name]
-		if ok {
-			return false
-		}
-		names[colDef.Name] = true
-	}
-	return true
-}
-
-// GetColIdx returns column index for the given column name
-// if found, otherwise returns -1.
-func (s *Schema) GetColIdx(attr string) int {
-	idx, ok := s.NameIndex[attr]
-	if !ok {
-		return -1
-	}
-	return idx
-}
-
-func MockSchema(colCnt int) *Schema {
-	schema := &Schema{
-		ColDefs:   make([]*ColDef, colCnt),
-		Indices:   make([]*IndexInfo, 0),
-		NameIndex: make(map[string]int),
-	}
-	prefix := "mock_"
-	for i := 0; i < colCnt; i++ {
-		name := fmt.Sprintf("%s%d", prefix, i)
-		colDef := &ColDef{
-			Idx:  i,
-			Name: name,
-			Type: types.Type{Oid: types.T_int32, Size: 4, Width: 4},
-		}
-		schema.ColDefs[i] = colDef
-		schema.NameIndex[colDef.Name] = i
-	}
-	return schema
-}
-
-// MockSchemaAll if char/varchar is needed, colCnt = 14, otherwise colCnt = 12
-func MockSchemaAll(colCnt int) *Schema {
-	schema := &Schema{
-		Indices:   make([]*IndexInfo, 0),
-		ColDefs:   make([]*ColDef, colCnt),
-		NameIndex: make(map[string]int),
-	}
-	prefix := "mock_"
-	for i := 0; i < colCnt; i++ {
-		name := fmt.Sprintf("%s%d", prefix, i)
-		colDef := &ColDef{
-			Name: name,
-			Idx:  i,
-		}
-		schema.ColDefs[i] = colDef
-		schema.NameIndex[colDef.Name] = i
-		switch i {
-		case 0:
-			colDef.Type = types.Type{
-				Oid:   types.T_int8,
-				Size:  1,
-				Width: 8,
-			}
-		case 1:
-			colDef.Type = types.Type{
-				Oid:   types.T_int16,
-				Size:  2,
-				Width: 16,
-			}
-		case 2:
-			colDef.Type = types.Type{
-				Oid:   types.T_int32,
-				Size:  4,
-				Width: 32,
-			}
-		case 3:
-			colDef.Type = types.Type{
-				Oid:   types.T_int64,
-				Size:  8,
-				Width: 64,
-			}
-		case 4:
-			colDef.Type = types.Type{
-				Oid:   types.T_uint8,
-				Size:  1,
-				Width: 8,
-			}
-		case 5:
-			colDef.Type = types.Type{
-				Oid:   types.T_uint16,
-				Size:  2,
-				Width: 16,
-			}
-		case 6:
-			colDef.Type = types.Type{
-				Oid:   types.T_uint32,
-				Size:  4,
-				Width: 32,
-			}
-		case 7:
-			colDef.Type = types.Type{
-				Oid:   types.T_uint64,
-				Size:  8,
-				Width: 64,
-			}
-		case 8:
-			colDef.Type = types.Type{
-				Oid:   types.T_float32,
-				Size:  4,
-				Width: 32,
-			}
-		case 9:
-			colDef.Type = types.Type{
-				Oid:   types.T_float64,
-				Size:  8,
-				Width: 64,
-			}
-		case 10:
-			colDef.Type = types.Type{
-				Oid:   types.T_date,
-				Size:  4,
-				Width: 32,
-			}
-		case 11:
-			colDef.Type = types.Type{
-				Oid:   types.T_datetime,
-				Size:  8,
-				Width: 64,
-			}
-		case 12:
-			colDef.Type = types.Type{
-				Oid:   types.T_varchar,
-				Size:  24,
-				Width: 100,
-			}
-		case 13:
-			colDef.Type = types.Type{
-				Oid:   types.T_char,
-				Size:  24,
-				Width: 100,
-			}
-		}
-	}
-	return schema
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/segment.go b/pkg/vm/engine/aoe/storage/metadata/v2/segment.go
deleted file mode 100644
index dd56c74a9b78ccd503b41f102947e82ce1c7b3f5..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v2/segment.go
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-
-package metadata
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"matrixone/pkg/vm/engine/aoe/storage/common"
-	"matrixone/pkg/vm/engine/aoe/storage/logstore"
-	"sync"
-)
-
-var (
-	UpgradeInfullSegmentErr = errors.New("aoe: upgrade infull segment")
-	UpgradeNotNeededErr     = errors.New("aoe: already upgraded")
-)
-
-type segmentLogEntry struct {
-	*BaseEntry
-	TableId uint64
-	Catalog *Catalog `json:"-"`
-}
-
-func (e *segmentLogEntry) Marshal() ([]byte, error) {
-	return json.Marshal(e)
-}
-
-func (e *segmentLogEntry) Unmarshal(buf []byte) error {
-	return json.Unmarshal(buf, e)
-}
-
-type Segment struct {
-	BaseEntry
-	Table    *Table         `json:"-"`
-	Catalog  *Catalog       `json:"-"`
-	IdIndex  map[uint64]int `json:"-"`
-	BlockSet []*Block
-}
-
-func newSegmentEntry(catalog *Catalog, table *Table, tranId uint64, exIndex *ExternalIndex) *Segment {
-	e := &Segment{
-		Catalog:  catalog,
-		Table:    table,
-		BlockSet: make([]*Block, 0),
-		IdIndex:  make(map[uint64]int),
-		BaseEntry: BaseEntry{
-			Id: table.Catalog.NextSegmentId(),
-			CommitInfo: &CommitInfo{
-				CommitId:      tranId,
-				TranId:        tranId,
-				SSLLNode:      *common.NewSSLLNode(),
-				Op:            OpCreate,
-				ExternalIndex: exIndex,
-			},
-		},
-	}
-	return e
-}
-
-func newCommittedSegmentEntry(catalog *Catalog, table *Table, base *BaseEntry) *Segment {
-	e := &Segment{
-		Catalog:   catalog,
-		Table:     table,
-		BlockSet:  make([]*Block, 0),
-		IdIndex:   make(map[uint64]int),
-		BaseEntry: *base,
-	}
-	return e
-}
-
-func (e *Segment) LE(o *Segment) bool {
-	if e == nil {
-		return true
-	}
-	return e.Id <= o.Id
-}
-
-func (e *Segment) rebuild(table *Table) {
-	e.Catalog = table.Catalog
-	e.Table = table
-	e.IdIndex = make(map[uint64]int)
-	for i, blk := range e.BlockSet {
-		e.Catalog.Sequence.TryUpdateBlockId(blk.Id)
-		blk.rebuild(e)
-		e.IdIndex[blk.Id] = i
-	}
-}
-
-// Safe
-func (e *Segment) AsCommonID() *common.ID {
-	return &common.ID{
-		TableID:   e.Table.Id,
-		SegmentID: e.Id,
-	}
-}
-
-// Safe
-func (e *Segment) CommittedView(id uint64) *Segment {
-	baseEntry := e.UseCommitted(id)
-	if baseEntry == nil {
-		return nil
-	}
-	view := &Segment{
-		BaseEntry: *baseEntry,
-		BlockSet:  make([]*Block, 0),
-	}
-	e.RLock()
-	blks := make([]*Block, 0, len(e.BlockSet))
-	for _, blk := range e.BlockSet {
-		blks = append(blks, blk)
-	}
-	e.RUnlock()
-	for _, blk := range blks {
-		blkView := blk.CommittedView(id)
-		if blkView == nil {
-			continue
-		}
-		view.BlockSet = append(view.BlockSet, blkView)
-	}
-	return view
-}
-
-func (e *Segment) Marshal() ([]byte, error) {
-	return json.Marshal(e)
-}
-
-func (e *Segment) toLogEntry() *segmentLogEntry {
-	return &segmentLogEntry{
-		BaseEntry: &e.BaseEntry,
-		TableId:   e.Table.Id,
-	}
-}
-
-func (e *Segment) Unmarshal(buf []byte) error {
-	return json.Unmarshal(buf, e)
-}
-
-// Not safe
-func (e *Segment) PString(level PPLevel) string {
-	if e == nil {
-		return "null segment"
-	}
-	s := fmt.Sprintf("<Segment %s", e.BaseEntry.PString(level))
-	cnt := 0
-	if level > PPL0 {
-		for _, blk := range e.BlockSet {
-			cnt++
-			s = fmt.Sprintf("%s\n%s", s, blk.PString(level))
-		}
-	}
-	if cnt == 0 {
-		s = fmt.Sprintf("%s>", s)
-	} else {
-		s = fmt.Sprintf("%s\n>", s)
-	}
-	return s
-}
-
-// Not safe
-func (e *Segment) String() string {
-	buf, _ := e.Marshal()
-	return string(buf)
-}
-
-// Not safe
-func (e *Segment) ToLogEntry(eType LogEntryType) LogEntry {
-	switch eType {
-	case ETCreateSegment:
-		break
-	case ETUpgradeSegment:
-		break
-	case ETDropSegment:
-		if !e.IsSoftDeletedLocked() {
-			panic("logic error")
-		}
-		break
-	default:
-		panic("not supported")
-	}
-	entry := e.toLogEntry()
-	buf, _ := entry.Marshal()
-	logEntry := logstore.NewAsyncBaseEntry()
-	logEntry.Meta.SetType(eType)
-	logEntry.Unmarshal(buf)
-	return logEntry
-}
-
-// Safe
-func (e *Segment) SimpleCreateBlock() *Block {
-	ctx := newCreateBlockCtx(e)
-	if err := e.Table.Catalog.onCommitRequest(ctx); err != nil {
-		return nil
-	}
-	return ctx.block
-}
-
-// Safe
-func (e *Segment) Appendable() bool {
-	e.RLock()
-	defer e.RUnlock()
-	if e.HasMaxBlocks() {
-		return !e.BlockSet[len(e.BlockSet)-1].IsFull()
-	}
-	return true
-}
-
-func (e *Segment) prepareCreateBlock(ctx *createBlockCtx) (LogEntry, error) {
-	tranId := e.Catalog.NextUncommitId()
-	be := newBlockEntry(e, tranId, ctx.exIndex)
-	logEntry := be.ToLogEntry(ETCreateBlock)
-	e.Lock()
-	e.onNewBlock(be)
-	e.Unlock()
-	e.Table.Catalog.commitMu.Lock()
-	defer e.Table.Catalog.commitMu.Unlock()
-	e.Table.Catalog.prepareCommitLog(be, logEntry)
-	ctx.block = be
-	return logEntry, nil
-}
-
-// Safe
-func (e *Segment) GetAppliedIndex(rwmtx *sync.RWMutex) (uint64, bool) {
-	if rwmtx == nil {
-		e.RLock()
-		defer e.RUnlock()
-	}
-	if e.IsSorted() {
-		return e.BaseEntry.GetAppliedIndex()
-	}
-	return e.calcAppliedIndex()
-}
-
-// Not safe
-func (e *Segment) GetReplayIndex() *LogIndex {
-	for i := len(e.BlockSet) - 1; i >= 0; i-- {
-		blk := e.BlockSet[i]
-		if blk.CommitInfo.ExternalIndex != nil && (blk.Count > 0 || blk.IsFull()) {
-			return blk.CommitInfo.ExternalIndex
-		}
-	}
-	return nil
-}
-
-func (e *Segment) calcAppliedIndex() (id uint64, ok bool) {
-	for i := len(e.BlockSet) - 1; i >= 0; i-- {
-		blk := e.BlockSet[i]
-		id, ok = blk.GetAppliedIndex(nil)
-		if ok {
-			break
-		}
-	}
-	return id, ok
-}
-
-func (e *Segment) onNewBlock(entry *Block) {
-	e.IdIndex[entry.Id] = len(e.BlockSet)
-	e.BlockSet = append(e.BlockSet, entry)
-}
-
-// Safe
-func (e *Segment) SimpleUpgrade(exIndice []*ExternalIndex) error {
-	ctx := newUpgradeSegmentCtx(e, exIndice)
-	return e.Table.Catalog.onCommitRequest(ctx)
-	// return e.Upgrade(e.Table.Catalog.NextUncommitId(), exIndice, true)
-}
-
-// Not safe
-func (e *Segment) FirstInFullBlock() *Block {
-	if len(e.BlockSet) == 0 {
-		return nil
-	}
-	var found *Block
-	for i := len(e.BlockSet) - 1; i >= 0; i-- {
-		if !e.BlockSet[i].IsFull() {
-			found = e.BlockSet[i]
-		} else {
-			break
-		}
-	}
-	return found
-}
-
-// Not safe
-func (e *Segment) HasMaxBlocks() bool {
-	return e.IsSorted() || len(e.BlockSet) == int(e.Table.Schema.SegmentMaxBlocks)
-}
-
-// func (e *Segment) Upgrade(tranId uint64, exIndice []*ExternalIndex, autoCommit bool) error {
-func (e *Segment) prepareUpgrade(ctx *upgradeSegmentCtx) (LogEntry, error) {
-	tranId := e.Table.Catalog.NextUncommitId()
-	e.RLock()
-	if !e.HasMaxBlocks() {
-		e.RUnlock()
-		return nil, UpgradeInfullSegmentErr
-	}
-	if e.IsSorted() {
-		return nil, UpgradeNotNeededErr
-	}
-	for _, blk := range e.BlockSet {
-		if !blk.IsFull() {
-			return nil, UpgradeInfullSegmentErr
-		}
-	}
-	e.RUnlock()
-	e.Lock()
-	defer e.Unlock()
-	var newOp OpT
-	switch e.CommitInfo.Op {
-	case OpCreate:
-		newOp = OpUpgradeSorted
-	default:
-		return nil, UpgradeNotNeededErr
-	}
-	cInfo := &CommitInfo{
-		TranId:   tranId,
-		CommitId: tranId,
-		Op:       newOp,
-	}
-	if ctx.exIndice == nil {
-		id, ok := e.calcAppliedIndex()
-		if ok {
-			cInfo.AppliedIndex = &ExternalIndex{
-				Id: SimpleBatchId(id),
-			}
-		}
-	} else {
-		cInfo.ExternalIndex = ctx.exIndice[0]
-		if len(ctx.exIndice) > 1 {
-			cInfo.PrevIndex = ctx.exIndice[1]
-		}
-	}
-	e.onNewCommit(cInfo)
-	e.Table.Catalog.commitMu.Lock()
-	defer e.Table.Catalog.commitMu.Unlock()
-	logEntry := e.Table.Catalog.prepareCommitEntry(e, ETUpgradeSegment, e)
-	return logEntry, nil
-}
-
-// Not safe
-// One writer, multi-readers
-func (e *Segment) SimpleGetOrCreateNextBlock(from *Block) *Block {
-	if len(e.BlockSet) == 0 {
-		return e.SimpleCreateBlock()
-	}
-	var ret *Block
-	for i := len(e.BlockSet) - 1; i >= 0; i-- {
-		blk := e.BlockSet[i]
-		if !blk.IsFull() && from.Less(blk) {
-			ret = blk
-		} else {
-			break
-		}
-	}
-	if ret != nil || e.HasMaxBlocks() {
-		return ret
-	}
-	return e.SimpleCreateBlock()
-}
-
-// Safe
-func (e *Segment) SimpleGetBlock(id uint64) *Block {
-	e.RLock()
-	defer e.RUnlock()
-	return e.GetBlock(id, MinUncommitId)
-}
-
-func (e *Segment) GetBlock(id, tranId uint64) *Block {
-	pos, ok := e.IdIndex[id]
-	if !ok {
-		return nil
-	}
-	entry := e.BlockSet[pos]
-	return entry
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/table.go b/pkg/vm/engine/aoe/storage/metadata/v2/table.go
deleted file mode 100644
index 22739e31fec7ccdbf92c44c44413637ef632276b..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v2/table.go
+++ /dev/null
@@ -1,470 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-
-package metadata
-
-import (
-	"encoding/json"
-	"fmt"
-	"matrixone/pkg/logutil"
-	"matrixone/pkg/vm/engine/aoe/storage/common"
-	"matrixone/pkg/vm/engine/aoe/storage/logstore"
-	"sync"
-)
-
-type tableLogEntry struct {
-	BaseEntry
-	Prev    *Table
-	Catalog *Catalog `json:"-"`
-}
-
-func (e *tableLogEntry) Marshal() ([]byte, error) {
-	return json.Marshal(e)
-}
-
-func (e *tableLogEntry) Unmarshal(buf []byte) error {
-	return json.Unmarshal(buf, e)
-}
-
-func (e *tableLogEntry) ToEntry() *Table {
-	e.BaseEntry.CommitInfo.SetNext(e.Prev.CommitInfo)
-	e.Prev.BaseEntry = e.BaseEntry
-	return e.Prev
-}
-
-// func createTableHandle(r io.Reader, meta *LogEntryMeta) (LogEntry, int64, error) {
-// 	entry := Table{}
-// 	logEntry
-// 	// entry.Unmarshal()
-
-// }
-
-type Table struct {
-	BaseEntry
-	Schema     *Schema
-	SegmentSet []*Segment
-	IdIndex    map[uint64]int `json:"-"`
-	Catalog    *Catalog       `json:"-"`
-}
-
-func NewTableEntry(catalog *Catalog, schema *Schema, tranId uint64, exIndex *ExternalIndex) *Table {
-	schema.BlockMaxRows = catalog.Cfg.BlockMaxRows
-	schema.SegmentMaxBlocks = catalog.Cfg.SegmentMaxBlocks
-	e := &Table{
-		BaseEntry: BaseEntry{
-			Id: catalog.NextTableId(),
-			CommitInfo: &CommitInfo{
-				TranId:        tranId,
-				CommitId:      tranId,
-				SSLLNode:      *common.NewSSLLNode(),
-				Op:            OpCreate,
-				ExternalIndex: exIndex,
-			},
-		},
-		Schema:     schema,
-		Catalog:    catalog,
-		SegmentSet: make([]*Segment, 0),
-		IdIndex:    make(map[uint64]int),
-	}
-	return e
-}
-
-func NewEmptyTableEntry(catalog *Catalog) *Table {
-	e := &Table{
-		BaseEntry: BaseEntry{
-			CommitInfo: &CommitInfo{
-				SSLLNode: *common.NewSSLLNode(),
-			},
-		},
-		SegmentSet: make([]*Segment, 0),
-		IdIndex:    make(map[uint64]int),
-		Catalog:    catalog,
-	}
-	return e
-}
-
-// Threadsafe
-// It is used to take a snapshot of table base on a commit id. It goes through
-// the version chain to find a "safe" commit version and create a view base on
-// that version.
-// v2(commitId=7) -> v1(commitId=4) -> v0(commitId=2)
-//      |                 |                  |
-//      |                 |                   -------- CommittedView [0,2]
-//      |                  --------------------------- CommittedView [4,6]
-//       --------------------------------------------- CommittedView [7,+oo)
-func (e *Table) CommittedView(id uint64) *Table {
-	// TODO: if baseEntry op is drop, should introduce an index to
-	// indicate weather to return nil
-	baseEntry := e.UseCommitted(id)
-	if baseEntry == nil {
-		return nil
-	}
-	view := &Table{
-		Schema:     e.Schema,
-		BaseEntry:  *baseEntry,
-		SegmentSet: make([]*Segment, 0),
-	}
-	e.RLock()
-	segs := make([]*Segment, 0, len(e.SegmentSet))
-	for _, seg := range e.SegmentSet {
-		segs = append(segs, seg)
-	}
-	e.RUnlock()
-	for _, seg := range segs {
-		segView := seg.CommittedView(id)
-		if segView == nil {
-			continue
-		}
-		view.SegmentSet = append(view.SegmentSet, segView)
-	}
-	return view
-}
-
-// Not threadsafe, and not needed
-// Only used during data replay by the catalog replayer
-func (e *Table) rebuild(catalog *Catalog) {
-	e.Catalog = catalog
-	e.IdIndex = make(map[uint64]int)
-	for i, seg := range e.SegmentSet {
-		catalog.Sequence.TryUpdateSegmentId(seg.Id)
-		seg.rebuild(e)
-		e.IdIndex[seg.Id] = i
-	}
-}
-
-// Threadsafe
-// It should be applied on a table that was previously soft-deleted
-// It is always driven by engine internal scheduler. It means all the
-// table related data resources were deleted. A hard-deleted table will
-// be deleted from catalog later
-func (e *Table) HardDelete() error {
-	ctx := newDeleteTableCtx(e)
-	return e.Catalog.onCommitRequest(ctx)
-}
-
-func (e *Table) prepareHardDelete(ctx *deleteTableCtx) (LogEntry, error) {
-	cInfo := &CommitInfo{
-		CommitId: e.Catalog.NextUncommitId(),
-		Op:       OpHardDelete,
-		SSLLNode: *common.NewSSLLNode(),
-	}
-	e.Catalog.commitMu.Lock()
-	defer e.Catalog.commitMu.Unlock()
-	e.Lock()
-	defer e.Unlock()
-	if e.IsHardDeletedLocked() {
-		logutil.Warnf("HardDelete %d but already hard deleted", e.Id)
-		return nil, TableNotFoundErr
-	}
-	if !e.IsSoftDeletedLocked() {
-		panic("logic error: Cannot hard delete entry that not soft deleted")
-	}
-	e.onNewCommit(cInfo)
-	logEntry := e.Catalog.prepareCommitEntry(e, ETHardDeleteTable, e)
-	return logEntry, nil
-}
-
-// Simple* wrappes simple usage of wrapped operation
-// It is driven by external command. The engine then schedules a GC task to hard delete
-// related resources.
-func (e *Table) SimpleSoftDelete(exIndex *ExternalIndex) error {
-	ctx := newDropTableCtx(e.Schema.Name, exIndex)
-	ctx.table = e
-	return e.Catalog.onCommitRequest(ctx)
-}
-
-func (e *Table) prepareSoftDelete(ctx *dropTableCtx) (LogEntry, error) {
-	commitId := e.Catalog.NextUncommitId()
-	cInfo := &CommitInfo{
-		TranId:        commitId,
-		CommitId:      commitId,
-		ExternalIndex: ctx.exIndex,
-		Op:            OpSoftDelete,
-		SSLLNode:      *common.NewSSLLNode(),
-	}
-	e.Catalog.commitMu.Lock()
-	defer e.Catalog.commitMu.Unlock()
-	e.Lock()
-	defer e.Unlock()
-	if e.IsSoftDeletedLocked() {
-		return nil, TableNotFoundErr
-	}
-	e.onNewCommit(cInfo)
-	logEntry := e.Catalog.prepareCommitEntry(e, ETSoftDeleteTable, e)
-	return logEntry, nil
-}
-
-// Not safe
-func (e *Table) Marshal() ([]byte, error) {
-	return json.Marshal(e)
-}
-
-// Not safe
-func (e *Table) Unmarshal(buf []byte) error {
-	return json.Unmarshal(buf, e)
-}
-
-// Not safe
-func (e *Table) String() string {
-	buf, _ := e.Marshal()
-	return string(buf)
-}
-
-// Not safe
-// Usually it is used during creating a table. We need to commit the new table entry
-// to the store.
-func (e *Table) ToLogEntry(eType LogEntryType) LogEntry {
-	var buf []byte
-	switch eType {
-	case ETCreateTable:
-		buf, _ = e.Marshal()
-	case ETSoftDeleteTable:
-		if !e.IsSoftDeletedLocked() {
-			panic("logic error")
-		}
-		entry := tableLogEntry{
-			BaseEntry: e.BaseEntry,
-		}
-		buf, _ = entry.Marshal()
-	case ETHardDeleteTable:
-		if !e.IsHardDeletedLocked() {
-			panic("logic error")
-		}
-		entry := tableLogEntry{
-			BaseEntry: e.BaseEntry,
-		}
-		buf, _ = entry.Marshal()
-	default:
-		panic("not supported")
-	}
-	logEntry := logstore.NewAsyncBaseEntry()
-	logEntry.Meta.SetType(eType)
-	logEntry.Unmarshal(buf)
-	return logEntry
-}
-
-// Safe
-func (e *Table) SimpleGetCurrSegment() *Segment {
-	e.RLock()
-	if len(e.SegmentSet) == 0 {
-		e.RUnlock()
-		return nil
-	}
-	seg := e.SegmentSet[len(e.SegmentSet)-1]
-	e.RUnlock()
-	return seg
-}
-
-// Not safe and no need
-// Only used during data replay
-// TODO: Only compatible with v1. Remove later
-func (e *Table) GetReplayIndex() *LogIndex {
-	for i := len(e.SegmentSet) - 1; i >= 0; i-- {
-		seg := e.SegmentSet[i]
-		idx := seg.GetReplayIndex()
-		if idx != nil {
-			return idx
-		}
-	}
-	return nil
-}
-
-// Safe
-// TODO: Only compatible with v1. Remove later
-func (e *Table) GetAppliedIndex(rwmtx *sync.RWMutex) (uint64, bool) {
-	if rwmtx == nil {
-		e.RLock()
-		defer e.RUnlock()
-	}
-	if e.IsDeletedLocked() {
-		return e.BaseEntry.GetAppliedIndex()
-	}
-	var (
-		id uint64
-		ok bool
-	)
-	for i := len(e.SegmentSet) - 1; i >= 0; i-- {
-		seg := e.SegmentSet[i]
-		id, ok = seg.GetAppliedIndex(nil)
-		if ok {
-			break
-		}
-	}
-	if !ok {
-		return e.BaseEntry.GetAppliedIndex()
-	}
-	return id, ok
-}
-
-// Not safe. One writer, multi-readers
-func (e *Table) SimpleCreateBlock() (*Block, *Segment) {
-	var prevSeg *Segment
-	currSeg := e.SimpleGetCurrSegment()
-	if currSeg == nil || currSeg.HasMaxBlocks() {
-		prevSeg = currSeg
-		currSeg = e.SimpleCreateSegment()
-	}
-	blk := currSeg.SimpleCreateBlock()
-	return blk, prevSeg
-}
-
-func (e *Table) getFirstInfullSegment(from *Segment) (*Segment, *Segment) {
-	if len(e.SegmentSet) == 0 {
-		return nil, nil
-	}
-	var curr, next *Segment
-	for i := len(e.SegmentSet) - 1; i >= 0; i-- {
-		seg := e.SegmentSet[i]
-		if seg.Appendable() && from.LE(seg) {
-			curr, next = seg, curr
-		} else {
-			break
-		}
-	}
-	return curr, next
-}
-
-// Not safe. One writer, multi-readers
-func (e *Table) SimpleGetOrCreateNextBlock(from *Block) *Block {
-	var fromSeg *Segment
-	if from != nil {
-		fromSeg = from.Segment
-	}
-	curr, next := e.getFirstInfullSegment(fromSeg)
-	// logutil.Infof("%s, %s", curr.PString(PPL0), fromSeg.PString(PPL1))
-	if curr == nil {
-		curr = e.SimpleCreateSegment()
-	}
-	blk := curr.SimpleGetOrCreateNextBlock(from)
-	if blk != nil {
-		return blk
-	}
-	if next == nil {
-		next = e.SimpleCreateSegment()
-	}
-	return next.SimpleGetOrCreateNextBlock(nil)
-}
-
-func (e *Table) SimpleCreateSegment() *Segment {
-	ctx := newCreateSegmentCtx(e)
-	if err := e.Catalog.onCommitRequest(ctx); err != nil {
-		return nil
-	}
-	return ctx.segment
-}
-
-// Safe
-func (e *Table) SimpleGetSegmentIds() []uint64 {
-	e.RLock()
-	defer e.RUnlock()
-	arrLen := len(e.SegmentSet)
-	ret := make([]uint64, arrLen)
-	for i, seg := range e.SegmentSet {
-		ret[i] = seg.Id
-	}
-	return ret
-}
-
-// Safe
-func (e *Table) SimpleGetSegmentCount() int {
-	e.RLock()
-	defer e.RUnlock()
-	return len(e.SegmentSet)
-}
-
-func (e *Table) prepareCreateSegment(ctx *createSegmentCtx) (LogEntry, error) {
-	se := newSegmentEntry(e.Catalog, e, e.Catalog.NextUncommitId(), ctx.exIndex)
-	logEntry := se.ToLogEntry(ETCreateSegment)
-	e.Catalog.commitMu.Lock()
-	defer e.Catalog.commitMu.Unlock()
-	e.Lock()
-	e.onNewSegment(se)
-	e.Unlock()
-	e.Catalog.prepareCommitLog(se, logEntry)
-	ctx.segment = se
-	return logEntry, nil
-}
-
-func (e *Table) onNewSegment(entry *Segment) {
-	e.IdIndex[entry.Id] = len(e.SegmentSet)
-	e.SegmentSet = append(e.SegmentSet, entry)
-}
-
-// Safe
-func (e *Table) SimpleGetBlock(segId, blkId uint64) (*Block, error) {
-	seg := e.SimpleGetSegment(segId)
-	if seg == nil {
-		return nil, SegmentNotFoundErr
-	}
-	blk := seg.SimpleGetBlock(blkId)
-	if blk == nil {
-		return nil, BlockNotFoundErr
-	}
-	return blk, nil
-}
-
-// Safe
-func (e *Table) SimpleGetSegment(id uint64) *Segment {
-	e.RLock()
-	defer e.RUnlock()
-	return e.GetSegment(id, MinUncommitId)
-}
-
-func (e *Table) GetSegment(id, tranId uint64) *Segment {
-	pos, ok := e.IdIndex[id]
-	if !ok {
-		return nil
-	}
-	entry := e.SegmentSet[pos]
-	return entry
-}
-
-// Not safe
-func (e *Table) PString(level PPLevel) string {
-	s := fmt.Sprintf("<Table[%s]>(%s)(Cnt=%d)", e.Schema.Name, e.BaseEntry.PString(level), len(e.SegmentSet))
-	if level > PPL0 && len(e.SegmentSet) > 0 {
-		s = fmt.Sprintf("%s{", s)
-		for _, seg := range e.SegmentSet {
-			s = fmt.Sprintf("%s\n%s", s, seg.PString(level))
-		}
-		s = fmt.Sprintf("%s\n}", s)
-	}
-	return s
-}
-
-func MockTable(catalog *Catalog, schema *Schema, blkCnt uint64, idx *LogIndex) *Table {
-	if schema == nil {
-		schema = MockSchema(2)
-	}
-	if idx == nil {
-		idx = &LogIndex{
-			Id: SimpleBatchId(common.NextGlobalSeqNum()),
-		}
-	}
-	tbl, err := catalog.SimpleCreateTable(schema, idx)
-	if err != nil {
-		panic(err)
-	}
-
-	var activeSeg *Segment
-	for i := uint64(0); i < blkCnt; i++ {
-		if activeSeg == nil {
-			activeSeg = tbl.SimpleCreateSegment()
-		}
-		activeSeg.SimpleCreateBlock()
-		if len(activeSeg.BlockSet) == int(tbl.Schema.SegmentMaxBlocks) {
-			activeSeg = nil
-		}
-	}
-	return tbl
-}
diff --git a/pkg/vm/engine/aoe/storage/metadata/v2/types.go b/pkg/vm/engine/aoe/storage/metadata/v2/types.go
deleted file mode 100644
index 0ccecbab026805480e2ecae40bdf0360b6fc25c0..0000000000000000000000000000000000000000
--- a/pkg/vm/engine/aoe/storage/metadata/v2/types.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2021 Matrix Origin
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-
-package metadata
-
-import (
-	"errors"
-	"fmt"
-	"matrixone/pkg/container/types"
-	"matrixone/pkg/vm/engine/aoe/storage/common"
-	"sync/atomic"
-)
-
-const (
-	MinUncommitId = ^uint64(0) / 2
-)
-
-var uncommitId = MinUncommitId
-
-func nextUncommitId() uint64 {
-	return atomic.AddUint64(&uncommitId, uint64(1)) - 1
-}
-
-func IsTransientCommitId(id uint64) bool {
-	return id >= MinUncommitId
-}
-
-type State = uint8
-
-const (
-	STInited State = iota
-	STFull
-	STClosed
-	STSorted
-)
-
-type OpT uint8
-
-const (
-	OpReserved OpT = iota
-	OpCreate
-	OpUpgradeFull
-	OpUpgradeClose
-	OpUpgradeSorted
-	OpSoftDelete
-	OpHardDelete
-)
-
-var OpNames = map[OpT]string{
-	OpCreate:        "Create",
-	OpUpgradeFull:   "UpgradeFull",
-	OpUpgradeClose:  "UpgradeClose",
-	OpUpgradeSorted: "UpgradeSorted",
-	OpSoftDelete:    "SoftDelete",
-	OpHardDelete:    "HardDelete",
-}
-
-func OpName(op OpT) string {
-	return OpNames[op]
-}
-
-type CommitInfo struct {
-	common.SSLLNode `json:"-"`
-	CommitId        uint64
-	TranId          uint64
-	Op              OpT
-	ExternalIndex   *ExternalIndex
-	PrevIndex       *ExternalIndex
-	AppliedIndex    *ExternalIndex
-}
-
-func (info *CommitInfo) IsHardDeleted() bool {
-	return info.Op == OpHardDelete
-}
-
-func (info *CommitInfo) IsSoftDeleted() bool {
-	return info.Op == OpSoftDelete
-}
-
-func (info *CommitInfo) PString(level PPLevel) string {
-	s := fmt.Sprintf("CInfo: ")
-	var curr, prev common.ISSLLNode
-	curr = info
-	for curr != nil {
-		if prev != nil {
-			s = fmt.Sprintf("%s -> ", s)
-		}
-		cInfo := curr.(*CommitInfo)
-		s = fmt.Sprintf("%s(%s,%d", s, OpName(cInfo.Op), cInfo.CommitId)
-		if level >= PPL1 {
-			id, _ := info.GetAppliedIndex()
-			s = fmt.Sprintf("%s,%d-%s)", s, id, cInfo.ExternalIndex.String())
-		} else {
-			s = fmt.Sprintf("%s)", s)
-		}
-		// s = fmt.Sprintf("%s(%s,%d,%d)", s, OpName(info.Op), info.TranId-MinUncommitId, info.CommitId)
-		prev = curr
-		curr = curr.GetNext()
-	}
-	return s
-}
-
-// TODO: remove it. Not be used later
-func (info *CommitInfo) GetAppliedIndex() (uint64, bool) {
-	if info.AppliedIndex != nil {
-		return info.AppliedIndex.Id.Id, true
-	}
-	if info.ExternalIndex != nil && info.ExternalIndex.IsBatchApplied() {
-		return info.ExternalIndex.Id.Id, true
-	}
-
-	if info.PrevIndex != nil && info.PrevIndex.IsBatchApplied() {
-		return info.PrevIndex.Id.Id, true
-	}
-	return 0, false
-}
-
-// SetIndex changes the current index to previous index if exists, and
-// sets the current index to idx.
-func (info *CommitInfo) SetIndex(idx LogIndex) error {
-	if info.ExternalIndex != nil {
-		if !info.ExternalIndex.IsApplied() {
-			return errors.New(fmt.Sprintf("already has applied index: %d", info.ExternalIndex.Id))
-		}
-		info.PrevIndex = info.ExternalIndex
-		info.ExternalIndex = &idx
-	} else {
-		if info.PrevIndex != nil {
-			return errors.New(fmt.Sprintf("no index but has prev index: %d", info.PrevIndex.Id))
-		}
-		info.ExternalIndex = &idx
-	}
-	return nil
-}
-
-type Sequence struct {
-	nextTableId   uint64
-	nextSegmentId uint64
-	nextBlockId   uint64
-	nextCommitId  uint64
-	nextIndexId   uint64
-}
-
-func (s *Sequence) NextTableId() uint64 {
-	return atomic.AddUint64(&s.nextTableId, uint64(1))
-}
-
-func (s *Sequence) NextSegmentId() uint64 {
-	return atomic.AddUint64(&s.nextSegmentId, uint64(1))
-}
-
-func (s *Sequence) NextBlockId() uint64 {
-	return atomic.AddUint64(&s.nextBlockId, uint64(1))
-}
-
-func (s *Sequence) NextCommitId() uint64 {
-	return atomic.AddUint64(&s.nextCommitId, uint64(1))
-}
-
-func (s *Sequence) NextIndexId() uint64 {
-	return atomic.AddUint64(&s.nextIndexId, uint64(1))
-}
-
-func (s *Sequence) NextUncommitId() uint64 {
-	return nextUncommitId()
-}
-
-func (s *Sequence) TryUpdateTableId(id uint64) {
-	if s.nextTableId < id {
-		s.nextTableId = id
-	}
-}
-
-func (s *Sequence) TryUpdateCommitId(id uint64) {
-	if s.nextCommitId < id {
-		s.nextCommitId = id
-	}
-}
-
-func (s *Sequence) TryUpdateSegmentId(id uint64) {
-	if s.nextSegmentId < id {
-		s.nextSegmentId = id
-	}
-}
-
-func (s *Sequence) TryUpdateBlockId(id uint64) {
-	if s.nextBlockId < id {
-		s.nextBlockId = id
-	}
-}
-
-func (s *Sequence) TryUpdateIndexId(id uint64) {
-	if s.nextIndexId < id {
-		s.nextIndexId = id
-	}
-}
-
-func EstimateColumnBlockSize(colIdx int, meta *Block) uint64 {
-	switch meta.Segment.Table.Schema.ColDefs[colIdx].Type.Oid {
-	case types.T_json, types.T_char, types.T_varchar:
-		return meta.Segment.Table.Schema.BlockMaxRows * 2 * 4
-	default:
-		return meta.Segment.Table.Schema.BlockMaxRows * uint64(meta.Segment.Table.Schema.ColDefs[colIdx].Type.Size)
-	}
-}
-
-func EstimateBlockSize(meta *Block) uint64 {
-	size := uint64(0)
-	for colIdx, _ := range meta.Segment.Table.Schema.ColDefs {
-		size += EstimateColumnBlockSize(colIdx, meta)
-	}
-	return size
-}
diff --git a/pkg/vm/engine/aoe/storage/mutation/base/types.go b/pkg/vm/engine/aoe/storage/mutation/base/types.go
index f654a94ca399fd0d1fc2db4b899bfd0e48486956..782a6790751a88c8ee72bf093f84b2aeae020c53 100644
--- a/pkg/vm/engine/aoe/storage/mutation/base/types.go
+++ b/pkg/vm/engine/aoe/storage/mutation/base/types.go
@@ -16,7 +16,7 @@ package base
 import (
 	"matrixone/pkg/vm/engine/aoe/storage/container/batch"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mutation/buffer/base"
 )
 
diff --git a/pkg/vm/engine/aoe/storage/mutation/blk.go b/pkg/vm/engine/aoe/storage/mutation/blk.go
index dbed5a15c4d5d0c873f59cdfd03a99908e27b492..cc6f3986b7e3dbd2f65c8cc52d2a88ecf47a9ff0 100644
--- a/pkg/vm/engine/aoe/storage/mutation/blk.go
+++ b/pkg/vm/engine/aoe/storage/mutation/blk.go
@@ -18,7 +18,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/container/vector"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1/iface"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	mb "matrixone/pkg/vm/engine/aoe/storage/mutation/base"
 	"matrixone/pkg/vm/engine/aoe/storage/mutation/buffer"
 	"matrixone/pkg/vm/engine/aoe/storage/mutation/buffer/base"
diff --git a/pkg/vm/engine/aoe/storage/mutation/blk_test.go b/pkg/vm/engine/aoe/storage/mutation/blk_test.go
index 7e7a78f8d1dad9d615245d7bf35619ec23cabdca..3343bfa3217898c941b426e2bf865889114b678d 100644
--- a/pkg/vm/engine/aoe/storage/mutation/blk_test.go
+++ b/pkg/vm/engine/aoe/storage/mutation/blk_test.go
@@ -22,7 +22,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	ldio "matrixone/pkg/vm/engine/aoe/storage/layout/dataio"
 	"matrixone/pkg/vm/engine/aoe/storage/layout/table/v1"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/mock"
 	"matrixone/pkg/vm/engine/aoe/storage/mutation/buffer"
 	"matrixone/pkg/vm/engine/aoe/storage/testutils/config"
diff --git a/pkg/vm/engine/aoe/storage/options.go b/pkg/vm/engine/aoe/storage/options.go
index 265f87e2451429ce2f2a906e4c24f921a9be84c9..96694ed11ccda49b8176b596790b8b78d5355332 100644
--- a/pkg/vm/engine/aoe/storage/options.go
+++ b/pkg/vm/engine/aoe/storage/options.go
@@ -19,7 +19,7 @@ import (
 	"matrixone/pkg/vm/engine/aoe/storage/event"
 	"matrixone/pkg/vm/engine/aoe/storage/gc"
 	"matrixone/pkg/vm/engine/aoe/storage/gc/gci"
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/sched"
 	"matrixone/pkg/vm/engine/aoe/storage/wal"
 	"matrixone/pkg/vm/engine/aoe/storage/wal/shard"
@@ -92,9 +92,8 @@ type Options struct {
 	Wal wal.Wal
 
 	Meta struct {
-		CKFactory *checkpointerFactory
-		Conf      *MetaCfg
-		Catalog   *metadata.Catalog
+		Conf    *MetaCfg
+		Catalog *metadata.Catalog
 	}
 
 	GC struct {
@@ -157,10 +156,6 @@ func (o *Options) FillDefaults(dirname string) *Options {
 		o.Meta.Catalog.Start()
 	}
 
-	if o.Meta.CKFactory == nil {
-		o.Meta.CKFactory = NewCheckpointerFactory(dirname)
-	}
-
 	if o.CacheCfg == nil {
 		o.CacheCfg = &CacheCfg{
 			IndexCapacity:  DefaultIndexCacheSize,
diff --git a/pkg/vm/engine/aoe/storage/wal/shard/types.go b/pkg/vm/engine/aoe/storage/wal/shard/types.go
index d0d2b0612f2e81de9431801eedd646751e7ca06c..27ec43699da1c52ee7e87d01a24dc6b6a7142ec8 100644
--- a/pkg/vm/engine/aoe/storage/wal/shard/types.go
+++ b/pkg/vm/engine/aoe/storage/wal/shard/types.go
@@ -15,7 +15,7 @@
 package shard
 
 import (
-	"matrixone/pkg/vm/engine/aoe/storage/metadata/v2"
+	"matrixone/pkg/vm/engine/aoe/storage/metadata/v1"
 	"matrixone/pkg/vm/engine/aoe/storage/wal"
 )