From 47d9563dba10731ecc33c03e64d4ff16008df555 Mon Sep 17 00:00:00 2001
From: Jackson <xzxiong@yeah.net>
Date: Fri, 12 Aug 2022 14:25:24 +0800
Subject: [PATCH] Implement Trace/Log/Error framework, able to collect data and
 store it in standalone mode. (#4388)

- Make statement/trace/log/error interrelated by statement_id/span_id.
- Store data in the database, making it searchable via SQL commands.
- The error framework provides a unified format to display and enrich error info.

Approved by: @fengttt, @daviszhen, @aptend, @yingfeng, @aressu1985
---
 cmd/db-server/main.go                         |   43 +-
 cmd/generate-config/system_vars_def.toml      |   30 +
 go.mod                                        |    5 +-
 go.sum                                        |    9 +-
 pkg/frontend/load_test.go                     |    1 +
 pkg/frontend/mysql_cmd_executor.go            |   41 +-
 pkg/frontend/mysql_cmd_executor_test.go       |    7 +
 pkg/frontend/mysql_protocol_predefines.go     |    2 +-
 pkg/logutil/api.go                            |   13 +-
 pkg/logutil/dragonboat.go                     |   89 ++
 pkg/logutil/internal.go                       |   36 +-
 pkg/logutil/logutil2/api.go                   |  122 ++
 pkg/logutil/report.go                         |  160 +++
 pkg/util/batchpipe/batch_pipe.go              |  365 ++++++
 pkg/util/batchpipe/batch_pipe_test.go         |  382 ++++++
 pkg/util/batchpipe/doc.go                     |   16 +
 pkg/util/errors/context.go                    |   79 ++
 pkg/util/errors/context_test.go               |  302 +++++
 pkg/util/errors/errors.go                     |  130 ++
 pkg/util/errors/errors_test.go                |  374 ++++++
 pkg/util/errors/message.go                    |   86 ++
 pkg/util/errors/message_test.go               |  124 ++
 pkg/util/errors/panic.go                      |   41 +
 pkg/util/errors/panic_test.go                 |  122 ++
 pkg/util/errors/stack.go                      |   90 ++
 pkg/util/errors/stack_test.go                 |  217 ++++
 pkg/util/export/batch_processor.go            |  374 ++++++
 pkg/util/export/batch_processor_test.go       |  263 ++++
 pkg/util/export/type.go                       |   66 +
 pkg/util/export/type_test.go                  |   99 ++
 .../internalExecutor/internal_executor.go     |    6 +-
 pkg/util/metric/batch_collector.go            |    4 +-
 pkg/util/metric/m_hardware.go                 |    6 +-
 pkg/util/metric/m_process.go                  |   10 +-
 pkg/util/metric/m_register.go                 |    4 +-
 pkg/util/metric/metric.go                     |   32 +-
 pkg/util/metric/metric_collector.go           |  200 +--
 pkg/util/metric/metric_collector_test.go      |   64 +-
 pkg/util/metric/metric_exporter.go            |   11 +-
 pkg/util/metric/metric_exporter_test.go       |    2 +-
 pkg/util/metric/metric_test.go                |    4 +-
 pkg/util/metric/mtype_rawhist_test.go         |    6 +-
 pkg/util/metric/testutil_test.go              |    8 +-
 pkg/util/rand.go                              |   33 +
 pkg/util/rand_test.go                         |   58 +
 pkg/util/stack.go                             |   65 +
 pkg/util/stack_test.go                        |   62 +
 pkg/util/time.go                              |   65 +
 pkg/util/time_test.go                         |   65 +
 pkg/util/trace/batch_span_processor.go        |   88 ++
 pkg/util/trace/batch_span_processor_test.go   |  135 ++
 pkg/util/trace/buffer_pipe_sql.go             |  528 ++++++++
 pkg/util/trace/buffer_pipe_sql_test.go        | 1105 +++++++++++++++++
 pkg/util/trace/context.go                     |   39 +
 pkg/util/trace/example/main.go                |  237 ++++
 pkg/util/trace/mo_trace.go                    |  259 ++++
 pkg/util/trace/noop_trace.go                  |   64 +
 pkg/util/trace/noop_trace_test.go             |  214 ++++
 pkg/util/trace/ptrace.go                      |  180 +++
 pkg/util/trace/report_error.go                |   64 +
 pkg/util/trace/report_log.go                  |  163 +++
 pkg/util/trace/report_log_test.go             |  113 ++
 pkg/util/trace/report_statement.go            |   84 ++
 pkg/util/trace/resource.go                    |   86 ++
 pkg/util/trace/schema.go                      |  124 ++
 pkg/util/trace/schema_test.go                 |   96 ++
 pkg/util/trace/trace.go                       |  136 ++
 pkg/util/trace/trace_test.go                  |  120 ++
 pkg/util/trace/type.go                        |   51 +
 test/result/database/create_database.result   |    4 +-
 test/result/database/drop_database.result     |    1 +
 test/result/database/new_database.result      |    2 +
 72 files changed, 7995 insertions(+), 291 deletions(-)
 create mode 100644 pkg/logutil/dragonboat.go
 create mode 100644 pkg/logutil/logutil2/api.go
 create mode 100644 pkg/logutil/report.go
 create mode 100644 pkg/util/batchpipe/batch_pipe.go
 create mode 100644 pkg/util/batchpipe/batch_pipe_test.go
 create mode 100644 pkg/util/batchpipe/doc.go
 create mode 100644 pkg/util/errors/context.go
 create mode 100644 pkg/util/errors/context_test.go
 create mode 100644 pkg/util/errors/errors.go
 create mode 100644 pkg/util/errors/errors_test.go
 create mode 100644 pkg/util/errors/message.go
 create mode 100644 pkg/util/errors/message_test.go
 create mode 100644 pkg/util/errors/panic.go
 create mode 100644 pkg/util/errors/panic_test.go
 create mode 100644 pkg/util/errors/stack.go
 create mode 100644 pkg/util/errors/stack_test.go
 create mode 100644 pkg/util/export/batch_processor.go
 create mode 100644 pkg/util/export/batch_processor_test.go
 create mode 100644 pkg/util/export/type.go
 create mode 100644 pkg/util/export/type_test.go
 create mode 100644 pkg/util/rand.go
 create mode 100644 pkg/util/rand_test.go
 create mode 100644 pkg/util/stack.go
 create mode 100644 pkg/util/stack_test.go
 create mode 100644 pkg/util/time.go
 create mode 100644 pkg/util/time_test.go
 create mode 100644 pkg/util/trace/batch_span_processor.go
 create mode 100644 pkg/util/trace/batch_span_processor_test.go
 create mode 100644 pkg/util/trace/buffer_pipe_sql.go
 create mode 100644 pkg/util/trace/buffer_pipe_sql_test.go
 create mode 100644 pkg/util/trace/context.go
 create mode 100644 pkg/util/trace/example/main.go
 create mode 100644 pkg/util/trace/mo_trace.go
 create mode 100644 pkg/util/trace/noop_trace.go
 create mode 100644 pkg/util/trace/noop_trace_test.go
 create mode 100644 pkg/util/trace/ptrace.go
 create mode 100644 pkg/util/trace/report_error.go
 create mode 100644 pkg/util/trace/report_log.go
 create mode 100644 pkg/util/trace/report_log_test.go
 create mode 100644 pkg/util/trace/report_statement.go
 create mode 100644 pkg/util/trace/resource.go
 create mode 100644 pkg/util/trace/schema.go
 create mode 100644 pkg/util/trace/schema_test.go
 create mode 100644 pkg/util/trace/trace.go
 create mode 100644 pkg/util/trace/trace_test.go
 create mode 100644 pkg/util/trace/type.go

diff --git a/cmd/db-server/main.go b/cmd/db-server/main.go
index ffd7da116..54c78be50 100644
--- a/cmd/db-server/main.go
+++ b/cmd/db-server/main.go
@@ -15,6 +15,7 @@
 package main
 
 import (
+	"context"
 	"flag"
 	"fmt"
 	"os"
@@ -23,6 +24,7 @@ import (
 
 	ie "github.com/matrixorigin/matrixone/pkg/util/internalExecutor"
 	"github.com/matrixorigin/matrixone/pkg/util/metric"
+	"github.com/matrixorigin/matrixone/pkg/util/trace"
 	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/db"
 	"github.com/matrixorigin/matrixone/pkg/vm/engine/tae/moengine"
 
@@ -31,6 +33,8 @@ import (
 	"github.com/matrixorigin/matrixone/pkg/logutil"
 	"github.com/matrixorigin/matrixone/pkg/vm/engine"
 	"github.com/matrixorigin/matrixone/pkg/vm/mmu/host"
+
+	"github.com/google/gops/agent"
 )
 
 const (
@@ -61,6 +65,21 @@ func createMOServer() {
 	address := fmt.Sprintf("%s:%d", config.GlobalSystemVariables.GetHost(), config.GlobalSystemVariables.GetPort())
 	pu := config.NewParameterUnit(&config.GlobalSystemVariables, config.HostMmu, config.Mempool, config.StorageEngine, config.ClusterNodes)
 	mo = frontend.NewMOServer(address, pu)
+	{
+		// init trace/log/error framework
+		if _, err := trace.Init(context.Background(),
+			trace.WithMOVersion(MoVersion),
+			trace.WithNode(0, trace.NodeTypeNode),
+			trace.EnableTracer(config.GlobalSystemVariables.GetEnableTrace()),
+			trace.WithBatchProcessMode(config.GlobalSystemVariables.GetTraceBatchProcessor()),
+			trace.DebugMode(config.GlobalSystemVariables.GetEnableTraceDebug()),
+			trace.WithSQLExecutor(func() ie.InternalExecutor {
+				return frontend.NewInternalExecutor(pu)
+			}),
+		); err != nil {
+			panic(err)
+		}
+	}
 	if config.GlobalSystemVariables.GetEnableMetric() {
 		ieFactory := func() ie.InternalExecutor {
 			return frontend.NewInternalExecutor(pu)
@@ -75,6 +94,10 @@ func runMOServer() error {
 }
 
 func serverShutdown(isgraceful bool) error {
+	// flush trace/log/error framework
+	if err := trace.Shutdown(trace.DefaultContext()); err != nil {
+		logutil.Errorf("Shutdown trace err: %v", err)
+	}
 	return mo.Stop()
 }
 
@@ -165,12 +188,13 @@ func main() {
 	}
 
 	logConf := logutil.LogConfig{
-		Level:      config.GlobalSystemVariables.GetLogLevel(),
-		Format:     config.GlobalSystemVariables.GetLogFormat(),
-		Filename:   config.GlobalSystemVariables.GetLogFilename(),
-		MaxSize:    int(config.GlobalSystemVariables.GetLogMaxSize()),
-		MaxDays:    int(config.GlobalSystemVariables.GetLogMaxDays()),
-		MaxBackups: int(config.GlobalSystemVariables.GetLogMaxBackups()),
+		Level:       config.GlobalSystemVariables.GetLogLevel(),
+		Format:      config.GlobalSystemVariables.GetLogFormat(),
+		Filename:    config.GlobalSystemVariables.GetLogFilename(),
+		MaxSize:     int(config.GlobalSystemVariables.GetLogMaxSize()),
+		MaxDays:     int(config.GlobalSystemVariables.GetLogMaxDays()),
+		MaxBackups:  int(config.GlobalSystemVariables.GetLogMaxBackups()),
+		EnableStore: config.GlobalSystemVariables.GetEnableTrace(),
 	}
 
 	logutil.SetupMOLogger(&logConf)
@@ -220,6 +244,11 @@ func main() {
 		os.Exit(LoadConfigExit)
 	}
 
+	if err := agent.Listen(agent.Options{}); err != nil {
+		logutil.Errorf("listen gops agent failed: %s", err)
+		os.Exit(StartMOExit)
+	}
+
 	createMOServer()
 
 	err := runMOServer()
@@ -235,6 +264,8 @@ func main() {
 		os.Exit(ShutdownExit)
 	}
 
+	agent.Close()
+
 	cleanup()
 
 	if engineName == "tae" {
diff --git a/cmd/generate-config/system_vars_def.toml b/cmd/generate-config/system_vars_def.toml
index 0e7b9f48b..b0f8a7dd0 100644
--- a/cmd/generate-config/system_vars_def.toml
+++ b/cmd/generate-config/system_vars_def.toml
@@ -317,6 +317,36 @@ values = ["true", "false"]
 comment = "default is true. if true, enable metric at booting"
 update-mode = "dynamic"
 
+[[parameter]]
+name = "enableTrace"
+scope = ["global"]
+access = ["file"]
+type = "bool"
+domain-type = "set"
+values = ["true", "false"]
+comment = "default is true. if true, enable trace at booting"
+update-mode = "dynamic"
+
+[[parameter]]
+name = "traceBatchProcessor"
+scope = ["global"]
+access = ["file"]
+type = "string"
+domain-type = "set"
+values = ["InternalExecutor", "FileService"]
+comment = "default is InternalExecutor. if InternalExecutor, use internal sql executor, FileService will implement soon."
+update-mode = "dynamic"
+
+[[parameter]]
+name = "enableTraceDebug"
+scope = ["global"]
+access = ["file"]
+type = "bool"
+domain-type = "set"
+values = ["false", "true"]
+comment = "default is false. With true, system will check all the children span is ended, which belong to the closing span."
+update-mode = "dynamic"
+
 [[parameter]]
 name = "logLevel"
 scope = ["global"]
diff --git a/go.mod b/go.mod
index cf8c2ad51..863813adf 100644
--- a/go.mod
+++ b/go.mod
@@ -20,6 +20,7 @@ require (
 	github.com/golang/mock v1.6.0
 	github.com/google/btree v1.0.1
 	github.com/google/gofuzz v1.2.0
+	github.com/google/gops v0.3.25
 	github.com/google/uuid v1.3.0
 	github.com/lni/dragonboat/v4 v4.0.0-20220803152440-a83f853de8b1
 	github.com/lni/goutils v1.3.1-0.20220604063047-388d67b4dbc4
@@ -97,7 +98,7 @@ require (
 	github.com/mschoch/smat v0.2.0 // indirect
 	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
 	github.com/pingcap/errors v0.11.5-0.20201029093017-5a7df2af2ac7 // indirect
-	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pkg/errors v0.9.1
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
 	github.com/prometheus/client_golang v1.11.0
@@ -105,7 +106,7 @@ require (
 	github.com/prometheus/common v0.26.0 // indirect
 	github.com/prometheus/procfs v0.6.0 // indirect
 	github.com/rogpeppe/go-internal v1.8.1 // indirect
-	github.com/shirou/gopsutil/v3 v3.22.3
+	github.com/shirou/gopsutil/v3 v3.22.4
 	github.com/tklauser/go-sysconf v0.3.10 // indirect
 	github.com/tklauser/numcpus v0.4.0 // indirect
 	github.com/yusufpapurcu/wmi v1.2.2 // indirect
diff --git a/go.sum b/go.sum
index c7c810013..023ce440f 100644
--- a/go.sum
+++ b/go.sum
@@ -261,6 +261,8 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gops v0.3.25 h1:Pf6uw+cO6pDhc7HJ71NiG0x8dyQTeQcmg3HQFF39qVw=
+github.com/google/gops v0.3.25/go.mod h1:8A7ebAm0id9K3H0uOggeRVGxszSvnlURun9mg3GdYDw=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -352,6 +354,7 @@ github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2
 github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0=
 github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro=
 github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8=
+github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -522,8 +525,9 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shirou/gopsutil/v3 v3.22.3 h1:UebRzEomgMpv61e3hgD1tGooqX5trFbdU/ehphbHd00=
 github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM=
+github.com/shirou/gopsutil/v3 v3.22.4 h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI=
+github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -587,6 +591,7 @@ github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
 github.com/yireyun/go-queue v0.0.0-20220725040158-a4dd64810e1e h1:6BhwJMDB4lTjHfNKdf1O/IuhVS3Mw85/mnqpoQlngRk=
@@ -782,6 +787,7 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664 h1:wEZYwx+kK+KlZ0hpvP2Ls1Xr4+RWnlzGFwPP0aiDjIU=
 golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -922,4 +928,5 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/goversion v1.2.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/pkg/frontend/load_test.go b/pkg/frontend/load_test.go
index 608c102ed..58fa8b199 100644
--- a/pkg/frontend/load_test.go
+++ b/pkg/frontend/load_test.go
@@ -56,6 +56,7 @@ func Test_load(t *testing.T) {
 		eng := mock_frontend.NewMockTxnEngine(ctrl)
 		txn := mock_frontend.NewMockTxn(ctrl)
 		txn.EXPECT().GetCtx().Return(nil).AnyTimes()
+		txn.EXPECT().GetID().Return(uint64(0)).AnyTimes()
 		txn.EXPECT().Commit().Return(nil).AnyTimes()
 		txn.EXPECT().Rollback().Return(nil).AnyTimes()
 		txn.EXPECT().String().Return("txn0").AnyTimes()
diff --git a/pkg/frontend/mysql_cmd_executor.go b/pkg/frontend/mysql_cmd_executor.go
index d3ae2f763..f4e1e5bd4 100644
--- a/pkg/frontend/mysql_cmd_executor.go
+++ b/pkg/frontend/mysql_cmd_executor.go
@@ -19,9 +19,6 @@ import (
 	goErrors "errors"
 	"fmt"
 
-	"github.com/matrixorigin/matrixone/pkg/container/bytejson"
-	"github.com/matrixorigin/matrixone/pkg/encoding"
-
 	"os"
 	"runtime/pprof"
 	"sort"
@@ -40,19 +37,25 @@ import (
 	"github.com/matrixorigin/matrixone/pkg/defines"
 	"github.com/matrixorigin/matrixone/pkg/errno"
 	"github.com/matrixorigin/matrixone/pkg/logutil"
+	"github.com/matrixorigin/matrixone/pkg/logutil/logutil2"
 	plan3 "github.com/matrixorigin/matrixone/pkg/pb/plan"
 	"github.com/matrixorigin/matrixone/pkg/sql/errors"
 	"github.com/matrixorigin/matrixone/pkg/sql/parsers"
+	"github.com/matrixorigin/matrixone/pkg/util"
 	"github.com/matrixorigin/matrixone/pkg/util/metric"
 	"github.com/matrixorigin/matrixone/pkg/vm/engine"
 	"github.com/matrixorigin/matrixone/pkg/vm/mheap"
 
 	"github.com/matrixorigin/matrixone/pkg/container/batch"
+	"github.com/matrixorigin/matrixone/pkg/container/bytejson"
 	"github.com/matrixorigin/matrixone/pkg/container/types"
+	"github.com/matrixorigin/matrixone/pkg/encoding"
 	"github.com/matrixorigin/matrixone/pkg/sql/parsers/dialect"
 	"github.com/matrixorigin/matrixone/pkg/sql/parsers/dialect/mysql"
 	"github.com/matrixorigin/matrixone/pkg/sql/parsers/tree"
 	"github.com/matrixorigin/matrixone/pkg/vm/process"
+
+	"github.com/matrixorigin/matrixone/pkg/util/trace"
 )
 
 func onlyCreateStatementErrorInfo() string {
@@ -133,6 +136,32 @@ func (mce *MysqlCmdExecutor) GetRoutineManager() *RoutineManager {
 	return mce.routineMgr
 }
 
+func (mce *MysqlCmdExecutor) RecordStatement(ctx context.Context, ses *Session, proc *process.Process, sql string, beginIns time.Time) context.Context {
+	statementId := util.Fastrand64()
+	sessInfo := proc.SessionInfo
+	txnID := uint64(0)
+	if ses.GetTxnHandler().IsValidTxn() { // fixme: how Could I get an valid txn ID.
+		txnID = ses.GetTxnHandler().GetTxn().GetID()
+	}
+	trace.ReportStatement(
+		ctx,
+		&trace.StatementInfo{
+			StatementID:          statementId,
+			SessionID:            sessInfo.GetConnectionID(),
+			TransactionID:        txnID,
+			Account:              "account", //fixme: sessInfo.GetAccount()
+			User:                 sessInfo.GetUser(),
+			Host:                 sessInfo.GetHost(),
+			Database:             sessInfo.GetDatabase(),
+			Statement:            sql,
+			StatementFingerprint: "", // fixme
+			StatementTag:         "", // fixme
+			RequestAt:            util.NowNS(),
+		},
+	)
+	return trace.ContextWithSpanContext(ctx, trace.SpanContextWithID(trace.TraceID(statementId)))
+}
+
 // outputPool outputs the data
 type outputPool interface {
 	resetLineStr()
@@ -1712,6 +1741,10 @@ func (mce *MysqlCmdExecutor) doComQuery(sql string) (retErr error) {
 		Version:      serverVersion,
 	}
 
+	var rootCtx = mce.RecordStatement(trace.DefaultContext(), ses, proc, sql, beginInstant)
+	ctx, span := trace.Start(rootCtx, "doComQuery")
+	defer span.End()
+
 	cws, err := GetComputationWrapper(ses.GetDatabaseName(),
 		sql,
 		ses.GetUserName(),
@@ -1910,7 +1943,7 @@ func (mce *MysqlCmdExecutor) doComQuery(sql string) (retErr error) {
 
 		runner = ret.(ComputationRunner)
 		if ses.Pu.SV.GetRecordTimeElapsedOfSqlRequest() {
-			logutil.Infof("time of Exec.Build : %s", time.Since(cmpBegin).String())
+			logutil2.Infof(ctx, "time of Exec.Build : %s", time.Since(cmpBegin).String())
 		}
 
 		// cw.Compile might rewrite sql, here we fetch the latest version
diff --git a/pkg/frontend/mysql_cmd_executor_test.go b/pkg/frontend/mysql_cmd_executor_test.go
index 5c843ad16..008017b3f 100644
--- a/pkg/frontend/mysql_cmd_executor_test.go
+++ b/pkg/frontend/mysql_cmd_executor_test.go
@@ -18,6 +18,7 @@ import (
 	"context"
 	"fmt"
 	"github.com/matrixorigin/matrixone/pkg/config"
+	"github.com/matrixorigin/matrixone/pkg/util/trace"
 	"testing"
 
 	"github.com/matrixorigin/matrixone/pkg/sql/parsers/tree"
@@ -39,6 +40,10 @@ import (
 	"github.com/smartystreets/goconvey/convey"
 )
 
+func init() {
+	trace.Init(context.Background(), trace.EnableTracer(false))
+}
+
 func Test_mce(t *testing.T) {
 	ctx := context.TODO()
 	convey.Convey("boot mce succ", t, func() {
@@ -50,6 +55,7 @@ func Test_mce(t *testing.T) {
 
 		txn := mock_frontend.NewMockTxn(ctrl)
 		txn.EXPECT().GetCtx().Return(nil).AnyTimes()
+		txn.EXPECT().GetID().Return(uint64(0)).AnyTimes()
 		txn.EXPECT().Commit().Return(nil).AnyTimes()
 		txn.EXPECT().Rollback().Return(nil).AnyTimes()
 		txn.EXPECT().String().Return("txn0").AnyTimes()
@@ -279,6 +285,7 @@ func Test_mce_selfhandle(t *testing.T) {
 
 		txn := mock_frontend.NewMockTxn(ctrl)
 		txn.EXPECT().GetCtx().Return(nil).AnyTimes()
+		txn.EXPECT().GetID().Return(uint64(0)).AnyTimes()
 		txn.EXPECT().Commit().Return(nil).AnyTimes()
 		txn.EXPECT().Rollback().Return(nil).AnyTimes()
 		txn.EXPECT().String().Return("txn0").AnyTimes()
diff --git a/pkg/frontend/mysql_protocol_predefines.go b/pkg/frontend/mysql_protocol_predefines.go
index a494dfc8b..3f91b1004 100644
--- a/pkg/frontend/mysql_protocol_predefines.go
+++ b/pkg/frontend/mysql_protocol_predefines.go
@@ -10933,7 +10933,7 @@ var errorMsgRefer = map[uint16]errorMsgItem{
 	ER_GRP_RPL_RECOVERY_START_FALLBACK:                      {13472, []string{"HY000"}, "Due to some issue on the previous step distributed recovery is now executing: %s"},
 	ER_GRP_RPL_RECOVERY_START_NO_FALLBACK:                   {13473, []string{"HY000"}, "Due to a critical cloning error or lack of donors, distributed recovery cannot be executed. The member will now leave the group."},
 	ER_GRP_RPL_SLAVE_THREAD_ERROR_ON_CLONE:                  {13474, []string{"HY000"}, "The '%s' thread of channel '%s' will error out as the server will attempt to clone another server"},
-	ER_UNKNOWN_TABLE_IN_UPGRADE:                             {13475, []string{"HY000"}, "Unknown table '%-.129s'"},
+	ER_UNKNOWN_TABLE_IN_UPGRADE:                             {13475, []string{"42S02"}, "Unknown table '%-.129s'"},
 	ER_IDENT_CAUSES_TOO_LONG_PATH_IN_UPGRADE:                {13476, []string{"HY000"}, "Long database name and identifier for object resulted in path length exceeding %d characters. Path: '%s'."},
 	ER_XA_CANT_CREATE_MDL_BACKUP:                            {13477, []string{"HY000"}, "XA: Failed to take MDL Lock backup of PREPARED XA transaction during client disconnect."},
 	ER_AUDIT_LOG_SUPER_PRIVILEGE_REQUIRED:                   {13478, []string{"HY000"}, "SUPER privilege or AUDIT_ADMIN role required for '%s'@'%s' user."},
diff --git a/pkg/logutil/api.go b/pkg/logutil/api.go
index 39dfdeaff..40fd1c98c 100644
--- a/pkg/logutil/api.go
+++ b/pkg/logutil/api.go
@@ -60,7 +60,7 @@ func Warnf(msg string, fields ...interface{}) {
 
 // Errorf only use in develop mode
 func Errorf(msg string, fields ...interface{}) {
-	GetGlobalLogger().WithOptions(zap.AddStacktrace(zap.ErrorLevel)).Sugar().Errorf(msg, fields...)
+	GetGlobalLogger().WithOptions(zap.AddCallerSkip(1), zap.AddStacktrace(zap.ErrorLevel)).Sugar().Errorf(msg, fields...)
 }
 
 // Panicf only use in develop mode
@@ -111,6 +111,7 @@ func Adjust(logger *zap.Logger, options ...zap.Option) *zap.Logger {
 
 // GetLoggerWithOptions get default zap logger
 func GetLoggerWithOptions(level zapcore.LevelEnabler, encoder zapcore.Encoder, syncer zapcore.WriteSyncer, options ...zap.Option) *zap.Logger {
+	var cores []zapcore.Core
 	options = append(options, zap.AddStacktrace(zapcore.FatalLevel), zap.AddCaller())
 	if syncer == nil {
 		syncer = getConsoleSyncer()
@@ -118,7 +119,15 @@ func GetLoggerWithOptions(level zapcore.LevelEnabler, encoder zapcore.Encoder, s
 	if encoder == nil {
 		encoder = getLoggerEncoder("")
 	}
-	return zap.New(zapcore.NewCore(encoder, syncer, level), options...)
+	cores = append(cores, zapcore.NewCore(encoder, syncer, level))
+
+	if EnableStoreDB() {
+		encoder, syncer := getTraceLogSinks()
+		cores = append(cores, zapcore.NewCore(encoder, syncer, level))
+	}
+
+	GetLevelChangeFunc()(level)
+	return zap.New(zapcore.NewTee(cores...), options...)
 }
 
 // GetLogger get default zap logger
diff --git a/pkg/logutil/dragonboat.go b/pkg/logutil/dragonboat.go
new file mode 100644
index 000000000..f887f067a
--- /dev/null
+++ b/pkg/logutil/dragonboat.go
@@ -0,0 +1,89 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"github.com/lni/dragonboat/v4/logger"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+var _ logger.ILogger = (*DragonboatAdaptLogger)(nil)
+
+type DragonboatAdaptLogger struct {
+	logger  *zap.SugaredLogger
+	atom    *zap.AtomicLevel
+	pkgName string
+}
+
+func (d *DragonboatAdaptLogger) SetLevel(level logger.LogLevel) {
+	switch level {
+	case logger.CRITICAL:
+		d.atom.SetLevel(zapcore.DPanicLevel)
+	case logger.ERROR:
+		d.atom.SetLevel(zapcore.ErrorLevel)
+	case logger.WARNING:
+		d.atom.SetLevel(zapcore.WarnLevel)
+	case logger.INFO:
+		d.atom.SetLevel(zapcore.InfoLevel)
+	case logger.DEBUG:
+		d.atom.SetLevel(zapcore.DebugLevel)
+	default:
+		d.atom.SetLevel(zapcore.DebugLevel)
+	}
+}
+
+func (d DragonboatAdaptLogger) Debugf(format string, args ...interface{}) {
+	d.logger.Debugf(format, args...)
+}
+
+func (d DragonboatAdaptLogger) Infof(format string, args ...interface{}) {
+	d.logger.Infof(format, args...)
+}
+
+func (d DragonboatAdaptLogger) Warningf(format string, args ...interface{}) {
+	d.logger.Warnf(format, args...)
+}
+
+func (d DragonboatAdaptLogger) Errorf(format string, args ...interface{}) {
+	d.logger.Errorf(format, args...)
+}
+
+func (d DragonboatAdaptLogger) Panicf(format string, args ...interface{}) {
+	d.logger.Panicf(format, args...)
+}
+
+// DragonboatFactory implement logger.Factory for logger.SetLoggerFactory
+// create DragonboatAdaptLogger intance
+func DragonboatFactory(name string) logger.ILogger {
+	var cores = make([]zapcore.Core, 0, 2)
+	cfg := getGlobalLogConfig()
+	atom := cfg.getLevel()
+	sinks := cfg.getSinks()
+	for _, sink := range sinks {
+		cores = append(cores, zapcore.NewCore(sink.enc, sink.out, atom))
+	}
+	options := cfg.getOptions()
+	options = append(options, zap.AddCallerSkip(2))
+	return &DragonboatAdaptLogger{
+		logger:  zap.New(zapcore.NewTee(cores...), options...).Named(name).Sugar(),
+		atom:    &atom,
+		pkgName: name,
+	}
+}
+
+func init() {
+	//logger.SetLoggerFactory(DragonboatFactory)
+}
diff --git a/pkg/logutil/internal.go b/pkg/logutil/internal.go
index bd3f0f939..568c39932 100644
--- a/pkg/logutil/internal.go
+++ b/pkg/logutil/internal.go
@@ -26,6 +26,7 @@ import (
 
 // SetupMOLogger sets up the global logger for MO Server.
 func SetupMOLogger(conf *LogConfig) {
+	setGlobalLogConfig(conf) // keep for dragonboat/v4, ... and so on
 	logger, err := initMOLogger(conf)
 	if err != nil {
 		panic(err)
@@ -41,10 +42,13 @@ func initMOLogger(cfg *LogConfig) (*zap.Logger, error) {
 
 // global zap logger for MO server.
 var _globalLogger atomic.Value
+var _skip1Logger atomic.Value
 
 // init initializes a default zap logger before set up logger.
 func init() {
+	SetLogReporter(&TraceReporter{noopReportLog, noopReportZap, noopLevelSignal, noopContextField})
 	conf := &LogConfig{Level: "info", Format: "console"}
+	setGlobalLogConfig(conf)
 	logger, _ := initMOLogger(conf)
 	replaceGlobalLogger(logger)
 }
@@ -54,18 +58,24 @@ func GetGlobalLogger() *zap.Logger {
 	return _globalLogger.Load().(*zap.Logger)
 }
 
+func GetSkip1Logger() *zap.Logger {
+	return _skip1Logger.Load().(*zap.Logger)
+}
+
 // replaceGlobalLogger replaces the current global zap Logger.
 func replaceGlobalLogger(logger *zap.Logger) {
 	_globalLogger.Store(logger)
+	_skip1Logger.Store(logger.WithOptions(zap.AddCallerSkip(1)))
 }
 
 type LogConfig struct {
-	Level      string `toml:"level"`
-	Format     string `toml:"format"`
-	Filename   string `toml:"filename"`
-	MaxSize    int    `toml:"max-size"`
-	MaxDays    int    `toml:"max-days"`
-	MaxBackups int    `toml:"max-backups"`
+	Level       string `toml:"level"`
+	Format      string `toml:"format"`
+	Filename    string `toml:"filename"`
+	MaxSize     int    `toml:"max-size"`
+	MaxDays     int    `toml:"max-days"`
+	MaxBackups  int    `toml:"max-backups"`
+	EnableStore bool   `toml:"enable-report"` // for mo, store log into db
 }
 
 func (cfg *LogConfig) getSyncer() zapcore.WriteSyncer {
@@ -106,6 +116,20 @@ func (cfg *LogConfig) getLevel() zap.AtomicLevel {
 	return level
 }
 
+func (cfg *LogConfig) getSinks() (sinks []ZapSink) {
+	encoder, syncer := cfg.getEncoder(), cfg.getSyncer()
+	sinks = append(sinks, ZapSink{encoder, syncer})
+	if cfg.EnableStore {
+		encoder, syncer := getTraceLogSinks()
+		sinks = append(sinks, ZapSink{encoder, syncer})
+	}
+	return
+}
+
+func (cfg *LogConfig) getOptions() []zap.Option {
+	return []zap.Option{zap.AddStacktrace(zapcore.FatalLevel), zap.AddCaller()}
+}
+
 func getLoggerEncoder(format string) zapcore.Encoder {
 	encoderConfig := zapcore.EncoderConfig{
 		MessageKey:    "msg",
diff --git a/pkg/logutil/logutil2/api.go b/pkg/logutil/logutil2/api.go
new file mode 100644
index 000000000..cfe828deb
--- /dev/null
+++ b/pkg/logutil/logutil2/api.go
@@ -0,0 +1,122 @@
+// Copyright 2021 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil2
+
+import (
+	"context"
+	"fmt"
+	"github.com/matrixorigin/matrixone/pkg/logutil"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+func Debug(ctx context.Context, msg string, fields ...zap.Field) {
+	fields = append(fields, ContextField(ctx))
+	logutil.GetGlobalLogger().WithOptions(zap.AddCallerSkip(1)).Debug(msg, fields...)
+}
+
+func Info(ctx context.Context, msg string, fields ...zap.Field) {
+	fields = append(fields, ContextField(ctx))
+	logutil.GetGlobalLogger().WithOptions(zap.AddCallerSkip(1)).Info(msg, fields...)
+}
+
+func Warn(ctx context.Context, msg string, fields ...zap.Field) {
+	fields = append(fields, ContextField(ctx))
+	logutil.GetGlobalLogger().WithOptions(zap.AddCallerSkip(1)).Warn(msg, fields...)
+}
+
+func Error(ctx context.Context, msg string, fields ...zap.Field) {
+	fields = append(fields, ContextField(ctx))
+	logutil.GetGlobalLogger().WithOptions(zap.AddCallerSkip(1)).Error(msg, fields...)
+}
+
+func Panic(ctx context.Context, msg string, fields ...zap.Field) {
+	fields = append(fields, ContextField(ctx))
+	logutil.GetGlobalLogger().WithOptions(zap.AddCallerSkip(1)).Panic(msg, fields...)
+}
+
+func Fatal(ctx context.Context, msg string, fields ...zap.Field) {
+	fields = append(fields, ContextField(ctx))
+	logutil.GetGlobalLogger().WithOptions(zap.AddCallerSkip(1)).Fatal(msg, fields...)
+}
+
+// Debugf is only for use in develop mode.
+func Debugf(ctx context.Context, msg string, fields ...interface{}) {
+	if len(fields) == 0 {
+		logutil.GetSkip1Logger().Debug(msg, ContextField(ctx))
+	} else {
+		logutil.GetSkip1Logger().Debug(fmt.Sprintf(msg, fields...), ContextField(ctx))
+	}
+}
+
+// Infof is only for use in develop mode.
+func Infof(ctx context.Context, msg string, fields ...interface{}) {
+	if len(fields) == 0 {
+		logutil.GetSkip1Logger().Info(msg, ContextField(ctx))
+	} else {
+		logutil.GetSkip1Logger().Info(fmt.Sprintf(msg, fields...), ContextField(ctx))
+	}
+}
+
+// Warnf is only for use in develop mode.
+func Warnf(ctx context.Context, msg string, fields ...interface{}) {
+	if len(fields) == 0 {
+		logutil.GetSkip1Logger().Warn(msg, ContextField(ctx))
+	} else {
+		logutil.GetSkip1Logger().Warn(fmt.Sprintf(msg, fields...), ContextField(ctx))
+	}
+}
+
+// Errorf is only for use in develop mode.
+func Errorf(ctx context.Context, msg string, fields ...interface{}) {
+	l := logutil.GetGlobalLogger().WithOptions(zap.AddCallerSkip(1), ContextFieldsOption(ctx), zap.AddStacktrace(zap.ErrorLevel))
+	if len(fields) == 0 {
+		l.Error(msg, ContextField(ctx))
+	} else {
+		l.Error(fmt.Sprintf(msg, fields...), ContextField(ctx))
+	}
+}
+
+// Panicf is only for use in develop mode.
+func Panicf(ctx context.Context, msg string, fields ...interface{}) {
+	if len(fields) == 0 {
+		logutil.GetSkip1Logger().Panic(msg, ContextField(ctx))
+	} else {
+		logutil.GetSkip1Logger().Panic(fmt.Sprintf(msg, fields...), ContextField(ctx))
+	}
+}
+
+// Fatalf is only for use in develop mode.
+func Fatalf(ctx context.Context, msg string, fields ...interface{}) {
+	if len(fields) == 0 {
+		logutil.GetSkip1Logger().Fatal(msg, ContextField(ctx))
+	} else {
+		logutil.GetSkip1Logger().Fatal(fmt.Sprintf(msg, fields...), ContextField(ctx))
+	}
+}
+
+// hook can catch zapcore.Entry, which can be added by WithOptions(zap.Hooks(hook)).
+// But what we need is zapcore.CheckedEntry.
+// Deprecated: kept only as a reference; it is never registered as a hook.
+func hook(e zapcore.Entry) error { return nil }
+
+var _ = hook(zapcore.Entry{})
+
+func ContextFieldsOption(ctx context.Context) zap.Option {
+	return zap.Fields(logutil.GetContextFieldFunc()(ctx))
+}
+func ContextField(ctx context.Context) zap.Field {
+	return logutil.GetContextFieldFunc()(ctx)
+}
diff --git a/pkg/logutil/report.go b/pkg/logutil/report.go
new file mode 100644
index 000000000..648721465
--- /dev/null
+++ b/pkg/logutil/report.go
@@ -0,0 +1,160 @@
+// Copyright 2021 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"context"
+	"go.uber.org/zap"
+	"go.uber.org/zap/buffer"
+	"go.uber.org/zap/zapcore"
+	"io"
+	"sync/atomic"
+)
+
+var gLogConfigs atomic.Value
+
+func EnableStoreDB() bool {
+	return gLogConfigs.Load().(*LogConfig).EnableStore
+}
+
+func setGlobalLogConfig(cfg *LogConfig) {
+	gLogConfigs.Store(cfg)
+}
+
+func getGlobalLogConfig() *LogConfig {
+	return gLogConfigs.Load().(*LogConfig)
+}
+
+type ZapSink struct {
+	enc zapcore.Encoder
+	out zapcore.WriteSyncer
+}
+
+// logReporter should be trace.ReportLog
+var logReporter atomic.Value
+
+// zapReporter should be trace.ReportZap
+var zapReporter atomic.Value
+
+// levelChangeFunc should be trace.SetLogLevel function
+var levelChangeFunc atomic.Value
+
+// contextField should be trace.ContextField function
+var contextField atomic.Value
+
+type TraceReporter struct {
+	ReportLog    reportLogFunc
+	ReportZap    reportZapFunc
+	LevelSignal  levelChangeSignal
+	ContextField contextFieldFunc
+}
+
+type reportLogFunc func(context.Context, zapcore.Level, int, string, ...any)
+type reportZapFunc func(zapcore.Encoder, zapcore.Entry, []zapcore.Field) (*buffer.Buffer, error)
+type levelChangeSignal func(zapcore.LevelEnabler)
+type contextFieldFunc func(context.Context) zap.Field
+
+func noopReportLog(context.Context, zapcore.Level, int, string, ...any) {}
+func noopReportZap(zapcore.Encoder, zapcore.Entry, []zapcore.Field) (*buffer.Buffer, error) {
+	return buffer.NewPool().Get(), nil
+}
+func noopLevelSignal(zapcore.LevelEnabler)       {}
+func noopContextField(context.Context) zap.Field { return zap.String("span", "{}") }
+
+func SetLogReporter(r *TraceReporter) {
+	if r.ReportLog != nil {
+		logReporter.Store(r.ReportLog)
+	}
+	if r.ReportZap != nil {
+		zapReporter.Store(r.ReportZap)
+	}
+	if r.LevelSignal != nil {
+		levelChangeFunc.Store(r.LevelSignal)
+	}
+	if r.ContextField != nil {
+		contextField.Store(r.ContextField)
+	}
+}
+
+func GetReportLogFunc() reportLogFunc {
+	return logReporter.Load().(reportLogFunc)
+}
+
+func GetReportZapFunc() reportZapFunc {
+	return zapReporter.Load().(reportZapFunc)
+}
+
+func GetLevelChangeFunc() levelChangeSignal {
+	return levelChangeFunc.Load().(levelChangeSignal)
+}
+
+func GetContextFieldFunc() contextFieldFunc {
+	return contextField.Load().(contextFieldFunc)
+}
+
+var _ zapcore.Encoder = (*TraceLogEncoder)(nil)
+
+type TraceLogEncoder struct {
+	zapcore.Encoder
+	spanContextField zap.Field
+}
+
+func (e *TraceLogEncoder) Clone() zapcore.Encoder {
+	return &TraceLogEncoder{
+		Encoder: e.Encoder.Clone(),
+	}
+}
+
+var SpanFieldKey atomic.Value
+
+func (e *TraceLogEncoder) AddObject(key string, val zapcore.ObjectMarshaler) error {
+	if key == SpanFieldKey.Load().(string) {
+		//e.sp = obj.(*trace.SpanContext)
+		e.spanContextField = zap.Object(key, val)
+		return nil
+	}
+	return e.Encoder.AddObject(key, val)
+}
+
+func (e *TraceLogEncoder) EncodeEntry(entry zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
+	if e.spanContextField.Key == SpanFieldKey.Load().(string) {
+		fields = append(fields, e.spanContextField)
+	}
+	return GetReportZapFunc()(e.Encoder, entry, fields)
+}
+
+func newTraceLogEncoder() *TraceLogEncoder {
+	// like zap.NewProductionEncoderConfig(), but with the core-element keys (msg/level/time/caller) left empty so they are not encoded
+	e := &TraceLogEncoder{
+		Encoder: zapcore.NewJSONEncoder(
+			zapcore.EncoderConfig{
+				StacktraceKey:  "stacktrace",
+				LineEnding:     zapcore.DefaultLineEnding,
+				EncodeLevel:    zapcore.LowercaseLevelEncoder,
+				EncodeTime:     zapcore.EpochTimeEncoder,
+				EncodeDuration: zapcore.SecondsDurationEncoder,
+				EncodeCaller:   zapcore.ShortCallerEncoder,
+			}),
+	}
+	return e
+}
+
+func getTraceLogSinks() (zapcore.Encoder, zapcore.WriteSyncer) {
+	return newTraceLogEncoder(), zapcore.AddSync(io.Discard)
+}
+
+func init() {
+	SpanFieldKey.Store("")
+}
diff --git a/pkg/util/batchpipe/batch_pipe.go b/pkg/util/batchpipe/batch_pipe.go
new file mode 100644
index 000000000..4f9495539
--- /dev/null
+++ b/pkg/util/batchpipe/batch_pipe.go
@@ -0,0 +1,365 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package batchpipe
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/matrixorigin/matrixone/pkg/common/moerr"
+	"github.com/matrixorigin/matrixone/pkg/logutil"
+)
+
+const chanCapConst = 10000
+
+// HasName decides which table the owner will go to
+type HasName interface {
+	GetName() string
+}
+
+// Reminder to force flush ItemBuffer
+type Reminder interface {
+	RemindNextAfter() time.Duration
+	RemindBackOff()
+	RemindBackOffCnt() int
+	RemindReset()
+}
+
+// ItemBuffer stashes items and constructs a batch that can be stored, for instance, an sql inserting all items into a table.
+type ItemBuffer[T any, B any] interface {
+	Reminder
+	Add(item T)
+	Reset()
+	IsEmpty() bool
+	ShouldFlush() bool
+	// GetBatch use bytes.Buffer to mitigate mem allocation and the returned bytes should own its data
+	GetBatch(buf *bytes.Buffer) B
+}
+
+type PipeImpl[T any, B any] interface {
+	// NewItemBuffer creates a new buffer for one kind of Item.
+	NewItemBuffer(name string) ItemBuffer[T, B]
+	// NewItemBatchHandler handles the StoreBatch from an ItemBuffer, for example, executes an insert sql.
+	// This handler may be running on multiple goroutines.
+	NewItemBatchHandler() func(batch B)
+}
+
+type backOffClock struct {
+	base       time.Duration
+	current    time.Duration
+	backOffCnt int
+	next       func(int, time.Duration) time.Duration
+}
+
+func NewConstantClock(cycle time.Duration) Reminder {
+	return NewExpBackOffClock(cycle, cycle, 1)
+}
+
+func NewExpBackOffClock(init, max time.Duration, base int) Reminder {
+	next := func(_ int, current time.Duration) time.Duration {
+		if current < max {
+			current = current * time.Duration(base)
+			if current >= max {
+				current = max
+			}
+		}
+		return current
+	}
+	return NewBackOffClock(init, next)
+}
+
+func NewBackOffClock(base time.Duration, f func(int, time.Duration) time.Duration) Reminder {
+	return &backOffClock{
+		base:       base,
+		current:    base,
+		backOffCnt: 0,
+		next:       f,
+	}
+}
+func (c *backOffClock) RemindNextAfter() time.Duration { return c.current }
+func (c *backOffClock) RemindReset()                   { c.current = c.base; c.backOffCnt = 0 }
+func (c *backOffClock) RemindBackOffCnt() int          { return c.backOffCnt }
+func (c *backOffClock) RemindBackOff() {
+	c.backOffCnt += 1
+	if c.next != nil {
+		c.current = c.next(c.backOffCnt, c.current)
+	}
+}
+
+type reminderRegistry struct {
+	registry map[string]*time.Timer
+	C        chan string
+}
+
+func newReminderRegistry() *reminderRegistry {
+	return &reminderRegistry{
+		registry: make(map[string]*time.Timer),
+		C:        make(chan string, chanCapConst),
+	}
+}
+
+func (r *reminderRegistry) Register(name string, after time.Duration) {
+	if after <= 0 {
+		return
+	}
+	if r.registry[name] != nil {
+		panic(fmt.Sprintf("%s already registered", name))
+	}
+	r.registry[name] = time.AfterFunc(after, func() { r.C <- name })
+}
+
+func (r *reminderRegistry) Reset(name string, after time.Duration) {
+	if after <= 0 {
+		return
+	}
+	if t := r.registry[name]; t != nil {
+		t.Reset(after)
+	}
+}
+
+func (r *reminderRegistry) CleanAll() {
+	for _, timer := range r.registry {
+		timer.Stop()
+	}
+}
+
+type baseBatchPipeOpts struct {
+	// the number of goroutines to handle buffer batch, default is runtime.NumCPU()
+	BatchWorkerNum int
+	// the number of goroutines to merge items into a buffer, default is 1
+	BufferWorkerNum int
+}
+
+type BaseBatchPipeOpt interface {
+	ApplyTo(*baseBatchPipeOpts)
+}
+
+type PipeWithBatchWorkerNum int
+
+func (x PipeWithBatchWorkerNum) ApplyTo(o *baseBatchPipeOpts) {
+	o.BatchWorkerNum = int(x)
+}
+
+type PipeWithBufferWorkerNum int
+
+func (x PipeWithBufferWorkerNum) ApplyTo(o *baseBatchPipeOpts) {
+	o.BufferWorkerNum = int(x)
+}
+
+func defaultBaseBatchPipeOpts() baseBatchPipeOpts {
+	return baseBatchPipeOpts{
+		BatchWorkerNum:  runtime.NumCPU(),
+		BufferWorkerNum: 1,
+	}
+}
+
+type BaseBatchPipe[T HasName, B any] struct {
+	impl              PipeImpl[T, B]
+	isRunning         int32
+	opts              baseBatchPipeOpts
+	dropped           int64
+	itemCh            chan T
+	batchCh           chan B
+	sendingLock       sync.RWMutex
+	mergeStopWg       sync.WaitGroup
+	batchStopWg       sync.WaitGroup
+	batchWorkerCancel context.CancelFunc
+	mergeWorkerCancel context.CancelFunc
+}
+
+func NewBaseBatchPipe[T HasName, B any](impl PipeImpl[T, B], opts ...BaseBatchPipeOpt) *BaseBatchPipe[T, B] {
+	initOpts := defaultBaseBatchPipeOpts()
+	for _, o := range opts {
+		o.ApplyTo(&initOpts)
+	}
+	return &BaseBatchPipe[T, B]{
+		impl:    impl,
+		opts:    initOpts,
+		itemCh:  make(chan T, chanCapConst),
+		batchCh: make(chan B, chanCapConst),
+	}
+}
+
+// SendItem returns error when pipe is closed
+func (bc *BaseBatchPipe[T, B]) SendItem(items ...T) error {
+	if atomic.LoadInt32(&bc.isRunning) == 0 {
+		return moerr.NewWarn("Collector has been stopped")
+	}
+	// avoid data race on itemCh between concurrent sending and closing
+	bc.sendingLock.RLock()
+	defer bc.sendingLock.RUnlock()
+	for _, item := range items {
+		select {
+		case bc.itemCh <- item:
+		default:
+			atomic.AddInt64(&bc.dropped, 1)
+		}
+	}
+	return nil
+}
+
+// Start kicks off the merge workers and batch workers, returning false if the pipe is already working (Start has been called).
+func (bc *BaseBatchPipe[T, B]) Start() bool {
+	if atomic.SwapInt32(&bc.isRunning, 1) == 1 {
+		return false
+	}
+	bc.startBatchWorker()
+	bc.startMergeWorker()
+	return true
+}
+
+// Stop terminates all workers. If graceful is asked, it waits until all items are processed;
+// otherwise, it quits immediately. Caller can use the returned channel to wait for Stop to finish.
+func (bc *BaseBatchPipe[T, B]) Stop(graceful bool) (<-chan struct{}, bool) {
+	if atomic.SwapInt32(&bc.isRunning, 0) == 0 {
+		return nil, false
+	}
+
+	logutil.Infof("BaseBatchPipe accept graceful(%v) Stop", graceful)
+	stopCh := make(chan struct{})
+	if graceful {
+		go func() {
+			bc.sendingLock.Lock()
+			close(bc.itemCh)
+			bc.sendingLock.Unlock()
+			bc.mergeStopWg.Wait()
+			close(bc.batchCh)
+			bc.batchStopWg.Wait()
+			close(stopCh)
+		}()
+	} else {
+		bc.batchWorkerCancel()
+		bc.mergeWorkerCancel()
+		go func() {
+			bc.mergeStopWg.Wait()
+			bc.batchStopWg.Wait()
+			close(stopCh)
+		}()
+	}
+	return stopCh, true
+}
+
+func (bc *BaseBatchPipe[T, B]) startBatchWorker() {
+	ctx, cancel := context.WithCancel(context.Background())
+	bc.batchWorkerCancel = cancel
+	for i := 0; i < bc.opts.BatchWorkerNum; i++ {
+		bc.batchStopWg.Add(1)
+		go bc.batchWorker(ctx)
+	}
+}
+
+func (bc *BaseBatchPipe[T, B]) startMergeWorker() {
+	ctx, cancel := context.WithCancel(context.Background())
+	bc.mergeWorkerCancel = cancel
+	for i := 0; i < bc.opts.BufferWorkerNum; i++ {
+		bc.mergeStopWg.Add(1)
+		go bc.mergeWorker(ctx)
+	}
+}
+
+func (bc *BaseBatchPipe[T, B]) batchWorker(ctx context.Context) {
+	defer bc.batchStopWg.Done()
+	quitMsg := quitUnknown
+	defer logutil.Infof("batchWorker quit: %s", &quitMsg)
+	f := bc.impl.NewItemBatchHandler()
+	for {
+		select {
+		case <-ctx.Done():
+			quitMsg = quitCancel
+			return
+		case batch, ok := <-bc.batchCh:
+			if !ok {
+				quitMsg = quitChannelClose
+				return
+			}
+			f(batch)
+		}
+	}
+}
+
+type quitReason string
+
+var (
+	quitUnknown      quitReason = "unknown"
+	quitCancel       quitReason = "force cancel"
+	quitChannelClose quitReason = "closed channel"
+)
+
+func (s *quitReason) String() string { return string(*s) }
+
+func (bc *BaseBatchPipe[T, B]) mergeWorker(ctx context.Context) {
+	defer bc.mergeStopWg.Done()
+	bufByNames := make(map[string]ItemBuffer[T, B])
+	batchbuf := new(bytes.Buffer)
+	registry := newReminderRegistry()
+	quitMsg := quitUnknown
+	defer registry.CleanAll()
+	defer logutil.Infof("mergeWorker quit: %v", &quitMsg)
+
+	doFlush := func(name string, itembuf ItemBuffer[T, B]) {
+		batch := itembuf.GetBatch(batchbuf)
+		bc.batchCh <- batch
+		itembuf.Reset()
+		itembuf.RemindReset()
+		registry.Reset(name, itembuf.RemindNextAfter())
+	}
+
+	for {
+		select {
+		case <-ctx.Done():
+			quitMsg = quitCancel
+			return
+		case item, ok := <-bc.itemCh:
+			if !ok {
+				for name, buf := range bufByNames {
+					if !buf.IsEmpty() {
+						doFlush(name, buf)
+					}
+				}
+				quitMsg = quitChannelClose
+				return
+			}
+			name := item.GetName()
+			itembuf := bufByNames[name]
+			if itembuf == nil {
+				itembuf = bc.impl.NewItemBuffer(name)
+				bufByNames[name] = itembuf
+				registry.Register(name, itembuf.RemindNextAfter())
+			}
+
+			itembuf.Add(item)
+			if itembuf.ShouldFlush() {
+				doFlush(name, itembuf)
+			} else if itembuf.RemindBackOffCnt() > 0 {
+				itembuf.RemindReset()
+				registry.Reset(name, itembuf.RemindNextAfter())
+			}
+		case name := <-registry.C:
+			if itembuf := bufByNames[name]; itembuf != nil {
+				if itembuf.IsEmpty() {
+					itembuf.RemindBackOff()
+					registry.Reset(name, itembuf.RemindNextAfter())
+				} else {
+					doFlush(name, itembuf)
+				}
+			}
+		}
+	}
+}
diff --git a/pkg/util/batchpipe/batch_pipe_test.go b/pkg/util/batchpipe/batch_pipe_test.go
new file mode 100644
index 000000000..5773eba2d
--- /dev/null
+++ b/pkg/util/batchpipe/batch_pipe_test.go
@@ -0,0 +1,382 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package batchpipe
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"sort"
+	"strconv"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func waitChTimeout[T any](
+	ch <-chan T,
+	onRecvCheck func(element T, closed bool) (goOn bool, err error),
+	after time.Duration,
+) error {
+	timeout := time.After(after)
+	for {
+		select {
+		case <-timeout:
+			return errors.New("timeout")
+		case item, ok := <-ch:
+			goOn, err := onRecvCheck(item, !ok)
+			if err != nil {
+				return err
+			}
+			if !ok || !goOn {
+				return nil
+			}
+		}
+	}
+}
+
+type Pos struct {
+	line    int
+	linepos int
+	docpos  int
+}
+
+const (
+	T_INT = "tst__int"
+	T_POS = "tst__pos"
+)
+
+type TestItem struct {
+	name   string
+	intval int
+	posval *Pos
+}
+
+func (item *TestItem) GetName() string { return item.name }
+
+var _ ItemBuffer[*TestItem, string] = (*intBuf)(nil)
+
+type intBuf struct {
+	Reminder
+	sum int
+}
+
+func (b *intBuf) Add(x *TestItem) { b.sum += x.intval }
+
+func (b *intBuf) Reset() { b.sum = 0; b.RemindReset() }
+
+func (b *intBuf) IsEmpty() bool { return b.sum == 0 }
+
+func (b *intBuf) ShouldFlush() bool { return b.sum > 100 }
+
+func (b *intBuf) GetBatch(_ *bytes.Buffer) string {
+	return fmt.Sprintf("Batch int %d", b.sum)
+}
+
+var _ ItemBuffer[*TestItem, string] = (*posBuf)(nil)
+
+type posBuf struct {
+	Reminder
+	posList  []*Pos
+	wakeupCh chan<- time.Time
+}
+
+func (b *posBuf) Add(item *TestItem) {
+	b.posList = append(b.posList, item.posval)
+}
+
+func (b *posBuf) Reset() { b.posList = b.posList[:0]; b.RemindReset() }
+
+func (b *posBuf) IsEmpty() bool {
+	select {
+	case b.wakeupCh <- time.Now():
+	default:
+	}
+	return len(b.posList) == 0
+}
+
+func (b *posBuf) ShouldFlush() bool { return len(b.posList) > 3 }
+
+// GetBatch uses bytes.Buffer to mitigate mem allocation and the returned bytes should own its data.
+func (b *posBuf) GetBatch(buf *bytes.Buffer) string {
+	buf.Reset()
+	for _, pos := range b.posList {
+		buf.WriteString(fmt.Sprintf("Ln %d, Col %d, Doc %d\n", pos.line, pos.linepos, pos.docpos))
+	}
+	return buf.String()
+}
+
+var _ PipeImpl[*TestItem, string] = &testCollector{}
+
+type testCollector struct {
+	sync.Mutex
+	*BaseBatchPipe[*TestItem, string]
+	receivedString []string
+	posBufWakeupCh chan time.Time
+	notify4Batch   func()
+}
+
+// NewItemBuffer creates a new buffer for one kind of Item.
+func (c *testCollector) NewItemBuffer(name string) ItemBuffer[*TestItem, string] {
+	switch name {
+	case T_INT:
+		return &intBuf{
+			Reminder: NewConstantClock(0),
+		}
+	case T_POS:
+		return &posBuf{
+			Reminder: NewExpBackOffClock(30*time.Millisecond, 300*time.Millisecond, 2),
+			wakeupCh: c.posBufWakeupCh,
+		}
+	}
+	panic("unrecognized name")
+}
+
+// NewItemBatchHandler handles the StoreBatch from an ItemBuffer, for example, executes an insert sql.
+// This handler may be running on multiple goroutines.
+func (c *testCollector) NewItemBatchHandler() func(batch string) {
+	return func(batch string) {
+		c.Lock()
+		defer c.Unlock()
+		c.receivedString = append(c.receivedString, batch)
+		if c.notify4Batch != nil {
+			c.notify4Batch()
+		}
+	}
+}
+
+func (c *testCollector) Received() []string {
+	c.Lock()
+	defer c.Unlock()
+	return c.receivedString[:]
+}
+
+func newTestCollector(opts ...BaseBatchPipeOpt) *testCollector {
+	collector := &testCollector{
+		receivedString: make([]string, 0),
+		posBufWakeupCh: make(chan time.Time, 10),
+	}
+	base := NewBaseBatchPipe[*TestItem, string](collector, opts...)
+	collector.BaseBatchPipe = base
+	return collector
+}
+
+func TestBaseCollector(t *testing.T) {
+	collector := newTestCollector(PipeWithBatchWorkerNum(1))
+	require.True(t, collector.Start())
+	require.False(t, collector.Start())
+	err := collector.SendItem(
+		&TestItem{name: T_INT, intval: 32},
+		&TestItem{name: T_POS, posval: &Pos{line: 1, linepos: 12, docpos: 12}},
+		&TestItem{name: T_INT, intval: 33},
+	)
+	require.NoError(t, err)
+	time.Sleep(50 * time.Millisecond)
+	require.Contains(t, collector.Received(), fmt.Sprintf("Ln %d, Col %d, Doc %d\n", 1, 12, 12))
+
+	_ = collector.SendItem(&TestItem{name: T_INT, intval: 40})
+	time.Sleep(20 * time.Millisecond)
+	require.Contains(t, collector.Received(), "Batch int 105")
+
+	_ = collector.SendItem(&TestItem{name: T_INT, intval: 40})
+	handle, succ := collector.Stop(false)
+	require.True(t, succ)
+	require.NoError(t, waitChTimeout(handle, func(element struct{}, closed bool) (goOn bool, err error) {
+		assert.True(t, closed)
+		return
+	}, time.Second))
+	require.Equal(t, 2, len(collector.Received()))
+}
+
+func TestBaseCollectorReminder(t *testing.T) {
+	collector := newTestCollector(PipeWithBatchWorkerNum(1))
+	require.True(t, collector.Start())
+	err := collector.SendItem(
+		&TestItem{name: T_POS, posval: &Pos{line: 1, linepos: 12, docpos: 12}},
+	)
+	require.NoError(t, err)
+
+	var prev time.Time
+	gap := 30 // time.Millisecond
+	waitTimeCheck := func(element time.Time, closed bool) (goOn bool, err error) {
+		if gap >= 300 {
+			return
+		}
+		if prev.IsZero() {
+			goOn = true
+			prev = element
+		} else {
+			goOn = assert.InDelta(t, gap, element.Sub(prev).Milliseconds(), 10)
+			prev = element
+			gap *= 2
+		}
+		return
+	}
+	require.NoError(t, waitChTimeout(collector.posBufWakeupCh, waitTimeCheck, time.Second))
+
+	require.Contains(t, collector.Received(), fmt.Sprintf("Ln %d, Col %d, Doc %d\n", 1, 12, 12))
+
+	_ = collector.SendItem(
+		&TestItem{name: T_POS, posval: &Pos{line: 1, linepos: 12, docpos: 12}},
+	)
+
+	// new write will reset timer to 30ms
+	time.Sleep(50 * time.Millisecond)
+	require.Contains(t, collector.Received(), fmt.Sprintf("Ln %d, Col %d, Doc %d\n", 1, 12, 12))
+
+	handle, succ := collector.Stop(false)
+	require.True(t, succ)
+	require.NoError(t, waitChTimeout(handle, func(element struct{}, closed bool) (goOn bool, err error) {
+		assert.True(t, closed)
+		return
+	}, time.Second))
+}
+
+func TestBaseCollectorGracefulStop(t *testing.T) {
+	var notify sync.WaitGroup
+	var notifyFun = func() {
+		notify.Done()
+	}
+	collector := newTestCollector(PipeWithBatchWorkerNum(2), PipeWithBufferWorkerNum(1))
+	collector.notify4Batch = notifyFun
+	collector.Start()
+
+	notify.Add(1)
+	err := collector.SendItem(
+		&TestItem{name: T_INT, intval: 32},
+		&TestItem{name: T_INT, intval: 40},
+		&TestItem{name: T_INT, intval: 33},
+	)
+	notify.Wait()
+	require.NoError(t, err)
+	time.Sleep(10 * time.Millisecond)
+	require.Contains(t, collector.Received(), "Batch int 105")
+
+	notify.Add(1)
+	_ = collector.SendItem(&TestItem{name: T_INT, intval: 40})
+	handle, succ := collector.Stop(true)
+	require.True(t, succ)
+	handle2, succ2 := collector.Stop(true)
+	require.False(t, succ2)
+	require.Nil(t, handle2)
+	notify.Add(1)
+	require.Error(t, collector.SendItem(&TestItem{name: T_INT, intval: 40}))
+	require.NoError(t, waitChTimeout(handle, func(element struct{}, closed bool) (goOn bool, err error) {
+		assert.True(t, closed)
+		return
+	}, time.Second))
+	require.Contains(t, collector.Received(), "Batch int 40")
+}
+
+func TestBaseReminder(t *testing.T) {
+	ms := time.Millisecond
+	registry := newReminderRegistry()
+	registry.Register("1", 1*ms)
+	registry.Register("2", 0)
+	require.NotPanics(t, func() { registry.Register("1", 0*ms) })
+	require.Panics(t, func() { registry.Register("1", 1*ms) })
+	checkOneRecevied := func(_ string, closed bool) (goOn bool, err error) {
+		if closed {
+			err = errors.New("unexpected close")
+		}
+		return
+	}
+	// only one event
+	require.NoError(t, waitChTimeout(registry.C, checkOneRecevied, 100*ms))
+	require.Error(t, waitChTimeout(registry.C, checkOneRecevied, 100*ms))
+
+	// nothing happens after these two lines
+	registry.Reset("2", 2*ms)
+	registry.Reset("1", 0*ms)
+	require.Error(t, waitChTimeout(registry.C, checkOneRecevied, 50*ms))
+	registry.Reset("1", 5*ms)
+	fmt.Println(time.Now())
+	require.NoError(t, waitChTimeout(registry.C, checkOneRecevied, 30*ms))
+	registry.CleanAll()
+}
+
+func TestBaseRemind2(t *testing.T) {
+	ms := time.Millisecond
+	r := newReminderRegistry()
+	cases := []*struct {
+		id     string
+		d      []time.Duration
+		offset int
+	}{
+		{"0", []time.Duration{11 * ms, 8 * ms, 25 * ms}, 0},
+		{"1", []time.Duration{7 * ms, 15 * ms, 16 * ms}, 0},
+		{"2", []time.Duration{3 * ms, 12 * ms, 11 * ms}, 0},
+	}
+
+	type item struct {
+		d  time.Duration
+		id string
+	}
+
+	seq := []item{}
+
+	for _, c := range cases {
+		r.Register(c.id, c.d[c.offset])
+		c.offset += 1
+		t := 0 * ms
+		for _, delta := range c.d {
+			t += delta
+			seq = append(seq, item{t, c.id})
+		}
+	}
+
+	sort.Slice(seq, func(i, j int) bool {
+		return int64(seq[i].d) < int64(seq[j].d)
+	})
+
+	gotids := make([]string, 0, 9)
+	for i := 0; i < 9; i++ {
+		id := <-r.C
+		gotids = append(gotids, id)
+		idx, _ := strconv.ParseInt(id, 10, 32)
+		c := cases[idx]
+		if c.offset < 3 {
+			r.Reset(id, c.d[c.offset])
+			c.offset++
+		}
+	}
+
+	diff := 0
+	for i := range gotids {
+		if gotids[i] != seq[i].id {
+			diff++
+		}
+	}
+
+	// Appending to reminder.C happens in a goroutine, considering its scheduling latency,
+	// here we tolerate the disorder for 2 pairs
+	if diff > 4 {
+		t.Errorf("bad order of the events, want %v, got %s", seq, gotids)
+	}
+}
+
+func TestBaseBackOff(t *testing.T) {
+	ms := time.Millisecond
+	exp := NewExpBackOffClock(ms, 20*ms, 2)
+	for expect := ms; expect <= 20*ms; expect *= time.Duration(2) {
+		assert.Equal(t, exp.RemindNextAfter(), expect)
+		exp.RemindBackOff()
+	}
+	exp.RemindBackOff()
+	assert.Equal(t, 20*ms, exp.RemindNextAfter())
+}
diff --git a/pkg/util/batchpipe/doc.go b/pkg/util/batchpipe/doc.go
new file mode 100644
index 000000000..f6ec80064
--- /dev/null
+++ b/pkg/util/batchpipe/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package batchpipe provides a common util to export data (trace and metric for now) as batches.
+package batchpipe
diff --git a/pkg/util/errors/context.go b/pkg/util/errors/context.go
new file mode 100644
index 000000000..71f716969
--- /dev/null
+++ b/pkg/util/errors/context.go
@@ -0,0 +1,79 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"context"
+	"fmt"
+	"github.com/cockroachdb/errors/errbase"
+	"github.com/matrixorigin/matrixone/pkg/util"
+)
+
+// WithContext annotates err with stack info and a context, which should contain span info.
+// It also reports the error via the registered report-error func in the meantime.
+// If err is nil, WithContext returns nil.
+func WithContext(ctx context.Context, err error) error {
+	return ContextErrWithDepth(ctx, err, 1)
+}
+
+func ContextErrWithDepth(ctx context.Context, err error, depth int) error {
+	if err == nil {
+		return nil
+	}
+	if _, ok := err.(StackTracer); !ok {
+		err = &withStack{cause: err, Stack: util.Callers(depth + 1)}
+	}
+	err = &withContext{cause: err, ctx: ctx}
+	GetReportErrorFunc()(ctx, err, depth+1)
+	return err
+}
+
+// ContextTracer retrieves the context.Context
+type ContextTracer interface {
+	Context() context.Context
+}
+
+func GetContextTracer(inErr error) ContextTracer {
+	var stacked ContextTracer
+	WalkDeep(inErr, func(err error) bool {
+		if contextTracer, ok := err.(ContextTracer); ok {
+			stacked = contextTracer
+			return true
+		}
+		return false
+	})
+	return stacked
+}
+
+func HasContext(err error) bool {
+	return GetContextTracer(err) != nil
+}
+
+var _ error = (*withContext)(nil)
+var _ Wrapper = (*withContext)(nil)
+var _ ContextTracer = (*withContext)(nil)
+var _ fmt.Formatter = (*withContext)(nil)
+
+type withContext struct {
+	cause error
+
+	ctx context.Context
+}
+
+func (w *withContext) Error() string                 { return w.cause.Error() }
+func (w *withContext) Cause() error                  { return w.cause }
+func (w *withContext) Unwrap() error                 { return w.cause }
+func (w *withContext) Context() context.Context      { return w.ctx }
+func (w *withContext) Format(s fmt.State, verb rune) { errbase.FormatError(w, s, verb) }
diff --git a/pkg/util/errors/context_test.go b/pkg/util/errors/context_test.go
new file mode 100644
index 000000000..89c543ebf
--- /dev/null
+++ b/pkg/util/errors/context_test.go
@@ -0,0 +1,302 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"context"
+	goErrors "errors"
+	"reflect"
+	"testing"
+)
+
+// Shared fixtures: a plain error progressively wrapped with stack and
+// message layers, reused by the tests across this package.
+var ctx = context.Background()
+var testErr = goErrors.New("test error")
+var stackErr = WithStack(testErr)
+var msgErr = WithMessage(stackErr, "prefix")
+var msg2Err = WithMessagef(msgErr, "prefix by %s", "jack")
+
+// TestGetContextTracer verifies that the context can be recovered from an
+// error regardless of where in the wrap chain it was attached.
+func TestGetContextTracer(t *testing.T) {
+	type args struct {
+		err error
+	}
+	tests := []struct {
+		name string
+		args args
+		want context.Context
+	}{
+		{
+			name: "nil",
+			args: args{err: goErrors.New("test error")},
+			want: nil,
+		},
+		{
+			name: "context",
+			args: args{err: WithContext(context.Background(), goErrors.New("test error"))},
+			want: context.Background(),
+		},
+		{
+			name: "stack",
+			args: args{err: WithStack(goErrors.New("test error"))},
+			want: nil,
+		},
+		{
+			name: "message",
+			args: args{err: WithMessagef(goErrors.New("test error"), "prefix")},
+			want: nil,
+		},
+		{
+			name: "stack/context",
+			args: args{err: WithStack(WithContext(context.Background(), goErrors.New("test error")))},
+			want: context.Background(),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Compare the extracted context; guard the nil-tracer case so a
+			// missing tracer fails cleanly instead of panicking on
+			// got.Context().
+			got := GetContextTracer(tt.args.err)
+			var gotCtx context.Context
+			if got != nil {
+				gotCtx = got.Context()
+			}
+			if !reflect.DeepEqual(gotCtx, tt.want) {
+				t.Errorf("GetContextTracer() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// TestHasContext verifies context detection across different wrap shapes.
+func TestHasContext(t *testing.T) {
+	type args struct {
+		err error
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "nil",
+			args: args{err: goErrors.New("test error")},
+			want: false,
+		},
+		{
+			name: "context",
+			args: args{err: WithContext(context.Background(), goErrors.New("test error"))},
+			want: true,
+		},
+		{
+			name: "stack",
+			args: args{err: WithStack(goErrors.New("test error"))},
+			want: false,
+		},
+		{
+			name: "message",
+			args: args{err: WithMessagef(goErrors.New("test error"), "prefix")},
+			want: false,
+		},
+		{
+			name: "stack/context",
+			args: args{err: WithStack(WithContext(context.Background(), goErrors.New("test error")))},
+			want: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := HasContext(tt.args.err); got != tt.want {
+				t.Errorf("HasContext() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_withContext_Cause checks that Cause returns the wrapped error as-is.
+func Test_withContext_Cause(t *testing.T) {
+	type fields struct {
+		cause error
+		ctx   context.Context
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		wantErr error
+	}{
+		{
+			name:    "normal",
+			fields:  fields{cause: testErr, ctx: ctx},
+			wantErr: testErr,
+		},
+		{
+			name:    "stack",
+			fields:  fields{stackErr, ctx},
+			wantErr: stackErr,
+		},
+		{
+			name:    "message",
+			fields:  fields{msgErr, ctx},
+			wantErr: msgErr,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			w := &withContext{
+				cause: tt.fields.cause,
+				ctx:   tt.fields.ctx,
+			}
+			if got := w.Cause(); !reflect.DeepEqual(got, tt.wantErr) {
+				t.Errorf("Cause() error = %v, wantErr %v", got, tt.wantErr)
+			}
+		})
+	}
+}
+
+// Test_withContext_Context checks that the stored context is returned
+// unchanged regardless of the wrapped error's shape.
+func Test_withContext_Context(t *testing.T) {
+	type fields struct {
+		cause error
+		ctx   context.Context
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		want   context.Context
+	}{
+		{
+			name:   "normal",
+			fields: fields{cause: testErr, ctx: ctx},
+			want:   ctx,
+		},
+		{
+			name:   "stack",
+			fields: fields{stackErr, ctx},
+			want:   ctx,
+		},
+		{
+			name:   "message",
+			fields: fields{msgErr, ctx},
+			want:   ctx,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			w := &withContext{
+				cause: tt.fields.cause,
+				ctx:   tt.fields.ctx,
+			}
+			if got := w.Context(); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("Context() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_withContext_Error checks that withContext forwards the cause's
+// message verbatim (the context adds nothing to the text).
+func Test_withContext_Error(t *testing.T) {
+	type fields struct {
+		cause error
+		ctx   context.Context
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		want   string
+	}{
+		{
+			name:   "normal",
+			fields: fields{cause: testErr, ctx: ctx},
+			want:   "test error",
+		},
+		{
+			name:   "stack",
+			fields: fields{stackErr, ctx},
+			want:   "test error",
+		},
+		{
+			name:   "message",
+			fields: fields{msgErr, ctx},
+			want:   "prefix: test error",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			w := &withContext{
+				cause: tt.fields.cause,
+				ctx:   tt.fields.ctx,
+			}
+			if got := w.Error(); got != tt.want {
+				t.Errorf("Error() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_withContext_Unwrap checks that Unwrap exposes the wrapped error to
+// the standard errors.Unwrap chain.
+func Test_withContext_Unwrap(t *testing.T) {
+	type fields struct {
+		cause error
+		ctx   context.Context
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		wantErr error
+	}{
+		{
+			name:    "normal",
+			fields:  fields{cause: testErr, ctx: ctx},
+			wantErr: testErr,
+		},
+		{
+			name:    "stack",
+			fields:  fields{stackErr, ctx},
+			wantErr: stackErr,
+		},
+		{
+			name:    "message2",
+			fields:  fields{msg2Err, ctx},
+			wantErr: msg2Err,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			w := &withContext{
+				cause: tt.fields.cause,
+				ctx:   tt.fields.ctx,
+			}
+			if got := w.Unwrap(); !reflect.DeepEqual(got, tt.wantErr) {
+				t.Errorf("Unwrap() = %v, want %v", got, tt.wantErr)
+			}
+		})
+	}
+}
+
+// TestWithContext checks the nil passthrough contract of WithContext.
+func TestWithContext(t *testing.T) {
+	type args struct {
+		ctx context.Context
+		err error
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "normal",
+			args:    args{context.Background(), testErr},
+			wantErr: true,
+		},
+		{
+			name:    "nil",
+			args:    args{},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := WithContext(tt.args.ctx, tt.args.err); (err != nil) != tt.wantErr {
+				t.Errorf("TestWithContext() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
diff --git a/pkg/util/errors/errors.go b/pkg/util/errors/errors.go
new file mode 100644
index 000000000..0a80df190
--- /dev/null
+++ b/pkg/util/errors/errors.go
@@ -0,0 +1,130 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"context"
+	goErrors "errors"
+	"fmt"
+	"sync/atomic"
+
+	"github.com/matrixorigin/matrixone/pkg/util"
+)
+
+func init() {
+	// Install a no-op reporter so GetReportErrorFunc is always safe to call.
+	SetErrorReporter(noopReportError)
+}
+
+// Wrapper is implemented by errors that wrap another error.
+type Wrapper interface {
+	Unwrap() error
+}
+
+// WithIs is implemented by errors that provide a custom Is comparison.
+type WithIs interface {
+	Is(error) bool
+}
+
+// Unwrap delegates to the standard library errors.Unwrap.
+func Unwrap(err error) error {
+	return goErrors.Unwrap(err)
+}
+
+// Is delegates to the standard library errors.Is.
+func Is(err, target error) bool {
+	return goErrors.Is(err, target)
+}
+
+// As delegates to the standard library errors.As.
+func As(err error, target any) bool {
+	return goErrors.As(err, target)
+}
+
+// New returns a new error annotated with a stack trace captured at the
+// call site, and reports it through the registered error reporter.
+func New(text string) error {
+	err := &withStack{goErrors.New(text), util.Callers(1)}
+	// Never hand a literal nil Context to the reporter: reporters are
+	// entitled to a usable (if empty) context. context.TODO() marks that
+	// no caller context is available here.
+	GetReportErrorFunc()(context.TODO(), err, 1)
+	return err
+}
+
+// NewWithContext is like New, but additionally attaches ctx to the new
+// error and passes it to the error reporter.
+func NewWithContext(ctx context.Context, text string) (err error) {
+	err = &withStack{goErrors.New(text), util.Callers(1)}
+	err = &withContext{err, ctx}
+	GetReportErrorFunc()(ctx, err, 1)
+	return err
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+// Note: unlike New, Wrap does not report the error.
+func Wrap(err error, message string) error {
+	if err == nil {
+		return nil
+	}
+	err = &withMessage{
+		cause: err,
+		msg:   message,
+	}
+	return &withStack{
+		err,
+		util.Callers(1),
+	}
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+// Note: unlike New, Wrapf does not report the error.
+func Wrapf(err error, format string, args ...any) error {
+	if err == nil {
+		return nil
+	}
+	err = &withMessage{
+		cause: err,
+		msg:   fmt.Sprintf(format, args...),
+	}
+	return &withStack{
+		err,
+		util.Callers(1),
+	}
+}
+
+// WalkDeep does a depth-first traversal of all errors.
+// The visitor function can return true to end the traversal early.
+// In that case, WalkDeep will return true, otherwise false.
+func WalkDeep(err error, visitor func(err error) bool) bool {
+	// Walk the wrap chain from the outermost error inward.
+	for cur := err; cur != nil; cur = Unwrap(cur) {
+		if visitor(cur) {
+			return true
+		}
+	}
+	return false
+}
+
+// ReportError forwards err and ctx to the registered error reporter.
+func ReportError(ctx context.Context, err error) {
+	GetReportErrorFunc()(ctx, err, 1)
+}
+
+// reportErrorFunc consumes (ctx, err, depth); depth is the number of stack
+// frames to skip when locating the report site.
+type reportErrorFunc func(context.Context, error, int)
+
+// errorReporter should be trace.HandleError
+var errorReporter atomic.Value
+
+// noopReportError is the default reporter; it discards everything.
+func noopReportError(context.Context, error, int) {}
+
+// SetErrorReporter replaces the process-wide error reporter.
+func SetErrorReporter(f reportErrorFunc) {
+	errorReporter.Store(f)
+}
+
+// GetReportErrorFunc returns the current reporter. init guarantees a
+// non-nil value is stored, so the type assertion cannot fail.
+func GetReportErrorFunc() reportErrorFunc {
+	return errorReporter.Load().(reportErrorFunc)
+}
diff --git a/pkg/util/errors/errors_test.go b/pkg/util/errors/errors_test.go
new file mode 100644
index 000000000..7a7fe73db
--- /dev/null
+++ b/pkg/util/errors/errors_test.go
@@ -0,0 +1,374 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"context"
+	"reflect"
+	"sync/atomic"
+	"testing"
+)
+
+// TestAs checks that every wrapper layer in a composed error chain is
+// reachable via As.
+func TestAs(t *testing.T) {
+	type args struct {
+		err    error
+		target error
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "withContext",
+			args: args{
+				err:    WithContext(context.Background(), msgErr),
+				target: &withContext{},
+			},
+			want: true,
+		},
+		{
+			name: "withStack",
+			args: args{
+				err:    WithContext(context.Background(), msgErr),
+				target: &withStack{},
+			},
+			want: true,
+		},
+		{
+			name: "withMessage",
+			args: args{
+				err:    WithContext(context.Background(), msgErr),
+				target: &withMessage{},
+			},
+			want: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := As(tt.args.err, &tt.args.target); got != tt.want {
+				t.Errorf("As() = %v, want %v", got, tt.want)
+			}
+			t.Logf("target:: %v, type: %v", tt.args.target, reflect.ValueOf(tt.args.target).Type())
+		})
+	}
+}
+
+// mockReportError is a stand-in reporter used to observe SetErrorReporter.
+func mockReportError(_ context.Context, err error, depth int) {}
+
+// TestGetReportErrorFunc checks the get/set round trip of the reporter.
+func TestGetReportErrorFunc(t *testing.T) {
+	// The reporter is process-global state: restore the default when done
+	// so the mock installed here cannot leak into later tests.
+	defer SetErrorReporter(noopReportError)
+	tests := []struct {
+		name string
+		f    reportErrorFunc
+		want uintptr
+	}{
+		{
+			name: "noop",
+			want: reflect.ValueOf(noopReportError).Pointer(),
+		},
+		{
+			name: "mark",
+			f:    mockReportError,
+			want: reflect.ValueOf(mockReportError).Pointer(),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if tt.f != nil {
+				SetErrorReporter(tt.f)
+			}
+			if got := GetReportErrorFunc(); reflect.ValueOf(got).Pointer() != tt.want {
+				t.Errorf("GetReportErrorFunc() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// TestIs checks Is matching through the wrap chain, including the
+// message-text comparison provided by withMessage.Is.
+func TestIs(t *testing.T) {
+	type args struct {
+		err    error
+		target error
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "withContext",
+			args: args{
+				err:    WithContext(context.Background(), msgErr),
+				target: &withContext{msgErr, context.Background()},
+			},
+			want: true,
+		},
+		{
+			name: "withStack",
+			args: args{
+				err:    WithContext(context.Background(), msgErr),
+				target: &withStack{testErr, nil},
+			},
+			want: false,
+		},
+		{
+			name: "withMessage",
+			args: args{
+				err:    WithContext(context.Background(), msgErr),
+				target: msgErr,
+			},
+			want: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := Is(tt.args.err, tt.args.target); got != tt.want {
+				t.Errorf("Is() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// TestNew checks that New always produces a non-nil error.
+func TestNew(t *testing.T) {
+	type args struct {
+		text string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "normal",
+			args:    args{text: "message"},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := New(tt.args.text); (err != nil) != tt.wantErr {
+				t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+// TestNewWithContext checks that NewWithContext produces a non-nil error.
+func TestNewWithContext(t *testing.T) {
+	type args struct {
+		ctx  context.Context
+		text string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name: "normal",
+			args: args{
+				ctx:  context.Background(),
+				text: "normal",
+			},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := NewWithContext(tt.args.ctx, tt.args.text); (err != nil) != tt.wantErr {
+				t.Errorf("NewWithContext() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+// TestReportError is a smoke test: reporting must not panic.
+func TestReportError(t *testing.T) {
+	type args struct {
+		ctx context.Context
+		err error
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "normal",
+			args: args{context.Background(), testErr},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ReportError(tt.args.ctx, tt.args.err)
+		})
+	}
+}
+
+// TestUnwrap checks that each wrapper exposes exactly one layer per Unwrap.
+func TestUnwrap(t *testing.T) {
+	type args struct {
+		err error
+	}
+	tests := []struct {
+		name string
+		args args
+		want error
+	}{
+		{
+			name: "withContext",
+			args: args{err: WithContext(context.Background(), stackErr)},
+			want: stackErr,
+		},
+		{
+			name: "withStack",
+			args: args{err: stackErr},
+			want: testErr,
+		},
+		{
+			name: "withMessage",
+			args: args{err: msgErr},
+			want: stackErr,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := Unwrap(tt.args.err); !reflect.DeepEqual(err, tt.want) {
+				t.Errorf("Unwrap() error = %v, wantErr %v", err, tt.want)
+			}
+		})
+	}
+}
+
+// TestWalkDeep counts visited layers to confirm the whole chain is walked.
+func TestWalkDeep(t *testing.T) {
+	type args struct {
+		err     error
+		visitor func(err error) bool
+	}
+	var depth int32 = 0
+	var visitor = func(error) bool {
+		atomic.AddInt32(&depth, 1)
+		return false
+	}
+	tests := []struct {
+		name      string
+		args      args
+		wantDepth int32
+	}{
+		{
+			// stackErr = withStack -> testErr: two layers.
+			name: "depth_2",
+			args: args{
+				err:     stackErr,
+				visitor: visitor,
+			},
+			wantDepth: 2,
+		},
+		{
+			// msg2Err = withMessage -> withMessage -> withStack -> testErr.
+			name: "depth_4",
+			args: args{
+				err:     msg2Err,
+				visitor: visitor,
+			},
+			wantDepth: 4,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			atomic.StoreInt32(&depth, 0)
+			_ = WalkDeep(tt.args.err, tt.args.visitor)
+			if depth != tt.wantDepth {
+				t.Errorf("WalkDeep() depth = %v, want %v", depth, tt.wantDepth)
+			}
+		})
+	}
+}
+
+// TestWrap checks the nil passthrough contract of Wrap.
+func TestWrap(t *testing.T) {
+	type args struct {
+		err     error
+		message string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "normal",
+			args:    args{testErr, "normal message"},
+			wantErr: true,
+		},
+		{
+			name:    "nil",
+			args:    args{},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := Wrap(tt.args.err, tt.args.message); (err != nil) != tt.wantErr {
+				t.Errorf("Wrap() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+// TestWrapf checks the nil passthrough contract of Wrapf.
+func TestWrapf(t *testing.T) {
+	type args struct {
+		err    error
+		format string
+		args   []interface{}
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "normal",
+			args:    args{testErr, "normal message", []any{}},
+			wantErr: true,
+		},
+		{
+			name:    "nil",
+			args:    args{},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := Wrapf(tt.args.err, tt.args.format, tt.args.args...); (err != nil) != tt.wantErr {
+				t.Errorf("Wrapf() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+// Test_noopReportError is a smoke test: the default reporter must not panic.
+func Test_noopReportError(t *testing.T) {
+	type args struct {
+		in0 context.Context
+		in1 error
+		in2 int
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "normal",
+			args: args{context.Background(), testErr, 1},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			noopReportError(tt.args.in0, tt.args.in1, tt.args.in2)
+		})
+	}
+}
diff --git a/pkg/util/errors/message.go b/pkg/util/errors/message.go
new file mode 100644
index 000000000..283915a30
--- /dev/null
+++ b/pkg/util/errors/message.go
@@ -0,0 +1,86 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"fmt"
+	"io"
+)
+
+// This file mirrors the WithMessage functionality from
+// github.com/pkg/errors. We would prefer to reuse the withMessage
+// struct from that package directly (the library recognizes it well),
+// but it does not let client code customize the wrapping, so we keep a
+// local copy here.
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+	if err == nil {
+		return nil
+	}
+	annotated := &withMessage{cause: err, msg: message}
+	return annotated
+}
+
+// WithMessagef annotates err with the format specifier.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...any) error {
+	if err == nil {
+		return nil
+	}
+	annotated := &withMessage{cause: err, msg: fmt.Sprintf(format, args...)}
+	return annotated
+}
+
+// Compile-time interface conformance checks for withMessage.
+var _ error = (*withMessage)(nil)
+var _ Wrapper = (*withMessage)(nil)
+var _ WithIs = (*withMessage)(nil)
+var _ fmt.Formatter = (*withMessage)(nil)
+
+// withMessage prefixes an error with an additional message, mirroring
+// github.com/pkg/errors.withMessage.
+type withMessage struct {
+	cause error
+	msg   string
+}
+
+// Error returns "msg: cause", or just the cause's text when msg is empty.
+func (w *withMessage) Error() string {
+	if w.msg == "" {
+		return w.cause.Error()
+	}
+	return w.msg + ": " + w.cause.Error()
+}
+func (w *withMessage) Cause() error  { return w.cause }
+func (w *withMessage) Unwrap() error { return w.cause }
+
+// Is compares by rendered message text, so two independently built errors
+// with identical text are considered equivalent.
+func (w *withMessage) Is(err error) bool {
+	return w.Error() == err.Error()
+}
+
+// Format implements fmt.Formatter:
+//   %s, %v  the annotated message
+//   %q      the annotated message, quoted
+//   %+v     the cause verbosely, then the message
+func (w *withMessage) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			fmt.Fprintf(s, "%+v\n", w.Cause())
+			io.WriteString(s, w.msg)
+			return
+		}
+		// Plain %v behaves like %s, as in github.com/pkg/errors; without
+		// this fallthrough, %v printed nothing at all.
+		fallthrough
+	case 's':
+		io.WriteString(s, w.Error())
+	case 'q':
+		fmt.Fprintf(s, "%q", w.Error())
+	}
+}
diff --git a/pkg/util/errors/message_test.go b/pkg/util/errors/message_test.go
new file mode 100644
index 000000000..54f7e01e3
--- /dev/null
+++ b/pkg/util/errors/message_test.go
@@ -0,0 +1,124 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"fmt"
+	"github.com/stretchr/testify/require"
+	"testing"
+)
+
+// TestWithMessage checks the nil passthrough contract of WithMessage.
+func TestWithMessage(t *testing.T) {
+	type args struct {
+		err     error
+		message string
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "normal",
+			args:    args{testErr, "message"},
+			wantErr: true,
+		},
+		{
+			name:    "nil",
+			args:    args{},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := WithMessage(tt.args.err, tt.args.message); (err != nil) != tt.wantErr {
+				t.Errorf("WithMessage() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+// TestWithMessagef checks the nil passthrough contract of WithMessagef.
+func TestWithMessagef(t *testing.T) {
+	type args struct {
+		err    error
+		format string
+		args   []any
+	}
+	tests := []struct {
+		name    string
+		args    args
+		wantErr bool
+	}{
+		{
+			name:    "normal",
+			args:    args{testErr, "message", []any{}},
+			wantErr: true,
+		},
+		{
+			name:    "nil",
+			args:    args{},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if err := WithMessagef(tt.args.err, tt.args.format, tt.args.args...); (err != nil) != tt.wantErr {
+				t.Errorf("WithMessagef() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+// Test_withMessage_Format pins the rendering of %s, %q and %+v for both a
+// non-empty and an empty prefix message.
+func Test_withMessage_Format(t *testing.T) {
+	type fields struct {
+		cause error
+		msg   string
+	}
+	tests := []struct {
+		name     string
+		fields   fields
+		wantMsg  string
+		wantMsgQ string
+		wantMsgV string
+	}{
+		{
+			name:     "normal",
+			fields:   fields{testErr, "prefix"},
+			wantMsg:  "prefix: test error",
+			wantMsgQ: `"prefix: test error"`,
+			wantMsgV: "test error\nprefix",
+		},
+		{
+			name:     "empty",
+			fields:   fields{testErr, ""},
+			wantMsg:  "test error",
+			wantMsgQ: `"test error"`,
+			wantMsgV: "test error\n",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			w := &withMessage{
+				cause: tt.fields.cause,
+				msg:   tt.fields.msg,
+			}
+			require.Equal(t, tt.wantMsg, w.Error())
+			require.Equal(t, tt.wantMsg, fmt.Sprintf("%s", w))
+			require.Equal(t, tt.wantMsgQ, fmt.Sprintf("%q", w))
+			require.Equal(t, tt.wantMsgV, fmt.Sprintf("%+v", w))
+		})
+	}
+}
diff --git a/pkg/util/errors/panic.go b/pkg/util/errors/panic.go
new file mode 100644
index 000000000..004f62670
--- /dev/null
+++ b/pkg/util/errors/panic.go
@@ -0,0 +1,41 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/matrixorigin/matrixone/pkg/util"
+)
+
+// ReportPanic converts a recovered panic value r into an error annotated
+// with ctx and a stack trace, and reports it through the registered error
+// reporter. The returned error has already been reported.
+func ReportPanic(ctx context.Context, r any) error {
+	// Depth 3: presumably skips the ReportPanic wrapper frames so the
+	// captured stack starts near the recover site — TODO confirm the
+	// exact frame count against util.Callers.
+	return ReportPanicWithDepth(ctx, r, 3)
+}
+
+// ReportPanicWithDepth is like ReportPanic, with an explicit number of
+// stack frames to skip when capturing the trace.
+func ReportPanicWithDepth(ctx context.Context, r any, depth int) error {
+	panicErr := PanicAsError(r, depth+1)
+	return WithContext(ctx, panicErr)
+}
+
+// PanicAsError turns r into an error if it is not one already.
+func PanicAsError(r any, depth int) error {
+	if err, ok := r.(error); ok {
+		return &withStack{err, util.Callers(depth + 1)}
+	}
+	return &withStack{fmt.Errorf("panic: %v", r), util.Callers(depth + 1)}
+}
diff --git a/pkg/util/errors/panic_test.go b/pkg/util/errors/panic_test.go
new file mode 100644
index 000000000..09b5a1502
--- /dev/null
+++ b/pkg/util/errors/panic_test.go
@@ -0,0 +1,122 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/matrixorigin/matrixone/pkg/util"
+
+	"github.com/stretchr/testify/require"
+)
+
+// InitErrorCollector installs a reporter that forwards every reported error
+// to ch, letting tests observe reports synchronously (ch is unbuffered, so
+// reporting blocks until the test reads).
+func InitErrorCollector(ch chan error) {
+	SetErrorReporter(func(_ context.Context, err error, i int) {
+		ch <- err
+	})
+}
+
+// TestRecoverRaw checks that a plain panic is caught by a deferred recover.
+func TestRecoverRaw(t *testing.T) {
+	defer func() {
+		if err := recover(); err != nil {
+			t.Logf("Recover() error = %v", err)
+		} else {
+			// A missing panic must fail the test, not merely log; the
+			// original used t.Logf here and could silently pass.
+			t.Errorf("Recover() error = %v, wantErr %v", err, true)
+		}
+	}()
+	panic("TestRecoverRaw")
+}
+
+// TestRecoverFunc checks that a string panic recovered in another goroutine
+// reaches the collector through ReportPanic.
+func TestRecoverFunc(t *testing.T) {
+	resultCh := make(chan error)
+	InitErrorCollector(resultCh)
+	go func() {
+		defer func() {
+			if err := recover(); err != nil {
+				ReportPanic(context.Background(), err)
+				t.Logf("raw Caller: %+v", util.Callers(0))
+			}
+		}()
+		panic("TestRecoverFunc")
+	}()
+	err := <-resultCh
+	require.NotEmpty(t, err, "get error is nil.")
+	t.Logf("err %%s : %s", err)
+	t.Logf("err %%+v: %+v", err)
+}
+
+// TestRecoverFunc_nil is like TestRecoverFunc but panics with an error
+// value instead of a string. NOTE(review): the "_nil" suffix looks
+// misleading — nothing nil is involved; consider renaming.
+func TestRecoverFunc_nil(t *testing.T) {
+	resultCh := make(chan error)
+	InitErrorCollector(resultCh)
+	go func() {
+		defer func() {
+			if err := recover(); err != nil {
+
+				ReportPanic(context.Background(), err)
+				t.Logf("raw Caller: %+v", util.Callers(0))
+			}
+		}()
+		panic(fmt.Errorf("TestRecoverFunc"))
+	}()
+	err := <-resultCh
+	require.NotEmpty(t, err, "get error is nil.")
+	t.Logf("err %%s : %s", WithStack(fmt.Errorf("example")))
+	t.Logf("err %%+v: %+v", err)
+}
+
+// TestReportPanic checks that ReportPanic wraps a recovered value into an
+// error with the "panic: " prefix and an attached context.
+func TestReportPanic(t *testing.T) {
+	// Reset the global reporter in case an earlier test replaced it.
+	SetErrorReporter(noopReportError)
+	type args struct {
+		ctx context.Context
+	}
+	tests := []struct {
+		name       string
+		args       args
+		wantErr    bool
+		hasContext bool
+		wantMsg    string
+	}{
+		{
+			name:       "normal",
+			args:       args{ctx: context.Background()},
+			wantErr:    true,
+			hasContext: true,
+			wantMsg:    "TestReportPanic",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			defer func() {
+				if err := recover(); (err != nil) == tt.wantErr {
+					t.Logf("recover() error = %v", err)
+					err = ReportPanic(tt.args.ctx, err)
+					if fmt.Sprintf("%s", err) != fmt.Sprintf("panic: %s", tt.wantMsg) {
+						t.Errorf("ReportPanic() error = %v, wantMsg: %s", err, tt.wantMsg)
+					}
+					if HasContext(err.(error)) != tt.hasContext {
+						t.Errorf("ReportPanic() error = %v, wantErr %v", err, tt.hasContext)
+					} else {
+						t.Logf("ReportPanic() error = %+v", err)
+					}
+				} else {
+					t.Errorf("Recover() error = %v, wantErr %v", err, tt.wantErr)
+				}
+			}()
+			panic(tt.wantMsg)
+		})
+	}
+}
diff --git a/pkg/util/errors/stack.go b/pkg/util/errors/stack.go
new file mode 100644
index 000000000..43c652631
--- /dev/null
+++ b/pkg/util/errors/stack.go
@@ -0,0 +1,90 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"fmt"
+
+	"github.com/matrixorigin/matrixone/pkg/util"
+
+	"github.com/cockroachdb/errors/errbase"
+)
+
+// This file mirrors the WithStack functionality from
+// github.com/pkg/errors. We would prefer to reuse the withStack
+// struct from that package directly (the library recognizes it well)
+// unfortunately github.com/pkg/errors does not enable client code to
+// customize the depth at which the stack trace is captured.
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+func WithStack(err error) error {
+	// Skip the frame of WithStack itself in caller stack.
+	// this mirrors the behavior of WithStack() in github.com/pkg/errors.
+	return WithStackDepth(err, 1)
+}
+
+// WithStackDepth annotates err with a stack trace starting from the given call depth.
+// The value zero identifies the caller of WithStackDepth itself.
+// See the documentation of WithStack() for more details.
+// If err is nil, WithStackDepth returns nil.
+func WithStackDepth(err error, depth int) error {
+	if err == nil {
+		return nil
+	}
+	return &withStack{cause: err, Stack: util.Callers(depth + 1)}
+}
+
+// StackTracer retrieves the StackTrace
+// Generally you would want to use the GetStackTracer function to do that.
+type StackTracer interface {
+	StackTrace() util.StackTrace
+}
+
+// GetStackTracer returns the outermost StackTracer in inErr's wrap chain,
+// or nil if no error in the chain carries a stack trace.
+func GetStackTracer(inErr error) StackTracer {
+	var stacked StackTracer
+	WalkDeep(inErr, func(err error) bool {
+		if stackTracer, ok := err.(StackTracer); ok {
+			stacked = stackTracer
+			return true // stop at the first (outermost) match
+		}
+		return false
+	})
+	return stacked
+}
+
+// HasStack reports whether any error in err's wrap chain carries a stack trace.
+func HasStack(err error) bool {
+	return GetStackTracer(err) != nil
+}
+
+// withStack pairs an error with the stack captured at annotation time; the
+// embedded *util.Stack provides the StackTrace method.
+type withStack struct {
+	cause error
+
+	*util.Stack
+}
+
+// Compile-time interface conformance checks for withStack.
+var _ error = (*withStack)(nil)
+var _ Wrapper = (*withStack)(nil)
+var _ fmt.Formatter = (*withStack)(nil)
+var _ StackTracer = (*withStack)(nil)
+
+func (w *withStack) Error() string { return w.cause.Error() }
+func (w *withStack) Cause() error  { return w.cause }
+func (w *withStack) Unwrap() error { return w.cause }
+
+// Format implements the fmt.Formatter interface.
+func (w *withStack) Format(s fmt.State, verb rune) { errbase.FormatError(w, s, verb) }
+
+// HasStack marks this error as stack-carrying for formatting libraries.
+func (w *withStack) HasStack() bool {
+	return true
+}
diff --git a/pkg/util/errors/stack_test.go b/pkg/util/errors/stack_test.go
new file mode 100644
index 000000000..872b2c19a
--- /dev/null
+++ b/pkg/util/errors/stack_test.go
@@ -0,0 +1,217 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"context"
+	"github.com/matrixorigin/matrixone/pkg/util"
+	"github.com/stretchr/testify/require"
+	"reflect"
+	"testing"
+)
+
+// TestGetStackTracer verifies that GetStackTracer finds the stack carrier
+// both directly (stackErr is a *withStack) and through a wrapping error
+// (WithContext).
+// NOTE(review): stackErr is a package-level fixture declared in a sibling
+// test file of this package.
+func TestGetStackTracer(t *testing.T) {
+	type args struct {
+		cause error
+	}
+	tests := []struct {
+		name           string
+		args           args
+		hasStackTracer bool
+		want           util.StackTrace
+	}{
+		{
+			name: "normal",
+			args: args{
+				cause: stackErr,
+			},
+			hasStackTracer: true,
+			want:           stackErr.(*withStack).Stack.StackTrace(),
+		},
+		{
+			name: "withContext",
+			args: args{
+				cause: WithContext(context.Background(), stackErr),
+			},
+			hasStackTracer: true,
+			want:           stackErr.(*withStack).Stack.StackTrace(),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := GetStackTracer(tt.args.cause)
+			if !tt.hasStackTracer {
+				require.Empty(t, got, "GetStackTracer not empty")
+				return
+			}
+			if !reflect.DeepEqual(got.StackTrace(), tt.want) {
+				t.Errorf("GetStackTracer() = %v, want %v", got.StackTrace(), tt.want)
+			}
+		})
+	}
+}
+
+// TestHasStack checks HasStack for a direct stack carrier, a wrapped one,
+// and a plain error with no stack (testErr, declared in a sibling test file).
+func TestHasStack(t *testing.T) {
+	type args struct {
+		err error
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "normal",
+			args: args{
+				err: stackErr,
+			},
+			want: true,
+		},
+		{
+			name: "withContext",
+			args: args{
+				err: WithContext(context.Background(), stackErr),
+			},
+			want: true,
+		},
+		{
+			name: "raw error",
+			args: args{
+				err: testErr,
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := HasStack(tt.args.err); got != tt.want {
+				t.Errorf("HasStack() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_withStack_Cause checks that withStack.Cause returns the wrapped
+// cause (non-nil for the populated cases, nil for the empty case).
+// NOTE(review): the "withContext" case actually uses msgErr, not a
+// WithContext-wrapped error — the name looks stale; confirm intent.
+func Test_withStack_Cause(t *testing.T) {
+	type fields struct {
+		cause error
+		Stack *util.Stack
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		wantErr bool
+	}{
+		{
+			name: "normal",
+			fields: fields{
+				cause: stackErr,
+				Stack: stackErr.(*withStack).Stack,
+			},
+			wantErr: true,
+		},
+		{
+			name: "withContext",
+			fields: fields{
+				cause: msgErr,
+				Stack: stackErr.(*withStack).Stack,
+			},
+			wantErr: true,
+		},
+		{
+			name: "empty",
+			fields: fields{
+				cause: nil,
+				Stack: nil,
+			},
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			w := &withStack{
+				cause: tt.fields.cause,
+				Stack: tt.fields.Stack,
+			}
+			if err := w.Cause(); (err != nil) != tt.wantErr {
+				t.Errorf("Cause() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+// Test_WithStack_HasStack verifies that withStack.HasStack always reports
+// true, regardless of what the cause is.
+func Test_WithStack_HasStack(t *testing.T) {
+	type fields struct {
+		cause error
+		Stack *util.Stack
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		want   bool
+	}{
+		{
+			name: "normal",
+			fields: fields{
+				cause: stackErr,
+				Stack: stackErr.(*withStack).Stack,
+			},
+			want: true,
+		},
+		{
+			name: "withContext",
+			fields: fields{
+				cause: WithContext(context.Background(), stackErr),
+				Stack: stackErr.(*withStack).Stack,
+			},
+			want: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			w := &withStack{
+				cause: tt.fields.cause,
+				Stack: tt.fields.Stack,
+			}
+			if got := w.HasStack(); got != tt.want {
+				t.Errorf("HasStack() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_WithStack verifies the nil passthrough contract of WithStack:
+// wrapping a nil error must return nil, not a non-nil *withStack holding
+// a nil cause.
+func Test_WithStack(t *testing.T) {
+	type fields struct {
+		cause error
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		want   bool
+	}{
+		{
+			name:   "nil",
+			fields: fields{},
+			want:   false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := WithStack(tt.fields.cause)
+			// Fixed: the failure message previously said "HasStack()",
+			// which is a different function than the one under test.
+			if (got != nil) != tt.want {
+				t.Errorf("WithStack() = %v, want non-nil: %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/pkg/util/export/batch_processor.go b/pkg/util/export/batch_processor.go
new file mode 100644
index 000000000..ae9242ea3
--- /dev/null
+++ b/pkg/util/export/batch_processor.go
@@ -0,0 +1,374 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/matrixorigin/matrixone/pkg/logutil"
+	"github.com/matrixorigin/matrixone/pkg/logutil/logutil2"
+	"github.com/matrixorigin/matrixone/pkg/util/batchpipe"
+)
+
+const defaultQueueSize = 262144 // queue mem cost = 2MB
+
+// bufferHolder wraps one ItemBuffer (one per item type) together with the
+// machinery that decides when its contents become a batch: a size-based
+// flush (buffer.ShouldFlush) and a time-based flush (trigger timer).
+type bufferHolder struct {
+	// name like a type
+	name string
+	// buffer is instance of batchpipe.ItemBuffer with its own elimination algorithm(like LRU, LFU)
+	buffer batchpipe.ItemBuffer[batchpipe.HasName, any]
+	// signal send signal to Collector
+	signal bufferSignalFunc
+	// impl Np NewItemBatchHandler
+	impl batchpipe.PipeImpl[batchpipe.HasName, any]
+	// trigger handle Reminder strategy
+	trigger *time.Timer
+
+	// mux guards buffer/batch; readonly is additionally read atomically.
+	mux      sync.RWMutex
+	readonly uint32
+
+	// batch holds the generated batch between genBatch and FlushAndReset;
+	// nil when no batch is pending.
+	batch *any
+}
+
+// readonly states: READWRITE accepts Add calls, READONLY means the buffer
+// is frozen for batch generation/flush.
+const READWRITE = 0
+const READONLY = 1
+
+// bufferSignalFunc notifies the collector that a holder wants flushing.
+type bufferSignalFunc func(*bufferHolder)
+
+// newBufferHolder creates a holder for items of name's type, using impl to
+// build the underlying buffer, and arms the time-based flush trigger from
+// the buffer's Reminder strategy (the buffer must implement
+// batchpipe.Reminder, or the type assertion panics).
+func newBufferHolder(name batchpipe.HasName, impl batchpipe.PipeImpl[batchpipe.HasName, any], signal bufferSignalFunc) *bufferHolder {
+	buffer := impl.NewItemBuffer(name.GetName())
+	b := &bufferHolder{
+		name:     name.GetName(),
+		buffer:   buffer,
+		signal:   signal,
+		impl:     impl,
+		readonly: READWRITE,
+	}
+	// Hold the lock while arming the timer so the callback cannot observe
+	// a half-constructed holder.
+	b.mux.Lock()
+	defer b.mux.Unlock()
+	reminder := b.buffer.(batchpipe.Reminder)
+	b.trigger = time.AfterFunc(reminder.RemindNextAfter(), func() {
+		// Skip the timed flush if a flush is already in progress.
+		if atomic.LoadUint32(&b.readonly) == READONLY {
+			logutil.Debugf("buffer %s trigger time, pass.", b.name)
+			return
+		}
+		b.signal(b)
+	})
+	return b
+}
+
+// Add directly call buffer.Add(), while bufferHolder is NOT readonly.
+// While a flush is in progress (readonly == READONLY) it spins with a
+// 1ms sleep, releasing the read lock each round so FlushAndReset (which
+// needs the write lock) can make progress.
+func (b *bufferHolder) Add(item batchpipe.HasName) {
+	b.mux.RLock()
+	for atomic.LoadUint32(&b.readonly) == READONLY {
+		b.mux.RUnlock()
+		time.Sleep(time.Millisecond)
+		b.mux.RLock()
+	}
+	defer b.mux.RUnlock()
+	b.buffer.Add(item)
+	// Size-based flush: hand the holder to the collector when full.
+	if b.buffer.ShouldFlush() {
+		b.signal(b)
+	}
+}
+
+// Stop set bufferHolder readonly, and hold Add request.
+// Returns false if the holder was already readonly (a flush is already
+// underway), so concurrent flushers do not double-process one buffer.
+func (b *bufferHolder) Stop() bool {
+	b.mux.RLock()
+	defer b.mux.RUnlock()
+	if !atomic.CompareAndSwapUint32(&b.readonly, READWRITE, READONLY) {
+		return false
+	}
+	b.trigger.Stop()
+	return true
+}
+
+// StopTrigger cancels the time-based flush timer; returns what
+// time.Timer.Stop returns (false if the timer already fired or stopped).
+func (b *bufferHolder) StopTrigger() bool {
+	b.mux.Lock()
+	defer b.mux.Unlock()
+	return b.trigger.Stop()
+}
+
+// GetBatch generates a batch from the frozen buffer contents, using buf as
+// scratch space. It refuses (returns false) unless the holder was first
+// frozen via Stop(), preventing a batch from racing with writers.
+func (b *bufferHolder) GetBatch(buf *bytes.Buffer) (any, bool) {
+	b.mux.Lock()
+	defer b.mux.Unlock()
+	if atomic.LoadUint32(&b.readonly) != READONLY {
+		return nil, false
+	}
+	return b.buffer.GetBatch(buf), true
+}
+
+// FlushAndReset hands the pending batch (if any) to the pipe's batch
+// handler, then clears the buffer, re-arms the flush timer and reopens
+// the holder for writes. Returns false if the holder was not frozen.
+func (b *bufferHolder) FlushAndReset() bool {
+	b.mux.Lock()
+	defer b.mux.Unlock()
+	if atomic.LoadUint32(&b.readonly) != READONLY {
+		return false
+	}
+	if b.batch != nil {
+		var flush = b.impl.NewItemBatchHandler()
+		flush(*b.batch)
+	} else {
+		logutil.Debugf("batch is nil")
+	}
+	b.resetTrigger()
+	b.buffer.Reset()
+	b.batch = nil
+	// Reopen for writers last, after all state is clean.
+	atomic.StoreUint32(&b.readonly, READWRITE)
+	return true
+}
+
+// resetTrigger re-arms the time-based flush from the buffer's Reminder.
+// Caller must hold b.mux.
+func (b *bufferHolder) resetTrigger() {
+	b.trigger.Reset(b.buffer.(batchpipe.Reminder).RemindNextAfter())
+}
+
+var _ BatchProcessor = &MOCollector{}
+
+// MOCollector handle all bufferPipe.
+// Items flow through three worker stages connected by channels:
+// Collect -> awakeCollect -> doCollect -> (bufferHolder) -> awakeGenerate
+// -> doGenerate -> awakeBatch -> doExport.
+type MOCollector struct {
+	// mux control all changes on buffers
+	mux sync.RWMutex
+	// buffers maintain working buffer for each type
+	buffers map[string]*bufferHolder
+	// awakeCollect handle collect signal
+	awakeCollect chan batchpipe.HasName
+	// awakeGenerate handle generate signal
+	awakeGenerate chan *bufferHolder
+	// awakeBatch handle export signal
+	awakeBatch chan *bufferHolder
+
+	collectorCnt int // see WithCollectorCnt
+	generatorCnt int // see WithGeneratorCnt
+	exporterCnt  int // see WithExporterCnt
+
+	// flow control
+	started  uint32        // set once by Start
+	stopOnce sync.Once     // makes Stop idempotent
+	stopWait sync.WaitGroup // tracks all worker goroutines
+	stopCh   chan struct{}  // closed to stop all workers
+}
+
+// NewMOCollector constructs a collector whose worker counts are derived
+// from the number of pipe implementations registered so far: two collect
+// workers per pipe, and one generate/export worker each.
+func NewMOCollector() *MOCollector {
+	implCnt := gPipeImplHolder.Size()
+	return &MOCollector{
+		buffers:       make(map[string]*bufferHolder),
+		awakeCollect:  make(chan batchpipe.HasName, defaultQueueSize),
+		awakeGenerate: make(chan *bufferHolder, 16),
+		awakeBatch:    make(chan *bufferHolder),
+		stopCh:        make(chan struct{}),
+		collectorCnt:  2 * implCnt,
+		generatorCnt:  implCnt,
+		exporterCnt:   implCnt,
+	}
+}
+
+// Collect enqueues i for asynchronous batching. It never returns an error
+// today; the send blocks if the (large, buffered) queue is full.
+// NOTE(review): ctx is currently unused — cancellation is not observed here.
+func (c *MOCollector) Collect(ctx context.Context, i batchpipe.HasName) error {
+	c.awakeCollect <- i
+	return nil
+}
+
+// Start launches the collect/generate/export worker goroutines.
+// Returns false if the collector was already started. The fast atomic
+// check is re-validated under the lock before spawning workers.
+func (c *MOCollector) Start() bool {
+	if atomic.LoadUint32(&c.started) != 0 {
+		return false
+	}
+	c.mux.Lock()
+	defer c.mux.Unlock()
+	if c.started != 0 {
+		return false
+	}
+	defer atomic.StoreUint32(&c.started, 1)
+
+	logutil2.Infof(DefaultContext(), "MOCollector Start")
+	for i := 0; i < c.collectorCnt; i++ {
+		c.stopWait.Add(1)
+		go c.doCollect(i)
+	}
+	for i := 0; i < c.generatorCnt; i++ {
+		c.stopWait.Add(1)
+		go c.doGenerate(i)
+	}
+	for i := 0; i < c.exporterCnt; i++ {
+		c.stopWait.Add(1)
+		go c.doExport(i)
+	}
+	return true
+}
+
+// doCollect handles all item-accept work: it routes each item to the
+// bufferHolder for the item's type, lazily creating the holder on first
+// sight of a type (double-checked under the write lock).
+// Runs as a goroutine worker until stopCh closes.
+func (c *MOCollector) doCollect(idx int) {
+	defer c.stopWait.Done()
+	logutil.Debugf("doCollect %dth: start", idx)
+loop:
+	for {
+		select {
+		case i := <-c.awakeCollect:
+			c.mux.RLock()
+			buf, has := c.buffers[i.GetName()]
+			c.mux.RUnlock()
+			if !has {
+				logutil.Debugf("doCollect %dth: init buffer for %s", idx, i.GetName())
+				c.mux.Lock()
+				// Re-check under the write lock: another worker may have
+				// created the holder between RUnlock and Lock.
+				buf, has = c.buffers[i.GetName()]
+				if !has {
+					impl, implExist := gPipeImplHolder.Get(i.GetName())
+					if !implExist {
+						c.mux.Unlock()
+						panic(fmt.Errorf("unknown item type: %s", i.GetName()))
+					}
+					buf = newBufferHolder(i, impl, awakeBuffer(c))
+					c.buffers[i.GetName()] = buf
+					logutil.Debugf("doCollect %dth: init buffer done.", idx)
+				}
+				c.mux.Unlock()
+			}
+			// Add outside the collector lock (bufferHolder synchronizes
+			// itself). Fixes the original bug where the item was silently
+			// dropped when another goroutine won the creation race.
+			buf.Add(i)
+		case <-c.stopCh:
+			break loop
+		}
+	}
+	logutil.Debugf("doCollect %dth: Done.", idx)
+}
+
+// awakeBuffer builds the signal callback wired into each bufferHolder;
+// invoking it hands the holder over to the generate workers.
+func awakeBuffer(c *MOCollector) bufferSignalFunc {
+	return func(holder *bufferHolder) {
+		c.awakeGenerate <- holder
+	}
+}
+
+// doGenerate handle buffer gen BatchRequest, which could be anything.
+// It freezes the holder, generates its batch, then forwards the holder to
+// the export stage. Goroutine worker; the scratch buf is reused per worker.
+func (c *MOCollector) doGenerate(idx int) {
+	defer c.stopWait.Done()
+	var buf = new(bytes.Buffer)
+	logutil.Debugf("doGenerate %dth: start", idx)
+loop:
+	for {
+		select {
+		case holder := <-c.awakeGenerate:
+			c.genBatch(holder, buf)
+			// Forward to export, but stay responsive to shutdown while the
+			// unbuffered awakeBatch send blocks.
+			select {
+			case c.awakeBatch <- holder:
+			case <-c.stopCh:
+			}
+		case <-c.stopCh:
+			break loop
+		}
+	}
+	logutil.Debugf("doGenerate %dth: Done.", idx)
+}
+
+// doExport handles generated batches: it flushes each holder's pending
+// batch via the pipe's handler. On shutdown it drains awakeBatch.
+// NOTE(review): awakeBatch is unbuffered, so len() is always 0 — the
+// drain loop under stopCh looks like a no-op; confirm intent.
+func (c *MOCollector) doExport(idx int) {
+	defer c.stopWait.Done()
+	logutil.Debugf("doExport %dth: start", idx)
+loop:
+	for {
+		select {
+		case holder := <-c.awakeBatch:
+			c.handleBatch(holder)
+		case <-c.stopCh:
+			c.mux.Lock()
+			for len(c.awakeBatch) > 0 {
+				<-c.awakeBatch
+			}
+			c.mux.Unlock()
+			break loop
+		}
+	}
+	logutil.Debugf("doExport %dth: Done.", idx)
+}
+
+// genBatch freezes holder (Stop) and stores the generated batch on it for
+// the export stage. If another worker already froze the holder, this is a
+// no-op — that worker owns the flush cycle.
+func (c *MOCollector) genBatch(holder *bufferHolder, buf *bytes.Buffer) {
+	if ok := holder.Stop(); !ok {
+		logutil.Debugf("genBatch: buffer %s: already stop", holder.name)
+		return
+	}
+	if batch, ok := holder.GetBatch(buf); ok {
+		holder.batch = &batch
+	}
+}
+
+// handleBatch flushes the holder's pending batch and reopens it for writes.
+func (c *MOCollector) handleBatch(holder *bufferHolder) {
+	holder.FlushAndReset()
+}
+
+// Stop drains the collect queue, cancels all flush timers, stops every
+// worker goroutine, then flushes each buffer one final time. Idempotent
+// via sync.Once; always returns nil.
+// NOTE(review): the graceful flag is currently ignored — Stop always
+// drains and flushes.
+func (c *MOCollector) Stop(graceful bool) error {
+	var err error
+	var buf = new(bytes.Buffer)
+	c.stopOnce.Do(func() {
+		// Wait for the collect workers to consume everything queued.
+		for len(c.awakeCollect) > 0 {
+			logutil.Debugf("doCollect left %d job", len(c.awakeCollect))
+			// Fixed: was 250 * time.Second, which stalled shutdown for
+			// minutes per poll; a short poll interval is intended here.
+			time.Sleep(250 * time.Millisecond)
+		}
+		for _, buffer := range c.buffers {
+			_ = buffer.StopTrigger()
+		}
+		close(c.stopCh)
+		c.stopWait.Wait()
+		// All workers are gone; flush whatever remains in each buffer.
+		for _, buffer := range c.buffers {
+			c.genBatch(buffer, buf)
+			c.handleBatch(buffer)
+		}
+	})
+	return err
+}
+
+var _ BatchProcessor = &noopBatchProcessor{}
+
+// noopBatchProcessor is the default BatchProcessor: it accepts and
+// discards everything. Used until a real collector is installed.
+type noopBatchProcessor struct {
+}
+
+func (n noopBatchProcessor) Collect(context.Context, batchpipe.HasName) error { return nil }
+func (n noopBatchProcessor) Start() bool                                      { return true }
+func (n noopBatchProcessor) Stop(bool) error                                  { return nil }
+
+// gPipeImplHolder is the process-wide registry of pipe implementations,
+// keyed by item-type name (see Register in type.go).
+var gPipeImplHolder *pipeImplHolder = newPipeImplHolder()
+
+// pipeImplHolder is a mutex-guarded map from item-type name to its
+// PipeImpl.
+type pipeImplHolder struct {
+	mux   sync.RWMutex
+	impls map[string]batchpipe.PipeImpl[batchpipe.HasName, any]
+}
+
+// newPipeImplHolder returns an empty, ready-to-use registry.
+func newPipeImplHolder() *pipeImplHolder {
+	return &pipeImplHolder{
+		impls: make(map[string]batchpipe.PipeImpl[batchpipe.HasName, any]),
+	}
+}
+
+// Get returns the implementation registered for name, and whether one
+// exists.
+func (h *pipeImplHolder) Get(name string) (batchpipe.PipeImpl[batchpipe.HasName, any], bool) {
+	h.mux.RLock()
+	defer h.mux.RUnlock()
+	impl, has := h.impls[name]
+	return impl, has
+}
+
+// Put registers (or replaces) the implementation for name; it reports
+// whether an implementation was already present.
+func (h *pipeImplHolder) Put(name string, impl batchpipe.PipeImpl[batchpipe.HasName, any]) bool {
+	h.mux.Lock()
+	defer h.mux.Unlock()
+	_, has := h.impls[name]
+	h.impls[name] = impl
+	return has
+}
+
+// Size returns the number of registered implementations.
+// Fixed: takes the read lock — Put may run concurrently (e.g. Register
+// racing NewMOCollector), and an unguarded len() on the map is a data race.
+func (h *pipeImplHolder) Size() int {
+	h.mux.RLock()
+	defer h.mux.RUnlock()
+	return len(h.impls)
+}
diff --git a/pkg/util/export/batch_processor_test.go b/pkg/util/export/batch_processor_test.go
new file mode 100644
index 000000000..915e41f8b
--- /dev/null
+++ b/pkg/util/export/batch_processor_test.go
@@ -0,0 +1,263 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"github.com/matrixorigin/matrixone/pkg/util"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/matrixorigin/matrixone/pkg/config"
+	"github.com/matrixorigin/matrixone/pkg/logutil"
+	"github.com/matrixorigin/matrixone/pkg/util/batchpipe"
+	"github.com/matrixorigin/matrixone/pkg/util/errors"
+
+	"github.com/google/gops/agent"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap/zapcore"
+)
+
+// init configures a debug-level console logger for the whole test binary
+// and starts a gops agent for live diagnostics (best-effort).
+func init() {
+	logutil.SetupMOLogger(&logutil.LogConfig{
+		Level:       zapcore.DebugLevel.String(),
+		Format:      "console",
+		Filename:    config.GlobalSystemVariables.GetLogFilename(),
+		MaxSize:     int(config.GlobalSystemVariables.GetLogMaxSize()),
+		MaxDays:     int(config.GlobalSystemVariables.GetLogMaxDays()),
+		MaxBackups:  int(config.GlobalSystemVariables.GetLogMaxBackups()),
+		EnableStore: false,
+	})
+	// gops failing to listen is not fatal for the tests.
+	if err := agent.Listen(agent.Options{}); err != nil {
+		logutil.Errorf("listen gops agent failed: %s", err)
+		return
+	}
+}
+
+// NumType is the single item-type name used by these tests.
+const NumType = "Num"
+
+// Compile-time checks that the test doubles satisfy the pipe interfaces.
+var _ batchpipe.HasName = (*Num)(nil)
+var _ batchpipe.ItemBuffer[batchpipe.HasName, any] = &numBuffer{}
+var _ batchpipe.PipeImpl[batchpipe.HasName, any] = &dummyNumPipeImpl{}
+
+// Num is a trivial collectible item: an int64 with a fixed type name.
+type Num int64
+
+// newNum returns a *Num holding v.
+func newNum(v int64) *Num {
+	n := Num(v)
+	return &n
+}
+
+func (n Num) GetName() string { return NumType }
+
+// signalFunc is a test hook invoked on every numBuffer.Add; tests swap it
+// to synchronize with the collector.
+var signalFunc = func() {}
+
+// numBuffer is a test ItemBuffer that stores items in a slice, flushes at
+// 3 elements, and calls signal after every Add.
+type numBuffer struct {
+	batchpipe.Reminder
+	arr    []batchpipe.HasName
+	mux    sync.Mutex
+	signal func()
+}
+
+// Add appends item and, when the test signal hook is set, sanity-checks
+// that the buffer length matches the item's value (items 1..3 fill the
+// first batch, 4..6 the second) before signalling the test.
+func (s *numBuffer) Add(item batchpipe.HasName) {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	s.arr = append(s.arr, item)
+	if s.signal != nil {
+		val := int(*item.(*Num))
+		length := len(s.arr)
+		logutil.Infof("accept: %v, len: %d", *item.(*Num), length)
+		if (val <= 3 && val != length) && (val-3) != length {
+			// Fixed typo in the panic message ("rignt" -> "right").
+			panic(fmt.Errorf("len not right, elem: %d, len: %d", val, length))
+		}
+		s.signal()
+	}
+}
+// Reset empties the buffer while keeping its capacity; logs the caller
+// stack for test debugging.
+func (s *numBuffer) Reset() {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	logutil.Infof("buffer reset, stack: %+v", util.Callers(0))
+	s.arr = s.arr[0:0]
+}
+// IsEmpty reports whether no items are buffered.
+func (s *numBuffer) IsEmpty() bool {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	return len(s.arr) == 0
+}
+// ShouldFlush triggers a size-based flush at 3 or more items.
+func (s *numBuffer) ShouldFlush() bool {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	return len(s.arr) >= 3
+}
+// GetBatch renders the buffered nums as a SQL-values-style string, e.g.
+// "(1),(2),(3)". Returns "" for an empty buffer. buf is caller-provided
+// scratch space and is reset here.
+func (s *numBuffer) GetBatch(buf *bytes.Buffer) any {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+	if len(s.arr) == 0 {
+		return ""
+	}
+
+	logutil.Infof("GetBatch, len: %d", len(s.arr))
+	buf.Reset()
+	for _, item := range s.arr {
+		// NOTE(review): this `s` shadows the receiver inside the loop.
+		s, ok := item.(*Num)
+		if !ok {
+			panic("Not Num type")
+		}
+		buf.WriteString("(")
+		buf.WriteString(fmt.Sprintf("%d", *s))
+		buf.WriteString("),")
+	}
+	logutil.Infof("GetBatch: %s", buf.String())
+	// Next(len-1) drops the trailing comma.
+	return string(buf.Next(buf.Len() - 1))
+}
+
+// dummyNumPipeImpl is a test PipeImpl whose batch handler publishes each
+// generated batch string on ch so tests can assert on it.
+type dummyNumPipeImpl struct {
+	ch chan string
+}
+
+// NewItemBuffer returns a numBuffer with a 100ms constant flush reminder
+// and the package-level signal hook.
+func (n *dummyNumPipeImpl) NewItemBuffer(string) batchpipe.ItemBuffer[batchpipe.HasName, any] {
+	return &numBuffer{Reminder: batchpipe.NewConstantClock(100 * time.Millisecond), signal: signalFunc}
+}
+
+// NewItemBatchHandler returns a handler that forwards the batch (a string)
+// to the test channel; blocks if the channel is full.
+func (n *dummyNumPipeImpl) NewItemBatchHandler() func(any) {
+	return func(batch any) {
+		n.ch <- batch.(string)
+	}
+}
+
+// Test_newBufferHolder drives a bufferHolder directly (no MOCollector):
+// the first 3 Adds trigger a size-based flush, then the remainder is
+// flushed by the 100ms time trigger. A helper goroutine plays the role of
+// the collector: on signal it freezes the holder and extracts the batch.
+func Test_newBufferHolder(t *testing.T) {
+	type args struct {
+		name          batchpipe.HasName
+		impl          batchpipe.PipeImpl[batchpipe.HasName, any]
+		signal        bufferSignalFunc
+		elems         []*Num
+		elemsReminder []*Num
+		interval      time.Duration
+	}
+	ch := make(chan string, 1)
+	byteBuf := new(bytes.Buffer)
+	signalC := make(chan *bufferHolder)
+	var signal = func(b *bufferHolder) {
+		signalC <- b
+	}
+	// Collector stand-in: freeze the holder and publish its batch.
+	go func() {
+		for {
+			b, ok := <-signalC
+			if !ok {
+				break
+			}
+			b.Stop()
+			if val, ok := b.GetBatch(byteBuf); !ok {
+				t.Errorf("GenBatch failed by in readwrite mode")
+			} else {
+				ch <- val.(string)
+			}
+		}
+	}()
+	tests := []struct {
+		name         string
+		args         args
+		want         string
+		wantReminder string
+	}{
+		{
+			name: "normal",
+			args: args{
+				name:          newNum(0),
+				impl:          &dummyNumPipeImpl{ch: ch},
+				signal:        signal,
+				elems:         []*Num{newNum(1), newNum(2), newNum(3)},
+				elemsReminder: []*Num{newNum(4), newNum(5)},
+				interval:      100 * time.Millisecond,
+			},
+			want:         `(1),(2),(3)`,
+			wantReminder: `(4),(5)`,
+		},
+		{
+			name: "emptyReminder",
+			args: args{
+				name:          newNum(0),
+				impl:          &dummyNumPipeImpl{ch: ch},
+				signal:        signal,
+				elems:         []*Num{newNum(1), newNum(2), newNum(3)},
+				elemsReminder: []*Num{},
+				interval:      100 * time.Millisecond,
+			},
+			want:         `(1),(2),(3)`,
+			wantReminder: ``,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			buf := newBufferHolder(tt.args.name, tt.args.impl, tt.args.signal)
+			for _, v := range tt.args.elems {
+				buf.Add(v)
+			}
+			// Size-based flush fires at the 3rd Add.
+			got := <-ch
+			require.Equal(t, got, tt.want)
+			buf.FlushAndReset()
+
+			for _, v := range tt.args.elemsReminder {
+				buf.Add(v)
+			}
+			// Wait for the 100ms Reminder-driven flush.
+			time.Sleep(tt.args.interval)
+			got = <-ch
+			if got != tt.wantReminder {
+				t.Errorf("newBufferHolder() = %v, want %v", got, tt.wantReminder)
+			}
+			buf.Stop()
+		})
+	}
+	close(signalC)
+}
+
+// TestNewMOCollector runs the full collector pipeline end to end:
+// 1..3 flush by size, 4..6 flush on Stop. acceptSignal blocks until
+// numBuffer.Add has run, keeping the test in lockstep with the workers.
+func TestNewMOCollector(t *testing.T) {
+	ch := make(chan string, 3)
+	errors.SetErrorReporter(func(ctx context.Context, err error, i int) {
+		t.Logf("TestNewMOCollector::ErrorReport: %+v", err)
+	})
+	var signalC = make(chan struct{}, 16)
+	var acceptSignal = func() { <-signalC }
+	// Swap the package-level hook so every Add signals this test.
+	signalFunc = func() { signalC <- struct{}{} }
+
+	Register(newNum(0), &dummyNumPipeImpl{ch: ch})
+	collector := NewMOCollector()
+	collector.Start()
+
+	collector.Collect(DefaultContext(), newNum(1))
+	acceptSignal()
+	collector.Collect(DefaultContext(), newNum(2))
+	acceptSignal()
+	collector.Collect(DefaultContext(), newNum(3))
+	acceptSignal()
+	got := <-ch
+	require.Equal(t, `(1),(2),(3)`, got)
+	collector.Collect(DefaultContext(), newNum(4))
+	acceptSignal()
+	collector.Collect(DefaultContext(), newNum(5))
+	acceptSignal()
+	collector.Collect(DefaultContext(), newNum(6))
+	acceptSignal()
+	// Stop performs the final flush of 4..6.
+	collector.Stop(true)
+	logutil.GetGlobalLogger().Sync()
+	got = <-ch
+	require.Equal(t, `(4),(5),(6)`, got)
+	for i := len(ch); i > 0; i-- {
+		got = <-ch
+		t.Logf("left ch: %s", got)
+	}
+}
diff --git a/pkg/util/export/type.go b/pkg/util/export/type.go
new file mode 100644
index 000000000..8ed387bce
--- /dev/null
+++ b/pkg/util/export/type.go
@@ -0,0 +1,66 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+	"context"
+	"github.com/matrixorigin/matrixone/pkg/util/batchpipe"
+	"sync/atomic"
+)
+
+// init installs the no-op processor and a Background default context so
+// the package is usable before explicit configuration.
+func init() {
+	ResetGlobalBatchProcessor()
+	SetDefaultContextFunc(func() context.Context { return context.Background() })
+}
+
+// BatchProcessor accepts items, batches them, and exports the batches.
+// MOCollector is the real implementation; noopBatchProcessor the default.
+type BatchProcessor interface {
+	Collect(context.Context, batchpipe.HasName) error
+	Start() bool
+	Stop(graceful bool) error
+}
+
+// Register binds an item type (by name) to its pipe implementation in the
+// global registry; a later registration for the same name replaces the
+// earlier one.
+func Register(name batchpipe.HasName, impl batchpipe.PipeImpl[batchpipe.HasName, any]) {
+	_ = gPipeImplHolder.Put(name.GetName(), impl)
+}
+
+// gBatchProcessor holds the current *processorHolder; atomic.Value gives
+// lock-free reads on the hot Collect path.
+var gBatchProcessor atomic.Value
+
+// processorHolder wraps the interface so atomic.Value always stores one
+// concrete type.
+type processorHolder struct {
+	p BatchProcessor
+}
+
+// ResetGlobalBatchProcessor reinstalls the no-op processor.
+func ResetGlobalBatchProcessor() {
+	var p BatchProcessor = &noopBatchProcessor{}
+	SetGlobalBatchProcessor(p)
+}
+
+// SetGlobalBatchProcessor swaps in p as the process-wide processor.
+func SetGlobalBatchProcessor(p BatchProcessor) {
+	gBatchProcessor.Store(&processorHolder{p: p})
+}
+
+// GetGlobalBatchProcessor returns the current processor (never nil after
+// package init).
+func GetGlobalBatchProcessor() BatchProcessor {
+	return gBatchProcessor.Load().(*processorHolder).p
+}
+
+// getContextFunc supplies the context used when callers have none.
+type getContextFunc func() context.Context
+
+// defaultContext stores the current getContextFunc (atomic for lock-free
+// reads).
+var defaultContext atomic.Value
+
+// SetDefaultContextFunc replaces the default-context supplier.
+func SetDefaultContextFunc(f getContextFunc) {
+	defaultContext.Store(f)
+}
+// DefaultContext returns a context from the current supplier; panics only
+// if the package init that installs the supplier never ran.
+func DefaultContext() context.Context {
+	return defaultContext.Load().(getContextFunc)()
+}
diff --git a/pkg/util/export/type_test.go b/pkg/util/export/type_test.go
new file mode 100644
index 000000000..544c183fe
--- /dev/null
+++ b/pkg/util/export/type_test.go
@@ -0,0 +1,99 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package export
+
+import (
+	"context"
+	"github.com/matrixorigin/matrixone/pkg/util/batchpipe"
+	"reflect"
+	"testing"
+)
+
+// dummyContextKey is a private key type so the test value cannot collide
+// with other context keys.
+type dummyContextKey int
+
+var dummyCKKey = dummyContextKey(0)
+
+// TestDefaultContext checks the default supplier and a replacement one.
+// NOTE(review): the "set" case mutates the package-global supplier and
+// does not restore it, so later tests in this package see the replaced
+// default — order-dependent; consider a t.Cleanup restore.
+func TestDefaultContext(t *testing.T) {
+	tests := []struct {
+		name string
+		f    getContextFunc
+		want context.Context
+	}{
+		{
+			name: "normal",
+			want: context.Background(),
+		},
+		{
+			name: "set",
+			f: func() context.Context {
+				return context.WithValue(context.Background(), dummyCKKey, "val")
+			},
+			want: context.WithValue(context.Background(), dummyCKKey, "val"),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if tt.f != nil {
+				SetDefaultContextFunc(tt.f)
+			}
+			if got := DefaultContext(); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("DefaultContext() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// TestGetGlobalBatchProcessor verifies the default processor installed by
+// package init is the no-op implementation.
+func TestGetGlobalBatchProcessor(t *testing.T) {
+	tests := []struct {
+		name string
+		want BatchProcessor
+	}{
+		{
+			name: "normal",
+			want: &noopBatchProcessor{},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := GetGlobalBatchProcessor(); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("GetGlobalBatchProcessor() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// TestRegister only exercises that Register does not panic; it asserts
+// nothing about the registry contents.
+func TestRegister(t *testing.T) {
+	type args struct {
+		name batchpipe.HasName
+		impl batchpipe.PipeImpl[batchpipe.HasName, any]
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "normal",
+			args: args{
+				name: newNum(1),
+				impl: &dummyNumPipeImpl{},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			Register(tt.args.name, tt.args.impl)
+		})
+	}
+}
diff --git a/pkg/util/internalExecutor/internal_executor.go b/pkg/util/internalExecutor/internal_executor.go
index e10c57205..76f6accbe 100644
--- a/pkg/util/internalExecutor/internal_executor.go
+++ b/pkg/util/internalExecutor/internal_executor.go
@@ -67,10 +67,10 @@ type InternalExecResult interface {
 }
 
 type InternalExecutor interface {
-	// exec sql without returning results set
+	// Exec sql without returning results set
 	Exec(string, SessionOverrideOptions) error
-	// exec sql and return results set
+	// Query exec sql and return results set
 	Query(string, SessionOverrideOptions) InternalExecResult
-	// override session for the executor scope
+	// ApplySessionOverride override session for the executor scope
 	ApplySessionOverride(SessionOverrideOptions)
 }
diff --git a/pkg/util/metric/batch_collector.go b/pkg/util/metric/batch_collector.go
index 64294586c..9adf95a93 100644
--- a/pkg/util/metric/batch_collector.go
+++ b/pkg/util/metric/batch_collector.go
@@ -24,8 +24,8 @@ import (
 type cacheKey = int
 
 const (
-	CacheKey_MemStats cacheKey = iota
-	CacheKey_Process
+	cacheKeyMemStats cacheKey = iota
+	cacheKeyProcess
 )
 
 type statCaches struct {
diff --git a/pkg/util/metric/m_hardware.go b/pkg/util/metric/m_hardware.go
index 6fad19ced..11ce1afe1 100644
--- a/pkg/util/metric/m_hardware.go
+++ b/pkg/util/metric/m_hardware.go
@@ -23,7 +23,7 @@ import (
 	"github.com/shirou/gopsutil/v3/mem"
 )
 
-var HardwareStatsCollector = newBatchStatsCollector(
+var hardwareStatsCollector = newBatchStatsCollector(
 	cpuTotal{},
 	cpuPercent{},
 	memUsed{},
@@ -99,7 +99,7 @@ func (m memUsed) Desc() *prom.Desc {
 }
 
 func (m memUsed) Metric(s *statCaches) (prom.Metric, error) {
-	val := s.getOrInsert(CacheKey_MemStats, getMemStats)
+	val := s.getOrInsert(cacheKeyMemStats, getMemStats)
 	if val == nil {
 		return nil, errors.New("empty available memomry")
 	}
@@ -119,7 +119,7 @@ func (m memAvail) Desc() *prom.Desc {
 }
 
 func (m memAvail) Metric(s *statCaches) (prom.Metric, error) {
-	val := s.getOrInsert(CacheKey_MemStats, getMemStats)
+	val := s.getOrInsert(cacheKeyMemStats, getMemStats)
 	if val == nil {
 		return nil, errors.New("empty available memomry")
 	}
diff --git a/pkg/util/metric/m_process.go b/pkg/util/metric/m_process.go
index 1f5bc50d7..5dba161e7 100644
--- a/pkg/util/metric/m_process.go
+++ b/pkg/util/metric/m_process.go
@@ -29,7 +29,7 @@ import (
 // - open fds & max fds (not available on MacOS)
 // - virtual_resident_mem_bytes
 
-var ProcessCollector = newBatchStatsCollector(procCpuPercent{}, procMemUsage{}, procOpenFds{}, procFdsLimit{})
+var processCollector = newBatchStatsCollector(procCpuPercent{}, procMemUsage{}, procOpenFds{}, procFdsLimit{})
 
 var pid = int32(os.Getpid())
 
@@ -54,7 +54,7 @@ func (c procCpuPercent) Desc() *prom.Desc {
 }
 
 func (c procCpuPercent) Metric(s *statCaches) (prom.Metric, error) {
-	val := s.getOrInsert(CacheKey_Process, getProcess)
+	val := s.getOrInsert(cacheKeyProcess, getProcess)
 	if val == nil {
 		return nil, errors.New("empty process")
 	}
@@ -79,7 +79,7 @@ func (c procMemUsage) Desc() *prom.Desc {
 }
 
 func (c procMemUsage) Metric(s *statCaches) (prom.Metric, error) {
-	val := s.getOrInsert(CacheKey_Process, getProcess)
+	val := s.getOrInsert(cacheKeyProcess, getProcess)
 	if val == nil {
 		return nil, errors.New("empty process")
 	}
@@ -102,7 +102,7 @@ func (c procOpenFds) Desc() *prom.Desc {
 }
 
 func (c procOpenFds) Metric(s *statCaches) (prom.Metric, error) {
-	val := s.getOrInsert(CacheKey_Process, getProcess)
+	val := s.getOrInsert(cacheKeyProcess, getProcess)
 	if val == nil {
 		return nil, errors.New("empty process")
 	}
@@ -125,7 +125,7 @@ func (c procFdsLimit) Desc() *prom.Desc {
 }
 
 func (c procFdsLimit) Metric(s *statCaches) (prom.Metric, error) {
-	val := s.getOrInsert(CacheKey_Process, getProcess)
+	val := s.getOrInsert(cacheKeyProcess, getProcess)
 	if val == nil {
 		return nil, errors.New("empty process")
 	}
diff --git a/pkg/util/metric/m_register.go b/pkg/util/metric/m_register.go
index 04d3c6602..c0cdaa151 100644
--- a/pkg/util/metric/m_register.go
+++ b/pkg/util/metric/m_register.go
@@ -17,8 +17,8 @@ package metric
 var initCollectors = []Collector{
 	SQLLatencyObserverFactory,
 	StatementCounterFactory,
-	ProcessCollector,
-	HardwareStatsCollector,
+	processCollector,
+	hardwareStatsCollector,
 }
 
 // register all defined collector here
diff --git a/pkg/util/metric/metric.go b/pkg/util/metric/metric.go
index 4bbc31cd8..f3764890b 100644
--- a/pkg/util/metric/metric.go
+++ b/pkg/util/metric/metric.go
@@ -33,23 +33,23 @@ import (
 )
 
 const (
-	METRIC_DB       = "system_metrics"
-	SQL_CREATE_DB   = "create database if not exists " + METRIC_DB
-	SQL_DROP_DB     = "drop database if exists " + METRIC_DB
-	ALL_IN_ONE_MODE = "monolithic"
+	metricDBConst    = "system_metrics"
+	sqlCreateDBConst = "create database if not exists " + metricDBConst
+	sqlDropDBConst   = "drop database if exists " + metricDBConst
+	ALL_IN_ONE_MODE  = "monolithic"
 )
 
 var (
-	LBL_NODE     = "node"
-	LBL_ROLE     = "role"
-	LBL_VALUE    = "value"
-	LBL_TIME     = "collecttime"
-	occupiedLbls = map[string]struct{}{LBL_TIME: {}, LBL_VALUE: {}, LBL_NODE: {}, LBL_ROLE: {}}
+	lblNodeConst  = "node"
+	lblRoleConst  = "role"
+	lblValueConst = "value"
+	lblTimeConst  = "collecttime"
+	occupiedLbls  = map[string]struct{}{lblTimeConst: {}, lblValueConst: {}, lblNodeConst: {}, lblRoleConst: {}}
 )
 
 type Collector interface {
 	prom.Collector
-	// CancelToProm remove the cost introduced by being compatible with prometheus
+	// cancelToProm remove the cost introduced by being compatible with prometheus
 	CancelToProm()
 	// collectorForProm returns a collector used in prometheus scrape registry
 	CollectorToProm() prom.Collector
@@ -107,13 +107,13 @@ func InitMetric(ieFactory func() ie.InternalExecutor, pu *config.ParameterUnit,
 
 func StopMetricSync() {
 	if moCollector != nil {
-		if ch, effect := moCollector.Stop(); effect {
+		if ch, effect := moCollector.Stop(true); effect {
 			<-ch
 		}
 		moCollector = nil
 	}
 	if moExporter != nil {
-		if ch, effect := moExporter.Stop(); effect {
+		if ch, effect := moExporter.Stop(true); effect {
 			<-ch
 		}
 		moExporter = nil
@@ -144,16 +144,16 @@ func mustRegister(collector Collector) {
 // initTables gathers all metrics and extract metadata to format create table sql
 func initTables(ieFactory func() ie.InternalExecutor) {
 	exec := ieFactory()
-	exec.ApplySessionOverride(ie.NewOptsBuilder().Database(METRIC_DB).Internal(true).Finish())
+	exec.ApplySessionOverride(ie.NewOptsBuilder().Database(metricDBConst).Internal(true).Finish())
 	mustExec := func(sql string) {
 		if err := exec.Exec(sql, ie.NewOptsBuilder().Finish()); err != nil {
 			panic(fmt.Sprintf("[Metric] init metric tables error: %v, sql: %s", err, sql))
 		}
 	}
 	if getForceInit() {
-		mustExec(SQL_DROP_DB)
+		mustExec(sqlDropDBConst)
 	}
-	mustExec(SQL_CREATE_DB)
+	mustExec(sqlCreateDBConst)
 	var createCost time.Duration
 	defer func() {
 		logutil.Debugf(
@@ -186,7 +186,7 @@ func createTableSqlFromMetricFamily(desc *prom.Desc, buf *bytes.Buffer) string {
 	extra := newDescExtra(desc)
 	buf.WriteString(fmt.Sprintf(
 		"create table if not exists %s.%s (`%s` datetime, `%s` double, `%s` int, `%s` varchar(20)",
-		METRIC_DB, extra.fqName, LBL_TIME, LBL_VALUE, LBL_NODE, LBL_ROLE,
+		metricDBConst, extra.fqName, lblTimeConst, lblValueConst, lblNodeConst, lblRoleConst,
 	))
 	for _, lbl := range extra.labels {
 		buf.WriteString(", `")
diff --git a/pkg/util/metric/metric_collector.go b/pkg/util/metric/metric_collector.go
index 856ed6a9a..054bb139e 100644
--- a/pkg/util/metric/metric_collector.go
+++ b/pkg/util/metric/metric_collector.go
@@ -19,13 +19,12 @@ import (
 	"context"
 	"fmt"
 	"runtime"
-	"sync"
-	"sync/atomic"
 	"time"
 
 	"github.com/matrixorigin/matrixone/pkg/container/types"
 	"github.com/matrixorigin/matrixone/pkg/logutil"
 	pb "github.com/matrixorigin/matrixone/pkg/pb/metric"
+	bp "github.com/matrixorigin/matrixone/pkg/util/batchpipe"
 	ie "github.com/matrixorigin/matrixone/pkg/util/internalExecutor"
 )
 
@@ -33,8 +32,8 @@ const CHAN_CAPACITY = 10000
 
 type MetricCollector interface {
 	SendMetrics(context.Context, []*pb.MetricFamily) error
-	Start()
-	Stop() (<-chan struct{}, bool)
+	Start() bool
+	Stop(graceful bool) (<-chan struct{}, bool)
 }
 
 type collectorOpts struct {
@@ -89,14 +88,9 @@ func (x WithFlushInterval) ApplyTo(o *collectorOpts) {
 }
 
 type metricCollector struct {
-	ieFactory         func() ie.InternalExecutor
-	isRunning         int32
-	opts              collectorOpts
-	mfCh              chan *pb.MetricFamily
-	sqlCh             chan string
-	stopWg            sync.WaitGroup
-	sqlWorkerCancel   context.CancelFunc
-	mergeWorkerCancel context.CancelFunc
+	*bp.BaseBatchPipe[*pb.MetricFamily, string]
+	ieFactory func() ie.InternalExecutor
+	opts      collectorOpts
 }
 
 func newMetricCollector(factory func() ie.InternalExecutor, opts ...collectorOpt) MetricCollector {
@@ -107,166 +101,49 @@ func newMetricCollector(factory func() ie.InternalExecutor, opts ...collectorOpt
 	c := &metricCollector{
 		ieFactory: factory,
 		opts:      initOpts,
-		sqlCh:     make(chan string, CHAN_CAPACITY),
-		mfCh:      make(chan *pb.MetricFamily, CHAN_CAPACITY),
 	}
+	base := bp.NewBaseBatchPipe[*pb.MetricFamily, string](c, bp.PipeWithBatchWorkerNum(c.opts.sqlWorkerNum))
+	c.BaseBatchPipe = base
 	return c
 }
 
 func (c *metricCollector) SendMetrics(ctx context.Context, mfs []*pb.MetricFamily) error {
 	for _, mf := range mfs {
-		c.mfCh <- mf
-	}
-	return nil
-}
-
-func (c *metricCollector) Start() {
-	if atomic.SwapInt32(&c.isRunning, 1) == 1 {
-		return
-	}
-	c.startSqlWorker()
-	c.startMergeWorker()
-}
-
-func (c *metricCollector) Stop() (<-chan struct{}, bool) {
-	if atomic.SwapInt32(&c.isRunning, 0) == 0 {
-		return nil, false
-	}
-	c.sqlWorkerCancel()
-	c.mergeWorkerCancel()
-	stopCh := make(chan struct{})
-	go func() { c.stopWg.Wait(); close(stopCh) }()
-	return stopCh, true
-}
-
-func (c *metricCollector) startSqlWorker() {
-	ctx, cancel := context.WithCancel(context.Background())
-	c.sqlWorkerCancel = cancel
-	for i := 0; i < c.opts.sqlWorkerNum; i++ {
-		exec := c.ieFactory()
-		exec.ApplySessionOverride(ie.NewOptsBuilder().Database(METRIC_DB).Internal(true).Finish())
-		c.stopWg.Add(1)
-		go c.sqlWorker(ctx, exec)
-	}
-}
-
-func (c *metricCollector) startMergeWorker() {
-	ctx, cancel := context.WithCancel(context.Background())
-	c.mergeWorkerCancel = cancel
-	c.stopWg.Add(1)
-	go c.mergeWorker(ctx)
-}
-
-func (c *metricCollector) mergeWorker(ctx context.Context) {
-	defer c.stopWg.Done()
-	mfByNames := make(map[string]*mfset)
-	sqlbuf := new(bytes.Buffer)
-	reminder := newReminder()
-	defer reminder.CleanAll()
-
-	doFlush := func(name string, set *mfset) {
-		c.pushToSqlCh(set, sqlbuf)
-		set.reset()
-		reminder.Reset(name, c.opts.flushInterval)
-	}
-
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case mf := <-c.mfCh:
-			if isFullBatchRawHist(mf) {
-				c.pushToSqlCh(newMfset(mf), sqlbuf)
-				continue
-			}
-			name := mf.GetName()
-			entryMfs := mfByNames[name]
-			if entryMfs != nil {
-				entryMfs.add(mf)
-			} else {
-				entryMfs = newMfset(mf)
-				mfByNames[name] = entryMfs
-				reminder.Register(name, c.opts.flushInterval)
-			}
-			if entryMfs.shouldFlush(&c.opts) {
-				doFlush(name, entryMfs)
-			}
-		case name := <-reminder.C:
-			if entryMfs := mfByNames[name]; entryMfs != nil && entryMfs.rows > 0 {
-				doFlush(name, entryMfs)
-			} else {
-				reminder.Reset(name, c.opts.flushInterval)
-			}
+		if err := c.SendItem(mf); err != nil {
+			return err
 		}
 	}
+	return nil
 }
 
-func (c *metricCollector) pushToSqlCh(set *mfset, buf *bytes.Buffer) {
-	if sql := set.getSql(buf); sql != "" {
-		c.sqlCh <- sql
-	}
-}
-
-func (c *metricCollector) sqlWorker(ctx context.Context, exec ie.InternalExecutor) {
-	defer c.stopWg.Done()
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case sql := <-c.sqlCh:
-			if err := exec.Exec(sql, ie.NewOptsBuilder().Finish()); err != nil {
-				logutil.Errorf("[Metric] insert error. sql: %s; err: %v", sql, err)
-			}
+func (c *metricCollector) NewItemBatchHandler() func(batch string) {
+	exec := c.ieFactory()
+	exec.ApplySessionOverride(ie.NewOptsBuilder().Database(metricDBConst).Internal(true).Finish())
+	return func(batch string) {
+		if err := exec.Exec(batch, ie.NewOptsBuilder().Finish()); err != nil {
+			logutil.Errorf("[Metric] insert error. sql: %s; err: %v", batch, err)
 		}
 	}
 }
 
-type reminder struct {
-	registry map[string]*time.Timer
-	C        chan string
-}
-
-func newReminder() *reminder {
-	return &reminder{
-		registry: make(map[string]*time.Timer),
-		C:        make(chan string, CHAN_CAPACITY),
-	}
-}
-
-func (r *reminder) Register(name string, after time.Duration) {
-	if r.registry[name] != nil {
-		panic(fmt.Sprintf("%s already registered", name))
-	}
-	r.registry[name] = time.AfterFunc(after, func() { r.C <- name })
-}
-
-func (r *reminder) Reset(name string, after time.Duration) {
-	if t := r.registry[name]; t != nil {
-		t.Reset(after)
-	}
-}
-
-func (r *reminder) CleanAll() {
-	for _, timer := range r.registry {
-		timer.Stop()
+func (c *metricCollector) NewItemBuffer(_ string) bp.ItemBuffer[*pb.MetricFamily, string] {
+	return &mfset{
+		Reminder:        bp.NewConstantClock(c.opts.flushInterval),
+		metricThreshold: c.opts.metricThreshold,
+		sampleThreshold: c.opts.sampleThreshold,
 	}
 }
 
 type mfset struct {
-	mfs  []*pb.MetricFamily
-	typ  pb.MetricType
-	rows int // how many rows it would take when flushing to db
+	bp.Reminder
+	mfs             []*pb.MetricFamily
+	typ             pb.MetricType
+	rows            int // how many buffered rows
+	metricThreshold int // how many rows should be flushed as a batch
+	sampleThreshold int // treat rawhist samples differently because it has higher generate rate
 }
 
-func newMfset(mfs ...*pb.MetricFamily) *mfset {
-	set := &mfset{}
-	for _, mf := range mfs {
-		set.add(mf)
-	}
-	return set
-}
-
-func (s *mfset) add(mf *pb.MetricFamily) {
+func (s *mfset) Add(mf *pb.MetricFamily) {
 	if s.typ == mf.GetType() {
 		s.typ = mf.GetType()
 	}
@@ -281,28 +158,33 @@ func (s *mfset) add(mf *pb.MetricFamily) {
 	s.mfs = append(s.mfs, mf)
 }
 
-func (s *mfset) shouldFlush(opts *collectorOpts) bool {
+func (s *mfset) ShouldFlush() bool {
 	switch s.typ {
 	case pb.MetricType_COUNTER, pb.MetricType_GAUGE:
-		return s.rows > opts.metricThreshold
+		return s.rows > s.metricThreshold
 	case pb.MetricType_RAWHIST:
-		return s.rows > opts.sampleThreshold
+		return s.rows > s.sampleThreshold
 	default:
 		return false
 	}
 }
 
-func (s *mfset) reset() {
+func (s *mfset) Reset() {
 	s.mfs = s.mfs[:0]
 	s.typ = pb.MetricType_COUNTER // 0
 	s.rows = 0
+	s.RemindReset()
+}
+
+func (s *mfset) IsEmpty() bool {
+	return len(s.mfs) == 0
 }
 
 // getSql extracts a insert sql from a set of MetricFamily. the bytes.Buffer is
-// used to mitigate memory allocation. getSql assume there is at least one row in mfset
-func (s *mfset) getSql(buf *bytes.Buffer) string {
+// used to mitigate memory allocation
+func (s *mfset) GetBatch(buf *bytes.Buffer) string {
 	buf.Reset()
-	buf.WriteString(fmt.Sprintf("insert into %s.%s values ", METRIC_DB, s.mfs[0].GetName()))
+	buf.WriteString(fmt.Sprintf("insert into %s.%s values ", metricDBConst, s.mfs[0].GetName()))
 	lblsBuf := new(bytes.Buffer)
 	writeValues := func(t string, v float64, lbls string) {
 		buf.WriteString("(")
diff --git a/pkg/util/metric/metric_collector_test.go b/pkg/util/metric/metric_collector_test.go
index dbc9502ce..c107b80fd 100644
--- a/pkg/util/metric/metric_collector_test.go
+++ b/pkg/util/metric/metric_collector_test.go
@@ -17,8 +17,6 @@ package metric
 import (
 	"context"
 	"regexp"
-	"sort"
-	"strconv"
 	"testing"
 	"time"
 
@@ -57,66 +55,6 @@ func newExecutorFactory(sqlch chan string) func() ie.InternalExecutor {
 	}
 }
 
-func TestCollectorRemind(t *testing.T) {
-	ms := time.Millisecond
-	r := newReminder()
-	cases := []*struct {
-		id     string
-		d      []time.Duration
-		offset int
-	}{
-		{"0", []time.Duration{11 * ms, 8 * ms, 25 * ms}, 0},
-		{"1", []time.Duration{7 * ms, 15 * ms, 16 * ms}, 0},
-		{"2", []time.Duration{3 * ms, 12 * ms, 11 * ms}, 0},
-	}
-
-	type item struct {
-		d  time.Duration
-		id string
-	}
-
-	seq := []item{}
-
-	for _, c := range cases {
-		r.Register(c.id, c.d[c.offset])
-		c.offset += 1
-		t := 0 * ms
-		for _, delta := range c.d {
-			t += delta
-			seq = append(seq, item{t, c.id})
-		}
-	}
-
-	sort.Slice(seq, func(i, j int) bool {
-		return int64(seq[i].d) < int64(seq[j].d)
-	})
-
-	gotids := make([]string, 0, 9)
-	for i := 0; i < 9; i++ {
-		id := <-r.C
-		gotids = append(gotids, id)
-		idx, _ := strconv.ParseInt(id, 10, 32)
-		c := cases[idx]
-		if c.offset < 3 {
-			r.Reset(id, c.d[c.offset])
-			c.offset++
-		}
-	}
-
-	diff := 0
-	for i := range gotids {
-		if gotids[i] != seq[i].id {
-			diff++
-		}
-	}
-
-	// Appending to reminder.C happens in a goroutine, considering its scheduling latency,
-	// here we tolerate the disorder for 2 pairs
-	if diff > 4 {
-		t.Errorf("bad order of the events, want %v, got %s", seq, gotids)
-	}
-}
-
 func TestCollectorOpts(t *testing.T) {
 	c := newMetricCollector(
 		nil, // this nil pointer won't be touched when SqlWorkerNum is set to 0
@@ -136,7 +74,7 @@ func TestCollector(t *testing.T) {
 	factory := newExecutorFactory(sqlch)
 	collector := newMetricCollector(factory, WithFlushInterval(200*time.Millisecond), WithMetricThreshold(2))
 	collector.Start()
-	defer collector.Stop()
+	defer collector.Stop(false)
 	names := []string{"m1", "m2"}
 	nodes := []int32{1, 2}
 	roles := []string{"ping", "pong"}
diff --git a/pkg/util/metric/metric_exporter.go b/pkg/util/metric/metric_exporter.go
index 78281b406..ed40fac1a 100644
--- a/pkg/util/metric/metric_exporter.go
+++ b/pkg/util/metric/metric_exporter.go
@@ -29,8 +29,8 @@ import (
 type MetricExporter interface {
 	// ExportMetricFamily can be used by a metric to push data. this method must be thread safe
 	ExportMetricFamily(context.Context, *pb.MetricFamily) error
-	Start()
-	Stop() (<-chan struct{}, bool)
+	Start() bool
+	Stop(bool) (<-chan struct{}, bool)
 }
 
 type metricExporter struct {
@@ -71,7 +71,7 @@ func (e *metricExporter) ExportMetricFamily(ctx context.Context, mf *pb.MetricFa
 	return nil
 }
 
-func (e *metricExporter) Stop() (<-chan struct{}, bool) {
+func (e *metricExporter) Stop(_ bool) (<-chan struct{}, bool) {
 	if atomic.SwapInt32(&e.isRunning, 0) == 0 {
 		return nil, false
 	}
@@ -81,9 +81,9 @@ func (e *metricExporter) Stop() (<-chan struct{}, bool) {
 	return stopCh, true
 }
 
-func (e *metricExporter) Start() {
+func (e *metricExporter) Start() bool {
 	if atomic.SwapInt32(&e.isRunning, 1) == 1 {
-		return
+		return false
 	}
 	ctx, cancel := context.WithCancel(context.Background())
 	e.cancel = cancel
@@ -101,6 +101,7 @@ func (e *metricExporter) Start() {
 			}
 		}
 	}()
+	return true
 }
 
 func (e *metricExporter) send(mfs []*pb.MetricFamily) {
diff --git a/pkg/util/metric/metric_exporter_test.go b/pkg/util/metric/metric_exporter_test.go
index 87616aa99..81a82e7e5 100644
--- a/pkg/util/metric/metric_exporter_test.go
+++ b/pkg/util/metric/metric_exporter_test.go
@@ -104,7 +104,7 @@ func TestExporter(t *testing.T) {
 		iexp := newMetricExporter(reg, dumCollect, 0, "monolithic")
 		exp = iexp.(*metricExporter)
 		exp.Start()
-		defer exp.Stop()
+		defer exp.Stop(false)
 		exp.now = dumClock
 		c := prom.NewCounter(prom.CounterOpts{Subsystem: "test", Name: "test_counter"})
 		reg.MustRegister(c)
diff --git a/pkg/util/metric/metric_test.go b/pkg/util/metric/metric_test.go
index 4fa118a3f..05855db4c 100644
--- a/pkg/util/metric/metric_test.go
+++ b/pkg/util/metric/metric_test.go
@@ -122,12 +122,12 @@ func TestCreateTable(t *testing.T) {
 	sql := createTableSqlFromMetricFamily(prom.NewDesc(name, "", []string{"zzz", "aaa"}, nil), buf)
 	assert.Equal(t, sql, fmt.Sprintf(
 		"create table if not exists %s.%s (`%s` datetime, `%s` double, `%s` int, `%s` varchar(20), `aaa` varchar(20), `zzz` varchar(20))",
-		METRIC_DB, name, LBL_TIME, LBL_VALUE, LBL_NODE, LBL_ROLE,
+		metricDBConst, name, lblTimeConst, lblValueConst, lblNodeConst, lblRoleConst,
 	))
 
 	sql = createTableSqlFromMetricFamily(prom.NewDesc(name, "", nil, nil), buf)
 	assert.Equal(t, sql, fmt.Sprintf(
 		"create table if not exists %s.%s (`%s` datetime, `%s` double, `%s` int, `%s` varchar(20))",
-		METRIC_DB, name, LBL_TIME, LBL_VALUE, LBL_NODE, LBL_ROLE,
+		metricDBConst, name, lblTimeConst, lblValueConst, lblNodeConst, lblRoleConst,
 	))
 }
diff --git a/pkg/util/metric/mtype_rawhist_test.go b/pkg/util/metric/mtype_rawhist_test.go
index 37e1a5bf2..5423bfd1a 100644
--- a/pkg/util/metric/mtype_rawhist_test.go
+++ b/pkg/util/metric/mtype_rawhist_test.go
@@ -83,7 +83,7 @@ func TestRawHistVec(t *testing.T) {
 		}
 
 		// wait Export complete
-		if err := waitTimeout(&dummyExp.WaitGroup, time.Second); err != nil {
+		if err := waitWgTimeout(&dummyExp.WaitGroup, time.Second); err != nil {
 			t.Errorf("rawHist send timeout")
 		}
 	})
@@ -136,7 +136,7 @@ func TestRawHistVec(t *testing.T) {
 		return
 	}
 
-	if err := waitTimeout(&dummyExp.WaitGroup, time.Second); err != nil {
+	if err := waitWgTimeout(&dummyExp.WaitGroup, time.Second); err != nil {
 		t.Errorf("rawHist send timeout")
 		return
 	}
@@ -198,7 +198,7 @@ func TestRawHistGather(t *testing.T) {
 	if err != nil {
 		t.Errorf("rawHist Gather err: %v", err)
 	}
-	if err := waitTimeout(&dumExp.WaitGroup, time.Second); err != nil {
+	if err := waitWgTimeout(&dumExp.WaitGroup, time.Second); err != nil {
 		t.Errorf("rawHist send timeout")
 		return
 	}
diff --git a/pkg/util/metric/testutil_test.go b/pkg/util/metric/testutil_test.go
index 14587dffc..752a1ce20 100644
--- a/pkg/util/metric/testutil_test.go
+++ b/pkg/util/metric/testutil_test.go
@@ -35,8 +35,8 @@ func withModifiedConfig(f func()) {
 	f()
 }
 
-// waitTimeout return a error if the WaitGroup doesn't return in timeout duration
-func waitTimeout(wg *sync.WaitGroup, after time.Duration) error {
+// waitWgTimeout returns an error if the WaitGroup doesn't return in timeout duration
+func waitWgTimeout(wg *sync.WaitGroup, after time.Duration) error {
 	c := make(chan struct{})
 	go func() {
 		defer close(c)
@@ -59,5 +59,5 @@ func makeDummyClock(startOffset int64) func() int64 {
 
 type dummySwitch struct{}
 
-func (dummySwitch) Start()                        {}
-func (dummySwitch) Stop() (<-chan struct{}, bool) { return nil, false }
+func (dummySwitch) Start() bool                                { return true }
+func (dummySwitch) Stop(graceful bool) (<-chan struct{}, bool) { return nil, false }
diff --git a/pkg/util/rand.go b/pkg/util/rand.go
new file mode 100644
index 000000000..2f9bac94d
--- /dev/null
+++ b/pkg/util/rand.go
@@ -0,0 +1,33 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	_ "unsafe"
+)
+
+// fastrand32 returns a lock free uint32 value. Compared to rand.Uint32, this
+// implementation scales. We're using the go runtime's implementation through a
+// linker trick.
+//
+//go:linkname fastrand32 runtime.fastrand
+func fastrand32() uint32
+
+// Fastrand64 returns a lock free uint64 value.
+// Compared to rand.Int63(), this implementation scales.
+func Fastrand64() uint64 {
+	x, y := fastrand32(), fastrand32() // 32-bit halves
+	return uint64(x)<<32 ^ uint64(y)
+}
diff --git a/pkg/util/rand_test.go b/pkg/util/rand_test.go
new file mode 100644
index 000000000..a21c6bb26
--- /dev/null
+++ b/pkg/util/rand_test.go
@@ -0,0 +1,58 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"math/rand"
+	"testing"
+)
+
+func TestFastrand64(t *testing.T) {
+	tests := []struct {
+		name    string
+		wantNot uint64
+	}{
+		{
+			name:    "normal",
+			wantNot: rand.Uint64(),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := Fastrand64(); got == tt.wantNot {
+				t.Errorf("Fastrand64() = %v, wantNot %v", got, tt.wantNot)
+			}
+		})
+	}
+}
+
+func Test_fastrand32(t *testing.T) {
+	tests := []struct {
+		name    string
+		wantNot uint32
+	}{
+		{
+			name:    "normal",
+			wantNot: rand.Uint32(),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := fastrand32(); got == tt.wantNot {
+				t.Errorf("fastrand32() = %v, wantNot %v", got, tt.wantNot)
+			}
+		})
+	}
+}
diff --git a/pkg/util/stack.go b/pkg/util/stack.go
new file mode 100644
index 000000000..ead4a7ffb
--- /dev/null
+++ b/pkg/util/stack.go
@@ -0,0 +1,65 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"fmt"
+	"io"
+	"runtime"
+
+	pkgErr "github.com/pkg/errors"
+)
+
+type StackTrace = pkgErr.StackTrace
+
+type Frame = pkgErr.Frame
+
+type Stack []uintptr
+
+// Callers mirrors the code in github.com/pkg/errors,
+// but makes the depth customizable.
+func Callers(depth int) *Stack {
+	const numFrames = 32
+	var pcs [numFrames]uintptr
+	n := runtime.Callers(2+depth, pcs[:])
+	var st Stack = pcs[0:n]
+	return &st
+}
+
+// Caller return only one Frame
+func Caller(depth int) Frame {
+	const numFrames = 1
+	var pcs [numFrames]uintptr
+	_ = runtime.Callers(2+depth, pcs[:])
+	return Frame(pcs[0])
+}
+
+// Format mirrors the code in github.com/pkg/errors.
+func (s *Stack) Format(st fmt.State, verb rune) {
+	for _, pc := range *s {
+		f := Frame(pc)
+		io.WriteString(st, "\n")
+		f.Format(st, verb)
+	}
+}
+
+// StackTrace mirrors the code in github.com/pkg/errors.
+func (s *Stack) StackTrace() pkgErr.StackTrace {
+	f := make([]pkgErr.Frame, len(*s))
+	for i := 0; i < len(f); i++ {
+		f[i] = pkgErr.Frame((*s)[i])
+	}
+	return f
+}
diff --git a/pkg/util/stack_test.go b/pkg/util/stack_test.go
new file mode 100644
index 000000000..588aeec35
--- /dev/null
+++ b/pkg/util/stack_test.go
@@ -0,0 +1,62 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestCaller(t *testing.T) {
+	type args struct {
+		depth int
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{name: "depth_0", args: args{depth: 0}, want: "stack_test.go:35"},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := Caller(tt.args.depth); fmt.Sprintf("%v", got) != tt.want {
+				t.Errorf("Caller() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestCallers(t *testing.T) {
+	type args struct {
+		depth int
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{name: "depth_0", args: args{depth: 0}, want: "\n\tstack_test.go\n\ttesting.go\n\tasm_amd64.s"},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := Callers(tt.args.depth)
+			t.Logf("Callers() = %s", got)
+			t.Logf("Callers(%%+s) = %+s", got)
+			t.Logf("Callers(%%v) = %v", got)
+			t.Logf("Callers(%%+v) = %+v", got)
+		})
+	}
+}
diff --git a/pkg/util/time.go b/pkg/util/time.go
new file mode 100644
index 000000000..43cc9537a
--- /dev/null
+++ b/pkg/util/time.go
@@ -0,0 +1,65 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"time"
+	_ "unsafe"
+)
+
+var globalStartUnixTimeNS TimeNano
+var globalStartMonoTimeNS TimeMono
+
+func init() {
+	globalStartUnixTimeNS = unixtimeNS()
+	globalStartMonoTimeNS = monotimeNS()
+}
+
+// `time.Now()` contain two syscalls in Linux.
+// One is `CLOCK_REALTIME` and another is `CLOCK_MONOTONIC`.
+// Separate it into two functions: walltime() and nanotime(), which can improve duration calculation.
+// PS: runtime.walltime() has been removed from linux-amd64
+
+//go:linkname nanotime runtime.nanotime
+func nanotime() int64
+
+type TimeMono = uint64
+
+// monotimeNS is used to calculate duration.
+func monotimeNS() TimeMono {
+	return TimeMono(nanotime())
+}
+
+type TimeNano = uint64
+
+// unixtimeNS saves time.Time as uint64
+func unixtimeNS() TimeNano {
+	t := time.Now()
+	sec, nsec := t.Unix(), t.Nanosecond()
+	return TimeNano(sec*1e9 + int64(nsec))
+}
+
+func NowNS() TimeNano {
+	mono := monotimeNS()
+	return TimeNano((mono - globalStartMonoTimeNS) + globalStartUnixTimeNS)
+}
+
+// Now generates a `hasMonotonic=0` time.Time.
+// warning: It should NOT compare with time.Time, which generated by time.Now()
+func Now() time.Time {
+	nowNS := NowNS()
+	sec, nsec := nowNS/1e9, nowNS%1e9
+	return time.Unix(int64(sec), int64(nsec))
+}
diff --git a/pkg/util/time_test.go b/pkg/util/time_test.go
new file mode 100644
index 000000000..d660054d6
--- /dev/null
+++ b/pkg/util/time_test.go
@@ -0,0 +1,65 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"testing"
+	"time"
+)
+
+const timeFormat = "2006-01-02 15:04:05"
+
+func TestNow(t *testing.T) {
+	tests := []struct {
+		name  string
+		delta time.Duration
+		want  time.Time
+	}{
+		{
+			name:  "normal",
+			delta: 100 * time.Nanosecond,
+			want:  time.Now(),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tt.want = tt.want.Add(tt.delta)
+			if got := Now(); got.Second() != tt.want.Second() || got.Format(timeFormat) != tt.want.Format(timeFormat) {
+				t.Errorf("Now() = %v, want %v", got, tt.want)
+			} else {
+				t.Logf("Now() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestNowNS(t *testing.T) {
+	tests := []struct {
+		name string
+		want TimeNano
+	}{
+		{
+			name: "normal",
+			want: NowNS(),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := NowNS(); got <= tt.want {
+				t.Errorf("NowNS() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/pkg/util/trace/batch_span_processor.go b/pkg/util/trace/batch_span_processor.go
new file mode 100644
index 000000000..809b66773
--- /dev/null
+++ b/pkg/util/trace/batch_span_processor.go
@@ -0,0 +1,88 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+	"sync"
+
+	"github.com/matrixorigin/matrixone/pkg/util/batchpipe"
+	"github.com/matrixorigin/matrixone/pkg/util/export"
+)
+
+var _ SpanProcessor = &batchSpanProcessor{}
+
+// batchSpanProcessor is a SpanProcessor that batches asynchronously-received
+// spans and sends them to a trace.Exporter when complete.
+type batchSpanProcessor struct {
+	e export.BatchProcessor
+
+	stopWait sync.WaitGroup
+	stopOnce sync.Once
+	stopCh   chan struct{}
+}
+
+func NewBatchSpanProcessor(exporter export.BatchProcessor) SpanProcessor {
+	bsp := &batchSpanProcessor{
+		e:      exporter,
+		stopCh: make(chan struct{}),
+	}
+
+	return bsp
+}
+
+// OnStart method does nothing.
+func (bsp *batchSpanProcessor) OnStart(parent context.Context, s Span) {}
+
+// OnEnd method enqueues a ReadOnlySpan for later processing.
+func (bsp *batchSpanProcessor) OnEnd(s Span) {
+	// Do not enqueue spans if we are just going to drop them.
+	if bsp.e == nil {
+		return
+	}
+	if i, ok := s.(batchpipe.HasName); !ok {
+		panic("span does not implement batchpipe.HasName")
+	} else {
+		bsp.e.Collect(DefaultContext(), i)
+	}
+}
+
+// Shutdown flushes the queue and waits until all spans are processed.
+// It only executes once. Subsequent call does nothing.
+func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	bsp.stopOnce.Do(func() {
+		wait := make(chan struct{})
+		go func() {
+			close(bsp.stopCh)
+			bsp.stopWait.Wait()
+			if bsp.e != nil {
+				// bsp.e.Shutdown(ctx)
+				if err := bsp.e.Stop(true); err != nil {
+					// TODO: otel.Handle(err)
+					panic(err)
+				}
+			}
+			close(wait)
+		}()
+		// Wait until the wait group is done or the context is cancelled
+		select {
+		case <-wait:
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+	})
+	return err
+}
diff --git a/pkg/util/trace/batch_span_processor_test.go b/pkg/util/trace/batch_span_processor_test.go
new file mode 100644
index 000000000..2a7d98d04
--- /dev/null
+++ b/pkg/util/trace/batch_span_processor_test.go
@@ -0,0 +1,135 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+	"fmt"
+	"github.com/matrixorigin/matrixone/pkg/util/batchpipe"
+	"github.com/matrixorigin/matrixone/pkg/util/export"
+	"github.com/stretchr/testify/assert"
+	"reflect"
+	"testing"
+)
+
+var _ export.BatchProcessor = &noopBatchProcessor{}
+
+type noopBatchProcessor struct{}
+
+func (n noopBatchProcessor) Collect(context.Context, batchpipe.HasName) error { return nil }
+func (n noopBatchProcessor) Start() bool                                      { return true }
+func (n noopBatchProcessor) Stop(bool) error                                  { return nil }
+
+var _ batchpipe.HasName = &namedNoopSpan{}
+
+type namedNoopSpan struct{ noopSpan }
+
+func (n namedNoopSpan) GetName() string { return "NamedNopSpan" }
+
+func TestNewBatchSpanProcessor(t *testing.T) {
+	type args struct {
+		exporter export.BatchProcessor
+	}
+	tests := []struct {
+		name string
+		args args
+		want SpanProcessor
+	}{
+		{
+			name: "normal",
+			args: args{exporter: &noopBatchProcessor{}},
+			want: &batchSpanProcessor{
+				e:      &noopBatchProcessor{},
+				stopCh: make(chan struct{}),
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := NewBatchSpanProcessor(tt.args.exporter); reflect.DeepEqual(got, tt.want) {
+				assert.Equalf(t, tt.want, got, "NewBatchSpanProcessor(%v)", tt.args.exporter)
+			}
+		})
+	}
+}
+
+func Test_batchSpanProcessor_OnEnd(t *testing.T) {
+	type fields struct {
+		e export.BatchProcessor
+	}
+	type args struct {
+		c context.Context
+		s Span
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+	}{
+		{
+			name:   "normal",
+			fields: fields{noopBatchProcessor{}},
+			args:   args{context.Background(), namedNoopSpan{}},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			bsp := NewBatchSpanProcessor(tt.fields.e)
+			bsp.OnStart(tt.args.c, tt.args.s)
+			bsp.OnEnd(tt.args.s)
+		})
+	}
+}
+
+func Test_batchSpanProcessor_Shutdown(t *testing.T) {
+	type fields struct {
+		e      export.BatchProcessor
+		stopCh chan struct{}
+	}
+	type args struct {
+		ctx context.Context
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		args    args
+		wantErr assert.ErrorAssertionFunc
+	}{
+		{
+			name: "normal",
+			fields: fields{
+				e:      noopBatchProcessor{},
+				stopCh: make(chan struct{}),
+			},
+			args: args{context.Background()},
+			wantErr: func(t assert.TestingT, err error, msgs ...interface{}) bool {
+				if err != nil {
+					t.Errorf(msgs[0].(string), msgs[1:]...)
+					return false
+				}
+				return true
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			bsp := &batchSpanProcessor{
+				e:      tt.fields.e,
+				stopCh: tt.fields.stopCh,
+			}
+			tt.wantErr(t, bsp.Shutdown(tt.args.ctx), fmt.Sprintf("Shutdown(%v)", tt.args.ctx))
+		})
+	}
+}
diff --git a/pkg/util/trace/buffer_pipe_sql.go b/pkg/util/trace/buffer_pipe_sql.go
new file mode 100644
index 000000000..2c4e56b4e
--- /dev/null
+++ b/pkg/util/trace/buffer_pipe_sql.go
@@ -0,0 +1,528 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/matrixorigin/matrixone/pkg/container/types"
+	"github.com/matrixorigin/matrixone/pkg/logutil"
+	"github.com/matrixorigin/matrixone/pkg/util"
+	bp "github.com/matrixorigin/matrixone/pkg/util/batchpipe"
+	"github.com/matrixorigin/matrixone/pkg/util/errors"
+	ie "github.com/matrixorigin/matrixone/pkg/util/internalExecutor"
+)
+
+var errorFormatter atomic.Value
+var insertSQLPrefix []string
+
+func init() {
+	// Default to verbose "%+v" so stored errors/callers carry stack detail;
+	// tests may override these formats (see buffer_pipe_sql_test.go).
+	errorFormatter.Store("%+v")
+	logStackFormatter.Store("%+v")
+
+	// Pre-compute the INSERT prefixes for every trace table; used by
+	// filterTraceInsertSql to shorten self-generated insert statements.
+	tables := []string{statementInfoTbl, spanInfoTbl, logInfoTbl, errorInfoTbl}
+	for _, table := range tables {
+		insertSQLPrefix = append(insertSQLPrefix, fmt.Sprintf("insert into %s.%s ", statsDatabase, table))
+	}
+}
+
+// IBuffer2SqlItem is the contract for items that can be buffered and later
+// rendered into an INSERT statement: a named payload that reports its byte
+// size and can release its resources.
+type IBuffer2SqlItem interface {
+	bp.HasName
+	Size() int64
+	Free()
+}
+
+var _ bp.PipeImpl[bp.HasName, any] = &batchSqlHandler{}
+
+// batchSqlHandler creates item buffers and the batch-consuming callback for
+// the trace/log/error -> SQL pipeline.
+type batchSqlHandler struct {
+	defaultOpts []buffer2SqlOption
+}
+
+// NewBufferPipe2SqlWorker returns a PipeImpl whose buffers convert collected
+// items into INSERT statements; opt is applied to every buffer it creates.
+func NewBufferPipe2SqlWorker(opt ...buffer2SqlOption) bp.PipeImpl[bp.HasName, any] {
+	return &batchSqlHandler{opt}
+}
+
+// NewItemBuffer implement batchpipe.PipeImpl
+func (t batchSqlHandler) NewItemBuffer(name string) bp.ItemBuffer[bp.HasName, any] {
+	var opts []buffer2SqlOption
+	var f genBatchFunc
+	logutil.Debugf("NewItemBuffer name: %s", name)
+	switch name {
+	case MOSpanType:
+		f = genSpanBatchSql
+	case MOLogType:
+		f = genLogBatchSql
+	case MOZapType:
+		f = genZapLogBatchSql
+	case MOStatementType:
+		f = genStatementBatchSql
+		opts = append(opts, bufferWithFilterItemFunc(filterTraceInsertSql))
+	case MOErrorType:
+		f = genErrorBatchSql
+	default:
+		// fixme: catch Panic Error
+		panic(fmt.Sprintf("unknown type %s", name))
+	}
+	opts = append(opts, bufferWithGenBatchFunc(f), bufferWithType(name))
+	opts = append(opts, t.defaultOpts...)
+	return newBuffer2Sql(opts...)
+}
+
+// NewItemBatchHandler implement batchpipe.PipeImpl
+func (t batchSqlHandler) NewItemBatchHandler() func(batch any) {
+	var f = func(b any) {}
+	if gTracerProvider.sqlExecutor == nil {
+		// fixme: handle error situation, should panic
+		logutil.Errorf("[Trace] no SQL Executor.")
+		return f
+	}
+	exec := gTracerProvider.sqlExecutor()
+	if exec == nil {
+		// fixme: handle error situation, should panic
+		logutil.Errorf("[Trace] no SQL Executor.")
+		return f
+	}
+	exec.ApplySessionOverride(ie.NewOptsBuilder().Database(statsDatabase).Internal(true).Finish())
+	f = func(b any) {
+		// fixme: CollectCycle
+		_, span := Start(DefaultContext(), "BatchHandle")
+		defer span.End()
+		batch := b.(string)
+		if len(batch) == 0 {
+			logutil.Warnf("meet empty sql")
+			return
+		}
+		if err := exec.Exec(batch, ie.NewOptsBuilder().Finish()); err != nil {
+			// fixme: error -> log -> exec.Exec -> ... cycle
+			// fixme: handle error situation re-try
+			logutil.Error(fmt.Sprintf("[Trace] faield to insert. sql: %s", batch), NoReportFiled())
+			logutil.Error(fmt.Sprintf("[Trace] faield to insert. err: %v", err), NoReportFiled())
+		}
+	}
+	return f
+}
+
+// quote escapes a string for embedding inside a double-quoted SQL literal:
+// quotes, NUL, newline/CR/tab and Ctrl-Z are replaced by escape sequences.
+// NOTE(review): the first rule matches two consecutive backslashes (`\\` in a
+// raw string), so a lone backslash passes through unescaped — confirm whether
+// that is intentional for the target SQL dialect.
+func quote(value string) string {
+	replaceRules := []struct{ src, dst string }{
+		{`\\`, `\\\\`},
+		{`'`, `\'`},
+		{`\0`, `\\0`},
+		{"\n", "\\n"},
+		{"\r", "\\r"},
+		{"\t", "\\t"},
+		{`"`, `\"`},
+		{"\x1a", "\\\\Z"},
+	}
+	for _, rule := range replaceRules {
+		value = strings.Replace(value, rule.src, rule.dst, -1)
+	}
+	return value
+}
+
+// genSpanBatchSql renders a batch of *MOSpan items into one multi-row INSERT
+// for the span_info table; returns "" for an empty batch. Panics if an item
+// is not a *MOSpan. The trailing comma of the last row is dropped via
+// buf.Next(buf.Len()-1).
+func genSpanBatchSql(in []IBuffer2SqlItem, buf *bytes.Buffer) any {
+	buf.Reset()
+	if len(in) == 0 {
+		logutil.Debugf("genSpanBatchSql empty")
+		return ""
+	}
+
+	buf.WriteString(fmt.Sprintf("insert into %s.%s ", statsDatabase, spanInfoTbl))
+	buf.WriteString("(")
+	buf.WriteString("`span_id`")
+	buf.WriteString(", `statement_id`")
+	buf.WriteString(", `parent_span_id`")
+	buf.WriteString(", `node_id`")
+	buf.WriteString(", `node_type`")
+	buf.WriteString(", `resource`")
+	buf.WriteString(", `name`")
+	buf.WriteString(", `start_time`")
+	buf.WriteString(", `end_time`")
+	buf.WriteString(", `duration`")
+	buf.WriteString(") values ")
+
+	moNode := GetNodeResource()
+
+	for _, item := range in {
+		s, ok := item.(*MOSpan)
+		if !ok {
+			panic("Not MOSpan")
+		}
+		buf.WriteString("(")
+		buf.WriteString(fmt.Sprintf("%d", s.SpanID))
+		buf.WriteString(fmt.Sprintf(", %d", s.TraceID)) // statement_id column carries the TraceID
+		buf.WriteString(fmt.Sprintf(", %d", s.parent.SpanContext().SpanID))
+		buf.WriteString(fmt.Sprintf(", %d", moNode.NodeID))                                // node_id
+		buf.WriteString(fmt.Sprintf(`, "%s"`, moNode.NodeType.String()))                   // node_type
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.tracer.provider.resource.String()))) // resource
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Name.String())))                     // Name
+		buf.WriteString(fmt.Sprintf(`, "%s"`, nanoSec2DatetimeString(s.StartTimeNS)))      // start_time
+		buf.WriteString(fmt.Sprintf(`, "%s"`, nanoSec2DatetimeString(s.EndTimeNS)))        // end_time
+		buf.WriteString(fmt.Sprintf(", %d", s.Duration))                                   // Duration
+		buf.WriteString("),")
+	}
+	return string(buf.Next(buf.Len() - 1))
+}
+
+// logStackFormatter holds the fmt verb (string) used to render MOLog.Caller.
+var logStackFormatter atomic.Value
+
+// genLogBatchSql renders a batch of *MOLog items into one multi-row INSERT
+// for the log_info table; returns "" for an empty batch. Panics if an item
+// is not a *MOLog.
+func genLogBatchSql(in []IBuffer2SqlItem, buf *bytes.Buffer) any {
+	buf.Reset()
+	if len(in) == 0 {
+		logutil.Debugf("genLogBatchSql empty")
+		return ""
+	}
+
+	buf.WriteString(fmt.Sprintf("insert into %s.%s ", statsDatabase, logInfoTbl))
+	buf.WriteString("(")
+	buf.WriteString("`span_id`")
+	buf.WriteString(", `statement_id`")
+	buf.WriteString(", `node_id`")
+	buf.WriteString(", `node_type`")
+	buf.WriteString(", `timestamp`")
+	buf.WriteString(", `name`")
+	buf.WriteString(", `level`")
+	buf.WriteString(", `caller`")
+	buf.WriteString(", `message`")
+	buf.WriteString(", `extra`")
+	buf.WriteString(") values ")
+
+	moNode := GetNodeResource()
+
+	for _, item := range in {
+		s, ok := item.(*MOLog)
+		if !ok {
+			panic("Not MOLog")
+		}
+		buf.WriteString("(")
+		buf.WriteString(fmt.Sprintf("%d", s.SpanId))
+		buf.WriteString(fmt.Sprintf(", %d", s.StatementId))
+		buf.WriteString(fmt.Sprintf(", %d", moNode.NodeID))                                                     // node_id
+		buf.WriteString(fmt.Sprintf(`, "%s"`, moNode.NodeType.String()))                                        // node_type
+		buf.WriteString(fmt.Sprintf(`, "%s"`, nanoSec2DatetimeString(s.Timestamp)))                             // timestamp
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Name)))                                                   // name
+		buf.WriteString(fmt.Sprintf(`, "%s"`, s.Level.String()))                                                // log level
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(fmt.Sprintf(logStackFormatter.Load().(string), s.Caller)))) // caller
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Message)))                                                // message
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Extra)))                                                  // extra
+		buf.WriteString("),")
+	}
+	return string(buf.Next(buf.Len() - 1))
+}
+
+// genZapLogBatchSql renders a batch of *MOZap items into one multi-row INSERT
+// for the log_info table; returns "" for an empty batch. Panics if an item
+// is not a *MOZap.
+// NOTE(review): LoggerName and Caller are written without quote(); confirm
+// they can never contain characters needing escaping.
+func genZapLogBatchSql(in []IBuffer2SqlItem, buf *bytes.Buffer) any {
+	buf.Reset()
+	if len(in) == 0 {
+		logutil.Debugf("genZapLogBatchSql empty")
+		return ""
+	}
+
+	buf.WriteString(fmt.Sprintf("insert into %s.%s ", statsDatabase, logInfoTbl))
+	buf.WriteString("(")
+	buf.WriteString("`span_id`")
+	buf.WriteString(", `statement_id`")
+	buf.WriteString(", `node_id`")
+	buf.WriteString(", `node_type`")
+	buf.WriteString(", `timestamp`")
+	buf.WriteString(", `name`")
+	buf.WriteString(", `level`")
+	buf.WriteString(", `caller`")
+	buf.WriteString(", `message`")
+	buf.WriteString(", `extra`")
+	buf.WriteString(") values ")
+
+	moNode := GetNodeResource()
+
+	for _, item := range in {
+		s, ok := item.(*MOZap)
+		if !ok {
+			panic("Not MOZap")
+		}
+
+		buf.WriteString("(")
+		buf.WriteString(fmt.Sprintf("%d", s.SpanContext.SpanID))
+		buf.WriteString(fmt.Sprintf(", %d", s.SpanContext.TraceID))
+		buf.WriteString(fmt.Sprintf(", %d", moNode.NodeID))                                      // node_id
+		buf.WriteString(fmt.Sprintf(`, "%s"`, moNode.NodeType.String()))                         // node_type
+		buf.WriteString(fmt.Sprintf(`, "%s"`, s.Timestamp.Format("2006-01-02 15:04:05.000000"))) // timestamp
+		buf.WriteString(fmt.Sprintf(`, "%s"`, s.LoggerName))                                     // name
+		buf.WriteString(fmt.Sprintf(`, "%s"`, s.Level.String()))                                 // log level
+		buf.WriteString(fmt.Sprintf(`, "%s"`, s.Caller))                                         // caller
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Message)))                                 // message
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Extra)))                                   // extra
+		buf.WriteString("),")
+	}
+	return string(buf.Next(buf.Len() - 1))
+}
+
+// genStatementBatchSql renders a batch of *StatementInfo items into one
+// multi-row INSERT for the statement_info table; returns "" for an empty
+// batch. Panics if an item is not a *StatementInfo.
+func genStatementBatchSql(in []IBuffer2SqlItem, buf *bytes.Buffer) any {
+	buf.Reset()
+	if len(in) == 0 {
+		logutil.Debugf("genStatementBatchSql empty")
+		return ""
+	}
+
+	buf.WriteString(fmt.Sprintf("insert into %s.%s ", statsDatabase, statementInfoTbl))
+	buf.WriteString("(")
+	buf.WriteString("`statement_id`")
+	buf.WriteString(", `transaction_id`")
+	buf.WriteString(", `session_id`")
+	buf.WriteString(", `account`")
+	buf.WriteString(", `user`")
+	buf.WriteString(", `host`")
+	buf.WriteString(", `database`")
+	buf.WriteString(", `statement`")
+	buf.WriteString(", `statement_tag`")
+	buf.WriteString(", `statement_fingerprint`")
+	buf.WriteString(", `node_id`")
+	buf.WriteString(", `node_type`")
+	buf.WriteString(", `request_at`")
+	buf.WriteString(", `status`")
+	buf.WriteString(", `exec_plan`")
+	buf.WriteString(") values ")
+
+	moNode := GetNodeResource()
+
+	for _, item := range in {
+		s, ok := item.(*StatementInfo)
+		if !ok {
+			panic("Not StatementInfo")
+		}
+		buf.WriteString("(")
+		buf.WriteString(fmt.Sprintf("%d", s.StatementID))
+		buf.WriteString(fmt.Sprintf(", %d", s.TransactionID))
+		buf.WriteString(fmt.Sprintf(", %d", s.SessionID))
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Account)))
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.User)))
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Host)))
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Database)))
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Statement)))
+		// bugfix: values must follow the declared column order —
+		// `statement_tag` precedes `statement_fingerprint` in the column
+		// list above, but the values were previously written reversed,
+		// storing the fingerprint in the tag column and vice versa.
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.StatementTag)))         // statement_tag
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.StatementFingerprint))) // statement_fingerprint
+		buf.WriteString(fmt.Sprintf(", %d", moNode.NodeID))
+		buf.WriteString(fmt.Sprintf(`, "%s"`, moNode.NodeType.String()))
+		buf.WriteString(fmt.Sprintf(`, "%s"`, nanoSec2DatetimeString(s.RequestAt)))
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Status.String())))
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.ExecPlan)))
+		buf.WriteString("),")
+	}
+	return string(buf.Next(buf.Len() - 1))
+}
+
+// genErrorBatchSql renders a batch of *MOErrorHolder items into one
+// multi-row INSERT for the error_info table; returns "" for an empty batch.
+// Panics if an item is not a *MOErrorHolder. The statement_id/span_id
+// columns are taken from the error's attached context span when available,
+// otherwise from the default context's span.
+func genErrorBatchSql(in []IBuffer2SqlItem, buf *bytes.Buffer) any {
+	buf.Reset()
+	if len(in) == 0 {
+		logutil.Debugf("genErrorBatchSql empty")
+		return ""
+	}
+
+	buf.WriteString(fmt.Sprintf("insert into %s.%s ", statsDatabase, errorInfoTbl))
+	buf.WriteString("(")
+	buf.WriteString("`statement_id`")
+	buf.WriteString(", `span_id`")
+	buf.WriteString(", `node_id`")
+	buf.WriteString(", `node_type`")
+	buf.WriteString(", `err_code`")
+	buf.WriteString(", `stack`")
+	buf.WriteString(", `timestamp`")
+	buf.WriteString(") values ")
+
+	moNode := GetNodeResource()
+
+	var span Span
+	for _, item := range in {
+		s, ok := item.(*MOErrorHolder)
+		if !ok {
+			panic("Not MOErrorHolder")
+		}
+		if ct := errors.GetContextTracer(s.Error); ct != nil && ct.Context() != nil {
+			span = SpanFromContext(ct.Context())
+		} else {
+			span = SpanFromContext(DefaultContext())
+		}
+		buf.WriteString("(")
+		buf.WriteString(fmt.Sprintf("%d", span.SpanContext().TraceID))
+		buf.WriteString(fmt.Sprintf(", %d", span.SpanContext().SpanID))
+		buf.WriteString(fmt.Sprintf(", %d", moNode.NodeID))
+		buf.WriteString(fmt.Sprintf(`, "%s"`, moNode.NodeType.String()))
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(s.Error.Error())))                                  // err_code: currently the error message
+		buf.WriteString(fmt.Sprintf(`, "%s"`, quote(fmt.Sprintf(errorFormatter.Load().(string), s.Error)))) // stack (format set via errorFormatter)
+		buf.WriteString(fmt.Sprintf(`, "%s"`, nanoSec2DatetimeString(s.Timestamp)))
+		buf.WriteString("),")
+	}
+	return string(buf.Next(buf.Len() - 1))
+}
+
+// filterTraceInsertSql shortens a StatementInfo whose SQL text is one of this
+// pipeline's own insert statements, replacing the full statement with just
+// the matching prefix to avoid recording huge self-generated SQL.
+// NOTE(review): uses strings.Contains, so the prefix may match anywhere in
+// the statement, and the loop keeps scanning after a match — confirm both
+// are intended.
+func filterTraceInsertSql(i IBuffer2SqlItem) {
+	s := i.(*StatementInfo)
+	for _, prefix := range insertSQLPrefix {
+		if strings.Contains(s.Statement, prefix) {
+			logutil.Debugf("find insert system sql, short it.")
+			s.Statement = prefix
+		}
+	}
+}
+
+var _ bp.ItemBuffer[bp.HasName, any] = &buffer2Sql{}
+
+// buffer2Sql catch item, like trace/log/error, buffer
+type buffer2Sql struct {
+	bp.Reminder   // see bufferWithReminder
+	buf           []IBuffer2SqlItem
+	mux           sync.Mutex // guards buf
+	bufferType    string     // see bufferWithType
+	size          int64      // accumulated item bytes; read/written atomically
+	sizeThreshold int64      // see bufferWithSizeThreshold
+
+	filterItemFunc filterItemFunc
+	genBatchFunc   genBatchFunc
+}
+
+// filterItemFunc may rewrite an item before it is buffered.
+type filterItemFunc func(IBuffer2SqlItem)
+
+// genBatchFunc renders buffered items into a batch value (a SQL string).
+type genBatchFunc func([]IBuffer2SqlItem, *bytes.Buffer) any
+
+var noopFilterItemFunc = func(IBuffer2SqlItem) {}
+var noopGenBatchSQL = genBatchFunc(func([]IBuffer2SqlItem, *bytes.Buffer) any { return "" })
+
+// newBuffer2Sql builds a buffer with defaults (5s reminder, 1 MB flush
+// threshold, no-op filter/generator) and applies opts on top. Returns nil
+// if an option cleared a required field.
+func newBuffer2Sql(opts ...buffer2SqlOption) *buffer2Sql {
+	b := &buffer2Sql{
+		Reminder:       bp.NewConstantClock(5 * time.Second),
+		buf:            make([]IBuffer2SqlItem, 0, 10240),
+		sizeThreshold:  1 * MB,
+		filterItemFunc: noopFilterItemFunc,
+		genBatchFunc:   noopGenBatchSQL,
+	}
+	for _, opt := range opts {
+		opt.apply(b)
+	}
+	logutil.Debugf("newBuffer2Sql, Reminder next: %v", b.Reminder.RemindNextAfter())
+	if b.genBatchFunc == nil || b.filterItemFunc == nil || b.Reminder == nil {
+		logutil.Debug("newBuffer2Sql meet nil elem")
+		return nil
+	}
+	return b
+}
+
+// Add filters and appends an item, accumulating its size atomically so
+// lock-free readers (Size/ShouldFlush) stay consistent. Panics if the item
+// does not implement IBuffer2SqlItem.
+func (b *buffer2Sql) Add(i bp.HasName) {
+	b.mux.Lock()
+	defer b.mux.Unlock()
+	if item, ok := i.(IBuffer2SqlItem); !ok {
+		panic("not implement interface IBuffer2SqlItem")
+	} else {
+		b.filterItemFunc(item)
+		b.buf = append(b.buf, item)
+		atomic.AddInt64(&b.size, item.Size())
+	}
+}
+
+// Reset clears the buffered items and the accumulated size.
+// The size counter is cleared with an atomic store because Size() and
+// ShouldFlush() read it atomically without taking the mutex; a plain
+// write here would be a data race under -race.
+func (b *buffer2Sql) Reset() {
+	b.mux.Lock()
+	defer b.mux.Unlock()
+	b.buf = b.buf[0:0]
+	atomic.StoreInt64(&b.size, 0)
+}
+
+// IsEmpty reports whether the buffer holds no items (thread-safe).
+func (b *buffer2Sql) IsEmpty() bool {
+	b.mux.Lock()
+	defer b.mux.Unlock()
+	return b.isEmpty()
+}
+
+// isEmpty is the lock-free variant; caller must hold b.mux.
+func (b *buffer2Sql) isEmpty() bool {
+	return len(b.buf) == 0
+}
+
+// ShouldFlush reports whether the accumulated size exceeds the threshold.
+func (b *buffer2Sql) ShouldFlush() bool {
+	return atomic.LoadInt64(&b.size) > b.sizeThreshold
+}
+
+// Size returns the accumulated byte size of buffered items.
+func (b *buffer2Sql) Size() int64 {
+	return atomic.LoadInt64(&b.size)
+}
+
+// GetBufferType returns the item-type name this buffer was created for.
+func (b *buffer2Sql) GetBufferType() string {
+	return b.bufferType
+}
+
+// GetBatch renders the buffered items into a batch value (a SQL string)
+// using the configured genBatchFunc; returns "" when empty. The passed
+// buf is scratch space for the generator.
+func (b *buffer2Sql) GetBatch(buf *bytes.Buffer) any {
+	// fixme: CollectCycle
+	_, span := Start(DefaultContext(), "GenBatch")
+	defer span.End()
+	b.mux.Lock()
+	defer b.mux.Unlock()
+
+	if b.isEmpty() {
+		return ""
+	}
+	return b.genBatchFunc(b.buf, buf)
+}
+
+// buffer2SqlOption configures a buffer2Sql at construction time.
+type buffer2SqlOption interface {
+	apply(*buffer2Sql)
+}
+
+// buffer2SqlOptionFunc adapts a plain function to buffer2SqlOption.
+type buffer2SqlOptionFunc func(*buffer2Sql)
+
+func (f buffer2SqlOptionFunc) apply(b *buffer2Sql) {
+	f(b)
+}
+
+// bufferWithReminder overrides the flush-reminder clock.
+func bufferWithReminder(reminder bp.Reminder) buffer2SqlOption {
+	return buffer2SqlOptionFunc(func(b *buffer2Sql) {
+		b.Reminder = reminder
+	})
+}
+
+// bufferWithType records the item-type name the buffer serves.
+func bufferWithType(name string) buffer2SqlOption {
+	return buffer2SqlOptionFunc(func(b *buffer2Sql) {
+		b.bufferType = name
+	})
+}
+
+// bufferWithSizeThreshold overrides the byte threshold that triggers a flush.
+func bufferWithSizeThreshold(size int64) buffer2SqlOption {
+	return buffer2SqlOptionFunc(func(b *buffer2Sql) {
+		b.sizeThreshold = size
+	})
+}
+
+// bufferWithFilterItemFunc sets the per-item rewrite hook applied on Add.
+func bufferWithFilterItemFunc(f filterItemFunc) buffer2SqlOption {
+	return buffer2SqlOptionFunc(func(b *buffer2Sql) {
+		b.filterItemFunc = f
+	})
+}
+
+// bufferWithGenBatchFunc sets the batch-SQL generator used by GetBatch.
+func bufferWithGenBatchFunc(f genBatchFunc) buffer2SqlOption {
+	return buffer2SqlOptionFunc(func(b *buffer2Sql) {
+		b.genBatchFunc = f
+	})
+}
+
+// nanoSec2Datetime implement container/types/datetime.go Datetime.String2
+// NOTE(review): packs seconds<<20 + microseconds, mirroring the Datetime
+// internal layout rather than a calendar conversion — confirm against the
+// types.Datetime encoding before relying on the result.
+func nanoSec2Datetime(t util.TimeMono) types.Datetime {
+	sec, nsec := t/1e9, t%1e9
+	// calculate like Datetime::Now() in datetime.go, but year = 0053
+	return types.Datetime((sec << 20) + nsec/1000)
+}
+
+// nanoSec2DatetimeString formats a monotonic-nanosecond timestamp as
+// "2006-01-02 15:04:05.000000" in the process-local time zone.
+func nanoSec2DatetimeString(t util.TimeMono) string {
+	sec, nsec := t/1e9, t%1e9
+	// fixme: format() should use db's time-zone
+	return time.Unix(int64(sec), int64(nsec)).Format("2006-01-02 15:04:05.000000")
+}
diff --git a/pkg/util/trace/buffer_pipe_sql_test.go b/pkg/util/trace/buffer_pipe_sql_test.go
new file mode 100644
index 000000000..e5d1ccdd5
--- /dev/null
+++ b/pkg/util/trace/buffer_pipe_sql_test.go
@@ -0,0 +1,1105 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"github.com/matrixorigin/matrixone/pkg/logutil"
+	"github.com/stretchr/testify/require"
+	"reflect"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/matrixorigin/matrixone/pkg/container/types"
+	"github.com/matrixorigin/matrixone/pkg/util"
+	"github.com/matrixorigin/matrixone/pkg/util/batchpipe"
+	"github.com/matrixorigin/matrixone/pkg/util/errors"
+	"github.com/matrixorigin/matrixone/pkg/util/internalExecutor"
+
+	"github.com/google/gops/agent"
+	"github.com/stretchr/testify/assert"
+	"go.uber.org/zap/zapcore"
+)
+
+// shared fixtures: a scratch buffer, two chained test errors, and the
+// baseline buffer options (1 KB flush threshold) used across tests.
+var buf = new(bytes.Buffer)
+var err1 = errors.WithStack(errors.New("test1"))
+var err2 = errors.Wrapf(err1, "test2")
+var testBaseBuffer2SqlOption = []buffer2SqlOption{bufferWithSizeThreshold(1 * KB)}
+var nodeStateSpanIdStr string
+
+// noop reporters satisfy the logutil/errors hooks without re-entering
+// the trace pipeline during tests.
+func noopReportLog(context.Context, zapcore.Level, int, string, ...any) {}
+func noopReportError(context.Context, error, int)                       {}
+
+func init() {
+	setup()
+}
+
+// setup initializes the tracer provider in debug mode with a nil SQL
+// executor, installs noop log/error reporters, captures the default span's
+// "traceID, spanID" string for SQL expectations, and starts a gops agent.
+// Panics on any failure so the whole package fails fast.
+func setup() {
+	if _, err := Init(
+		context.Background(),
+		EnableTracer(true),
+		WithMOVersion("v0.test.0"),
+		WithNode(0, NodeTypeNode),
+		WithSQLExecutor(func() internalExecutor.InternalExecutor {
+			return nil
+		}),
+		DebugMode(true),
+	); err != nil {
+		panic(err)
+	}
+	logutil.SetLogReporter(&logutil.TraceReporter{ReportLog: noopReportLog, LevelSignal: SetLogLevel})
+	errors.SetErrorReporter(noopReportError)
+
+	sc := SpanFromContext(DefaultContext()).SpanContext()
+	nodeStateSpanIdStr = fmt.Sprintf("%d, %d", sc.TraceID, sc.SpanID)
+
+	if err := agent.Listen(agent.Options{}); err != nil {
+		_ = fmt.Errorf("listen gops agent failed: %s", err)
+		panic(err)
+	}
+	fmt.Println("Finish tests init.")
+}
+
+// Test_newBuffer2Sql_base exercises the basic buffer lifecycle:
+// empty -> add -> (no flush, noop batch) -> reset -> empty.
+func Test_newBuffer2Sql_base(t *testing.T) {
+
+	buf := newBuffer2Sql()
+	byteBuf := new(bytes.Buffer)
+	assert.Equal(t, true, buf.IsEmpty())
+	buf.Add(&MOSpan{})
+	assert.Equal(t, false, buf.IsEmpty())
+	assert.Equal(t, false, buf.ShouldFlush())
+	assert.Equal(t, "", buf.GetBatch(byteBuf))
+	buf.Reset()
+	assert.Equal(t, true, buf.IsEmpty())
+}
+
+// TestNewSpanBufferPipeWorker checks the constructor stores the supplied
+// options unchanged in the returned handler.
+func TestNewSpanBufferPipeWorker(t *testing.T) {
+	type args struct {
+		opt []buffer2SqlOption
+	}
+	opts := testBaseBuffer2SqlOption[:]
+	tests := []struct {
+		name string
+		args args
+		want batchpipe.PipeImpl[batchpipe.HasName, any]
+	}{
+		{
+			name: "basic",
+			args: args{
+				opt: opts,
+			},
+			want: &batchSqlHandler{defaultOpts: opts},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := NewBufferPipe2SqlWorker(tt.args.opt...); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("NewBufferPipe2SqlWorker() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_batchSqlHandler_NewItemBuffer_Check_genBatchFunc verifies each item
+// type name maps to its matching SQL generator (compared by function pointer,
+// since func values are not comparable with ==).
+func Test_batchSqlHandler_NewItemBuffer_Check_genBatchFunc(t1 *testing.T) {
+	type args struct {
+		opt  []buffer2SqlOption
+		name string
+	}
+	opts := testBaseBuffer2SqlOption[:]
+	tests := []struct {
+		name string
+		args args
+		want genBatchFunc
+	}{
+		{name: "span_type", args: args{opt: opts, name: MOSpanType}, want: genSpanBatchSql},
+		{name: "log_type", args: args{opt: opts, name: MOLogType}, want: genLogBatchSql},
+		{name: "statement_type", args: args{opt: opts, name: MOStatementType},
+			want: genStatementBatchSql},
+		{name: "error_type", args: args{opt: opts, name: MOErrorType},
+			want: genErrorBatchSql},
+	}
+	for _, tt := range tests {
+		t1.Run(tt.name, func(t1 *testing.T) {
+			t := batchSqlHandler{
+				defaultOpts: opts,
+			}
+			if got := t.NewItemBuffer(tt.args.name); reflect.ValueOf(got.(*buffer2Sql).genBatchFunc).Pointer() != reflect.ValueOf(tt.want).Pointer() {
+				t1.Errorf("NewItemBuffer()'s genBatchFunc = %v, want %v", got.(*buffer2Sql).genBatchFunc, tt.want)
+			}
+		})
+	}
+}
+
+// Test_batchSqlHandler_genErrorBatchSql pins the exact INSERT text for
+// single and multi error batches, covering both default-context and
+// attached-context span id sources. Time zone is pinned to +0000 so the
+// datetime strings are stable.
+func Test_batchSqlHandler_genErrorBatchSql(t1 *testing.T) {
+	time.Local = time.FixedZone("CST", 0) // set time-zone +0000
+	type args struct {
+		in  []IBuffer2SqlItem
+		buf *bytes.Buffer
+	}
+	buf := new(bytes.Buffer)
+	newCtx, span := Start(DefaultContext(), "Test_batchSqlHandler_genErrorBatchSql")
+	defer span.End()
+	sc := SpanFromContext(newCtx).SpanContext()
+	newStatementSpanIdStr := fmt.Sprintf("%d, %d", sc.TraceID, sc.SpanID)
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "single_error",
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOErrorHolder{Error: err1, Timestamp: uint64(0)},
+				},
+				buf: buf,
+			},
+			want: `insert into system.error_info (` +
+				"`statement_id`, `span_id`, `node_id`, `node_type`, `err_code`, `stack`, `timestamp`" +
+				`) values (` + nodeStateSpanIdStr + `, 0, "Node", "test1", "test1", "1970-01-01 00:00:00.000000")`,
+		},
+		{
+			name: "multi_error",
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOErrorHolder{Error: err1, Timestamp: uint64(0)},
+					&MOErrorHolder{Error: err2, Timestamp: uint64(time.Millisecond + time.Microsecond)},
+					&MOErrorHolder{Error: errors.WithContext(newCtx, err2),
+						Timestamp: uint64(time.Millisecond + time.Microsecond)},
+				},
+				buf: buf,
+			},
+			want: `insert into system.error_info (` +
+				"`statement_id`, `span_id`, `node_id`, `node_type`, `err_code`, `stack`, `timestamp`" +
+				`) values (` + nodeStateSpanIdStr + `, 0, "Node", "test1", "test1", "1970-01-01 00:00:00.000000")` +
+				`,(` + nodeStateSpanIdStr + `, 0, "Node", "test2: test1", "test2: test1", "1970-01-01 00:00:00.001001")` +
+				`,(` + newStatementSpanIdStr + `, 0, "Node", "test2: test1", "test2: test1", "1970-01-01 00:00:00.001001")`,
+		},
+	}
+	// use compact "%v" so the stack column holds just the message here
+	errorFormatter.Store("%v")
+	for _, tt := range tests {
+		t1.Run(tt.name, func(t1 *testing.T) {
+			if got := genErrorBatchSql(tt.args.in, tt.args.buf); got != tt.want {
+				t1.Errorf("genErrorBatchSql() = %v,\n want %v", got, tt.want)
+			} else {
+				t1.Logf("SQL: %s", got)
+			}
+		})
+	}
+}
+
+// Test_batchSqlHandler_genZapBatchSql pins the exact INSERT text produced
+// from MOZap items for single and multi batches (time zone pinned +0000).
+func Test_batchSqlHandler_genZapBatchSql(t1 *testing.T) {
+	time.Local = time.FixedZone("CST", 0) // set time-zone +0000
+	type args struct {
+		in  []IBuffer2SqlItem
+		buf *bytes.Buffer
+	}
+	buf := new(bytes.Buffer)
+	sc := SpanContextWithIDs(1, 1)
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "single",
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOZap{
+						Level:       zapcore.InfoLevel,
+						SpanContext: &sc,
+						Timestamp:   time.Unix(0, 0),
+						Caller:      "trace/buffer_pipe_sql_test.go:100",
+						Message:     "info message",
+						Extra:       "{}",
+					},
+				},
+				buf: buf,
+			},
+			want: `insert into system.log_info (` +
+				"`span_id`, `statement_id`, `node_id`, `node_type`, `timestamp`, `name`, `level`, `caller`, `message`, `extra`" +
+				`) values (1, 1, 0, "Node", "1970-01-01 00:00:00.000000", "", "info", "trace/buffer_pipe_sql_test.go:100", "info message", "{}")`,
+		},
+		{
+			name: "multi",
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOZap{
+						Level:       zapcore.InfoLevel,
+						SpanContext: &sc,
+						Timestamp:   time.Unix(0, 0),
+						Caller:      "trace/buffer_pipe_sql_test.go:100",
+						Message:     "info message",
+						Extra:       "{}",
+					},
+					&MOZap{
+						Level:       zapcore.DebugLevel,
+						SpanContext: &sc,
+						Timestamp:   time.Unix(0, int64(time.Microsecond+time.Millisecond)),
+						Caller:      "trace/buffer_pipe_sql_test.go:100",
+						Message:     "debug message",
+						Extra:       "{}",
+					},
+				},
+				buf: buf,
+			},
+			want: `insert into system.log_info (` +
+				"`span_id`, `statement_id`, `node_id`, `node_type`, `timestamp`, `name`, `level`, `caller`, `message`, `extra`" +
+				`) values (1, 1, 0, "Node", "1970-01-01 00:00:00.000000", "", "info", "trace/buffer_pipe_sql_test.go:100", "info message", "{}")` +
+				`,(1, 1, 0, "Node", "1970-01-01 00:00:00.001001", "", "debug", "trace/buffer_pipe_sql_test.go:100", "debug message", "{}")`,
+		},
+	}
+	for _, tt := range tests {
+		t1.Run(tt.name, func(t1 *testing.T) {
+			if got := genZapLogBatchSql(tt.args.in, tt.args.buf); got != tt.want {
+				t1.Errorf("genZapLogBatchSql() = %v,\n want %v", got, tt.want)
+			} else {
+				t1.Logf("SQL: %s", got)
+			}
+		})
+	}
+	time.Local = time.FixedZone("CST", 0) // NOTE(review): redundant re-set, already done at function entry
+}
+
+// Test_batchSqlHandler_genLogBatchSql pins the exact INSERT text produced
+// from MOLog items. The caller column is rendered with the "%n" stack verb
+// (function name only) so the expectation is independent of file paths.
+func Test_batchSqlHandler_genLogBatchSql(t1 *testing.T) {
+	time.Local = time.FixedZone("CST", 0) // set time-zone +0000
+	type args struct {
+		in  []IBuffer2SqlItem
+		buf *bytes.Buffer
+	}
+	buf := new(bytes.Buffer)
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "single",
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOLog{
+						StatementId: 1,
+						SpanId:      1,
+						Timestamp:   uint64(0),
+						Level:       zapcore.InfoLevel,
+						Caller:      util.Caller(0),
+						Message:     "info message",
+						Extra:       "{}",
+					},
+				},
+				buf: buf,
+			},
+			want: `insert into system.log_info (` +
+				"`span_id`, `statement_id`, `node_id`, `node_type`, `timestamp`, `name`, `level`, `caller`, `message`, `extra`" +
+				`) values (1, 1, 0, "Node", "1970-01-01 00:00:00.000000", "", "info", "Test_batchSqlHandler_genLogBatchSql", "info message", "{}")`,
+		},
+		{
+			name: "multi",
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOLog{
+						StatementId: 1,
+						SpanId:      1,
+						Timestamp:   uint64(0),
+						Level:       zapcore.InfoLevel,
+						Caller:      util.Caller(0),
+						Message:     "info message",
+						Extra:       "{}",
+					},
+					&MOLog{
+						StatementId: 1,
+						SpanId:      1,
+						Timestamp:   uint64(time.Millisecond + time.Microsecond),
+						Level:       zapcore.DebugLevel,
+						Caller:      util.Caller(0),
+						Message:     "debug message",
+						Extra:       "{}",
+					},
+				},
+				buf: buf,
+			},
+			want: `insert into system.log_info (` +
+				"`span_id`, `statement_id`, `node_id`, `node_type`, `timestamp`, `name`, `level`, `caller`, `message`, `extra`" +
+				`) values (1, 1, 0, "Node", "1970-01-01 00:00:00.000000", "", "info", "Test_batchSqlHandler_genLogBatchSql", "info message", "{}")` +
+				`,(1, 1, 0, "Node", "1970-01-01 00:00:00.001001", "", "debug", "Test_batchSqlHandler_genLogBatchSql", "debug message", "{}")`,
+		},
+	}
+	logStackFormatter.Store("%n")
+	// show how each supported verb renders the captured caller frame
+	t1.Logf("%%n : %n", tests[0].args.in[0].(*MOLog).Caller)
+	t1.Logf("%%s : %s", tests[0].args.in[0].(*MOLog).Caller)
+	t1.Logf("%%+s: %+s", tests[0].args.in[0].(*MOLog).Caller)
+	t1.Logf("%%v : %v", tests[0].args.in[0].(*MOLog).Caller)
+	t1.Logf("%%+v: %+v", tests[0].args.in[0].(*MOLog).Caller)
+	for _, tt := range tests {
+		t1.Run(tt.name, func(t1 *testing.T) {
+			if got := genLogBatchSql(tt.args.in, tt.args.buf); got != tt.want {
+				t1.Errorf("genLogBatchSql() = %v, want %v", got, tt.want)
+			} else {
+				t1.Logf("SQL: %s", got)
+			}
+		})
+	}
+}
+
+// Test_batchSqlHandler_genSpanBatchSql pins the exact INSERT text produced
+// from MOSpan items, including the JSON-escaped resource column
+// (time zone pinned +0000).
+func Test_batchSqlHandler_genSpanBatchSql(t1 *testing.T) {
+	time.Local = time.FixedZone("CST", 0) // set time-zone +0000
+	type args struct {
+		in  []IBuffer2SqlItem
+		buf *bytes.Buffer
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "single_span",
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOSpan{
+						SpanConfig:  SpanConfig{SpanContext: SpanContext{TraceID: 1, SpanID: 1}, parent: noopSpan{}},
+						Name:        *bytes.NewBuffer([]byte("span1")),
+						StartTimeNS: util.TimeNano(0),
+						EndTimeNS:   util.TimeNano(time.Microsecond),
+						Duration:    util.TimeNano(time.Microsecond),
+						tracer:      gTracer.(*MOTracer),
+					},
+				},
+				buf: buf,
+			},
+			want: `insert into system.span_info (` +
+				"`span_id`, `statement_id`, `parent_span_id`, `node_id`, `node_type`, `resource`, `name`, `start_time`, `end_time`, `duration`" +
+				`) values (1, 1, 0, 0, "Node", "{\"Node\":{\"node_id\":0,\"node_type\":0},\"version\":\"v0.test.0\"}", "span1", "1970-01-01 00:00:00.000000", "1970-01-01 00:00:00.000001", 1000)`,
+		},
+		{
+			name: "multi_span",
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOSpan{
+						SpanConfig:  SpanConfig{SpanContext: SpanContext{TraceID: 1, SpanID: 1}, parent: noopSpan{}},
+						Name:        *bytes.NewBuffer([]byte("span1")),
+						StartTimeNS: util.TimeNano(0),
+						EndTimeNS:   util.TimeNano(time.Microsecond),
+						Duration:    util.TimeNano(time.Microsecond),
+						tracer:      gTracer.(*MOTracer),
+					},
+					&MOSpan{
+						SpanConfig:  SpanConfig{SpanContext: SpanContext{TraceID: 1, SpanID: 2}, parent: noopSpan{}},
+						Name:        *bytes.NewBuffer([]byte("span2")),
+						StartTimeNS: util.TimeNano(time.Microsecond),
+						EndTimeNS:   util.TimeNano(time.Millisecond),
+						Duration:    util.TimeNano(time.Millisecond - time.Microsecond),
+						tracer:      gTracer.(*MOTracer),
+					},
+				},
+				buf: buf,
+			},
+			want: `insert into system.span_info (` +
+				"`span_id`, `statement_id`, `parent_span_id`, `node_id`, `node_type`, `resource`, `name`, `start_time`, `end_time`, `duration`" +
+				`) values (1, 1, 0, 0, "Node", "{\"Node\":{\"node_id\":0,\"node_type\":0},\"version\":\"v0.test.0\"}", "span1", "1970-01-01 00:00:00.000000", "1970-01-01 00:00:00.000001", 1000)` +
+				`,(2, 1, 0, 0, "Node", "{\"Node\":{\"node_id\":0,\"node_type\":0},\"version\":\"v0.test.0\"}", "span2", "1970-01-01 00:00:00.000001", "1970-01-01 00:00:00.001000", 999000)`,
+		},
+	}
+	for _, tt := range tests {
+		t1.Run(tt.name, func(t1 *testing.T) {
+			got := genSpanBatchSql(tt.args.in, tt.args.buf)
+			require.Equal(t1, tt.want, got)
+		})
+	}
+}
+
+// Test_batchSqlHandler_genStatementBatchSql checks that genStatementBatchSql
+// renders one multi-row INSERT into system.statement_info from the buffered
+// StatementInfo items, covering single- and multi-item batches.
+// Note: args.buf and the `buf` fixture are declared outside this function
+// (not visible in this hunk) and are shared across cases.
+func Test_batchSqlHandler_genStatementBatchSql(t1 *testing.T) {
+	type args struct {
+		in  []IBuffer2SqlItem
+		buf *bytes.Buffer
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "single_statement",
+			args: args{
+				in: []IBuffer2SqlItem{
+					&StatementInfo{
+						StatementID:          1,
+						TransactionID:        1,
+						SessionID:            1,
+						Account:              "MO",
+						User:                 "moroot",
+						Database:             "system",
+						Statement:            "show tables",
+						StatementFingerprint: "show tables",
+						StatementTag:         "",
+						RequestAt:            util.TimeNano(0),
+						Status:               StatementStatusRunning,
+						ExecPlan:             "",
+					},
+				},
+				buf: buf,
+			},
+			want: `insert into system.statement_info (` +
+				"`statement_id`, `transaction_id`, `session_id`, `account`, `user`, `host`, `database`, `statement`, `statement_tag`, `statement_fingerprint`, `node_id`, `node_type`, `request_at`, `status`, `exec_plan`" +
+				`) values (1, 1, 1, "MO", "moroot", "", "system", "show tables", "show tables", "", 0, "Node", "1970-01-01 00:00:00.000000", "Running", "")`,
+		},
+		{
+			name: "multi_statement",
+			args: args{
+				in: []IBuffer2SqlItem{
+					&StatementInfo{
+						StatementID:          1,
+						TransactionID:        1,
+						SessionID:            1,
+						Account:              "MO",
+						User:                 "moroot",
+						Database:             "system",
+						Statement:            "show tables",
+						StatementFingerprint: "show tables",
+						StatementTag:         "",
+						RequestAt:            util.TimeNano(0),
+						Status:               StatementStatusRunning,
+						ExecPlan:             "",
+					},
+					&StatementInfo{
+						StatementID:          2,
+						TransactionID:        1,
+						SessionID:            1,
+						Account:              "MO",
+						User:                 "moroot",
+						Database:             "system",
+						Statement:            "show databases",
+						StatementFingerprint: "show databases",
+						StatementTag:         "dcl",
+						RequestAt:            util.TimeNano(time.Microsecond),
+						Status:               StatementStatusFailed,
+						ExecPlan:             "",
+					},
+				},
+				buf: buf,
+			},
+			want: `insert into system.statement_info (` +
+				"`statement_id`, `transaction_id`, `session_id`, `account`, `user`, `host`, `database`, `statement`, `statement_tag`, `statement_fingerprint`, `node_id`, `node_type`, `request_at`, `status`, `exec_plan`" +
+				`) values (1, 1, 1, "MO", "moroot", "", "system", "show tables", "show tables", "", 0, "Node", "1970-01-01 00:00:00.000000", "Running", "")` +
+				`,(2, 1, 1, "MO", "moroot", "", "system", "show databases", "show databases", "dcl", 0, "Node", "1970-01-01 00:00:00.000001", "Failed", "")`,
+		},
+	}
+	for _, tt := range tests {
+		t1.Run(tt.name, func(t1 *testing.T) {
+			if got := genStatementBatchSql(tt.args.in, tt.args.buf); got != tt.want {
+				t1.Errorf("genStatementBatchSql() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_buffer2Sql_GetBatch_AllType verifies, for every buffered item type
+// (error / log / span / statement), that NewItemBuffer selects the matching
+// genBatchFunc and that GetBatch renders the expected INSERT statement.
+// Note: err1, err2, buf and gTracer are fixtures declared outside this
+// function (not visible in this hunk).
+func Test_buffer2Sql_GetBatch_AllType(t *testing.T) {
+	type fields struct {
+		Reminder      batchpipe.Reminder
+		sizeThreshold int64
+	}
+	defaultFields := fields{
+		Reminder:      batchpipe.NewConstantClock(15 * time.Second),
+		sizeThreshold: MB,
+	}
+	type args struct {
+		in  []IBuffer2SqlItem
+		buf *bytes.Buffer
+	}
+	tests := []struct {
+		name     string
+		fields   fields
+		args     args
+		wantFunc genBatchFunc
+		want     string
+	}{
+		{
+			name:   "single_error",
+			fields: defaultFields,
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOErrorHolder{Error: err1, Timestamp: uint64(0)},
+				},
+				buf: buf,
+			},
+			wantFunc: genErrorBatchSql,
+			want: `insert into system.error_info (` +
+				"`statement_id`, `span_id`, `node_id`, `node_type`, `err_code`, `stack`, `timestamp`" +
+				`) values (` + nodeStateSpanIdStr + `, 0, "Node", "test1", "test1", "1970-01-01 00:00:00.000000")`,
+		},
+		{
+			name:   "multi_error",
+			fields: defaultFields,
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOErrorHolder{Error: err1, Timestamp: uint64(0)},
+					&MOErrorHolder{Error: err2, Timestamp: uint64(time.Millisecond + time.Microsecond)},
+				},
+				buf: buf,
+			},
+			wantFunc: genErrorBatchSql,
+			want: `insert into system.error_info (` +
+				"`statement_id`, `span_id`, `node_id`, `node_type`, `err_code`, `stack`, `timestamp`" +
+				`) values (` + nodeStateSpanIdStr + `, 0, "Node", "test1", "test1", "1970-01-01 00:00:00.000000")` +
+				`,(` + nodeStateSpanIdStr + `, 0, "Node", "test2: test1", "test2: test1", "1970-01-01 00:00:00.001001")`,
+		},
+		{
+			name:   "single_log",
+			fields: defaultFields,
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOLog{
+						StatementId: 1,
+						SpanId:      1,
+						Timestamp:   uint64(0),
+						Level:       zapcore.InfoLevel,
+						Caller:      util.Caller(0),
+						Message:     "info message",
+						Extra:       "{}",
+					},
+				},
+				buf: buf,
+			},
+			wantFunc: genLogBatchSql,
+			want: `insert into system.log_info (` +
+				"`span_id`, `statement_id`, `node_id`, `node_type`, `timestamp`, `name`, `level`, `caller`, `message`, `extra`" +
+				`) values (1, 1, 0, "Node", "1970-01-01 00:00:00.000000", "", "info", "Test_buffer2Sql_GetBatch_AllType", "info message", "{}")`,
+		},
+		{
+			name:   "multi_log",
+			fields: defaultFields,
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOLog{
+						StatementId: 1,
+						SpanId:      1,
+						Timestamp:   uint64(0),
+						Level:       zapcore.InfoLevel,
+						Caller:      util.Caller(0),
+						Message:     "info message",
+						Extra:       "{}",
+					},
+					&MOLog{
+						StatementId: 1,
+						SpanId:      1,
+						Timestamp:   uint64(time.Millisecond + time.Microsecond),
+						Level:       zapcore.DebugLevel,
+						Caller:      util.Caller(0),
+						Message:     "debug message",
+						Extra:       "{}",
+					},
+				},
+				buf: buf,
+			},
+			wantFunc: genLogBatchSql,
+			want: `insert into system.log_info (` +
+				"`span_id`, `statement_id`, `node_id`, `node_type`, `timestamp`, `name`, `level`, `caller`, `message`, `extra`" +
+				`) values (1, 1, 0, "Node", "1970-01-01 00:00:00.000000", "", "info", "Test_buffer2Sql_GetBatch_AllType", "info message", "{}")` +
+				`,(1, 1, 0, "Node", "1970-01-01 00:00:00.001001", "", "debug", "Test_buffer2Sql_GetBatch_AllType", "debug message", "{}")`,
+		},
+		{
+			name:   "single_span",
+			fields: defaultFields,
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOSpan{
+						SpanConfig:  SpanConfig{SpanContext: SpanContext{TraceID: 1, SpanID: 1}, parent: noopSpan{}},
+						Name:        *bytes.NewBuffer([]byte("span1")),
+						StartTimeNS: util.TimeNano(0),
+						EndTimeNS:   util.TimeNano(time.Microsecond),
+						Duration:    util.TimeNano(time.Microsecond),
+						tracer:      gTracer.(*MOTracer),
+					},
+				},
+				buf: buf,
+			},
+			wantFunc: genSpanBatchSql,
+			want: `insert into system.span_info (` +
+				"`span_id`, `statement_id`, `parent_span_id`, `node_id`, `node_type`, `resource`, `name`, `start_time`, `end_time`, `duration`" +
+				`) values (1, 1, 0, 0, "Node", "{\"Node\":{\"node_id\":0,\"node_type\":0},\"version\":\"v0.test.0\"}", "span1", "1970-01-01 00:00:00.000000", "1970-01-01 00:00:00.000001", 1000)`,
+		},
+		{
+			name:   "multi_span",
+			fields: defaultFields,
+			args: args{
+				in: []IBuffer2SqlItem{
+					&MOSpan{
+						SpanConfig:  SpanConfig{SpanContext: SpanContext{TraceID: 1, SpanID: 1}, parent: noopSpan{}},
+						Name:        *bytes.NewBuffer([]byte("span1")),
+						StartTimeNS: util.TimeNano(0),
+						EndTimeNS:   util.TimeNano(time.Microsecond),
+						Duration:    util.TimeNano(time.Microsecond),
+						tracer:      gTracer.(*MOTracer),
+					},
+					&MOSpan{
+						SpanConfig:  SpanConfig{SpanContext: SpanContext{TraceID: 1, SpanID: 2}, parent: noopSpan{}},
+						Name:        *bytes.NewBuffer([]byte("span2")),
+						StartTimeNS: util.TimeNano(time.Microsecond),
+						EndTimeNS:   util.TimeNano(time.Millisecond),
+						Duration:    util.TimeNano(time.Millisecond - time.Microsecond),
+						tracer:      gTracer.(*MOTracer),
+					},
+				},
+				buf: buf,
+			},
+			wantFunc: genSpanBatchSql,
+			want: `insert into system.span_info (` +
+				"`span_id`, `statement_id`, `parent_span_id`, `node_id`, `node_type`, `resource`, `name`, `start_time`, `end_time`, `duration`" +
+				`) values (1, 1, 0, 0, "Node", "{\"Node\":{\"node_id\":0,\"node_type\":0},\"version\":\"v0.test.0\"}", "span1", "1970-01-01 00:00:00.000000", "1970-01-01 00:00:00.000001", 1000)` +
+				`,(2, 1, 0, 0, "Node", "{\"Node\":{\"node_id\":0,\"node_type\":0},\"version\":\"v0.test.0\"}", "span2", "1970-01-01 00:00:00.000001", "1970-01-01 00:00:00.001000", 999000)`,
+		},
+		{
+			name:   "single_statement",
+			fields: defaultFields,
+			args: args{
+				in: []IBuffer2SqlItem{
+					&StatementInfo{
+						StatementID:          1,
+						TransactionID:        1,
+						SessionID:            1,
+						Account:              "MO",
+						User:                 "moroot",
+						Database:             "system",
+						Statement:            "show tables",
+						StatementFingerprint: "show tables",
+						StatementTag:         "",
+						RequestAt:            util.TimeNano(0),
+						Status:               StatementStatusRunning,
+						ExecPlan:             "",
+					},
+				},
+				buf: buf,
+			},
+			wantFunc: genStatementBatchSql,
+			want: `insert into system.statement_info (` +
+				"`statement_id`, `transaction_id`, `session_id`, `account`, `user`, `host`, `database`, `statement`, `statement_tag`, `statement_fingerprint`, `node_id`, `node_type`, `request_at`, `status`, `exec_plan`" +
+				`) values (1, 1, 1, "MO", "moroot", "", "system", "show tables", "show tables", "", 0, "Node", "1970-01-01 00:00:00.000000", "Running", "")`,
+		},
+		{
+			name:   "multi_statement",
+			fields: defaultFields,
+			args: args{
+				in: []IBuffer2SqlItem{
+					&StatementInfo{
+						StatementID:          1,
+						TransactionID:        1,
+						SessionID:            1,
+						Account:              "MO",
+						User:                 "moroot",
+						Database:             "system",
+						Statement:            "show tables",
+						StatementFingerprint: "show tables",
+						StatementTag:         "",
+						RequestAt:            util.TimeNano(0),
+						Status:               StatementStatusRunning,
+						ExecPlan:             "",
+					},
+					&StatementInfo{
+						StatementID:          2,
+						TransactionID:        1,
+						SessionID:            1,
+						Account:              "MO",
+						User:                 "moroot",
+						Database:             "system",
+						Statement:            "show databases",
+						StatementFingerprint: "show databases",
+						StatementTag:         "dcl",
+						RequestAt:            util.TimeNano(time.Microsecond),
+						Status:               StatementStatusFailed,
+						ExecPlan:             "",
+					},
+				},
+				buf: buf,
+			},
+			wantFunc: genStatementBatchSql,
+			want: `insert into system.statement_info (` +
+				"`statement_id`, `transaction_id`, `session_id`, `account`, `user`, `host`, `database`, `statement`, `statement_tag`, `statement_fingerprint`, `node_id`, `node_type`, `request_at`, `status`, `exec_plan`" +
+				`) values (1, 1, 1, "MO", "moroot", "", "system", "show tables", "show tables", "", 0, "Node", "1970-01-01 00:00:00.000000", "Running", "")` +
+				`,(2, 1, 1, "MO", "moroot", "", "system", "show databases", "show databases", "dcl", 0, "Node", "1970-01-01 00:00:00.000001", "Failed", "")`,
+		},
+	}
+
+	// Pin the global formatters so error/stack rendering matches the expected
+	// SQL strings above regardless of test execution order.
+	errorFormatter.Store("%v")
+	logStackFormatter.Store("%n")
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			bpImpl := NewBufferPipe2SqlWorker(
+				bufferWithReminder(tt.fields.Reminder),
+				bufferWithSizeThreshold(tt.fields.sizeThreshold),
+			)
+			b := bpImpl.NewItemBuffer(tt.args.in[0].GetName())
+			t.Logf("buffer.type: %s", b.(*buffer2Sql).GetBufferType())
+			for _, i := range tt.args.in {
+				b.Add(i)
+			}
+			// Function values cannot be compared with ==; compare the code
+			// pointers via reflect to confirm the buffer picked wantFunc.
+			if got := b.(*buffer2Sql).genBatchFunc; reflect.ValueOf(got).Pointer() != reflect.ValueOf(tt.wantFunc).Pointer() {
+				t.Errorf("buffer2Sql's genBatchFunc = %v, want %v", got, tt.wantFunc)
+			}
+			if got := b.GetBatch(tt.args.buf); got.(string) != tt.want {
+				t.Errorf("GetBatch() = %v,\n want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_buffer2Sql_IsEmpty checks IsEmpty against an empty and a non-empty
+// item slice.
+func Test_buffer2Sql_IsEmpty(t *testing.T) {
+	type fields struct {
+		Reminder      batchpipe.Reminder
+		buf           []IBuffer2SqlItem
+		sizeThreshold int64
+		batchFunc     genBatchFunc
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		want   bool
+	}{
+		{
+			name: "empty",
+			fields: fields{
+				Reminder:      batchpipe.NewConstantClock(time.Hour),
+				buf:           []IBuffer2SqlItem{},
+				sizeThreshold: GB,
+				batchFunc:     nil,
+			},
+			want: true,
+		},
+		{
+			name: "not_empty",
+			fields: fields{
+				Reminder:      batchpipe.NewConstantClock(time.Hour),
+				buf:           []IBuffer2SqlItem{&MOLog{}},
+				sizeThreshold: GB,
+				batchFunc:     nil,
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			b := &buffer2Sql{
+				Reminder:      tt.fields.Reminder,
+				buf:           tt.fields.buf,
+				sizeThreshold: tt.fields.sizeThreshold,
+				genBatchFunc:  tt.fields.batchFunc,
+			}
+			if got := b.IsEmpty(); got != tt.want {
+				t.Errorf("IsEmpty() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_buffer2Sql_Reset checks that Reset always leaves the buffer empty,
+// whether it held items or not (hence want == true in both cases).
+func Test_buffer2Sql_Reset(t *testing.T) {
+	type fields struct {
+		Reminder      batchpipe.Reminder
+		buf           []IBuffer2SqlItem
+		sizeThreshold int64
+		batchFunc     genBatchFunc
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		want   bool
+	}{
+		{
+			name: "empty",
+			fields: fields{
+				Reminder:      batchpipe.NewConstantClock(time.Hour),
+				buf:           []IBuffer2SqlItem{},
+				sizeThreshold: GB,
+				batchFunc:     nil,
+			},
+			want: true,
+		},
+		{
+			name: "not_empty",
+			fields: fields{
+				Reminder:      batchpipe.NewConstantClock(time.Hour),
+				buf:           []IBuffer2SqlItem{&MOLog{}},
+				sizeThreshold: GB,
+				batchFunc:     nil,
+			},
+			want: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			b := &buffer2Sql{
+				Reminder:      tt.fields.Reminder,
+				buf:           tt.fields.buf,
+				sizeThreshold: tt.fields.sizeThreshold,
+				genBatchFunc:  tt.fields.batchFunc,
+			}
+			b.Reset()
+			if got := b.IsEmpty(); got != tt.want {
+				t.Errorf("IsEmpty() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_buffer2Sql_ShouldFlush verifies that buffer2Sql.ShouldFlush reports
+// true only after the accumulated item size exceeds the configured threshold.
+func Test_buffer2Sql_ShouldFlush(t *testing.T) {
+	type fields struct {
+		Reminder      batchpipe.Reminder
+		buf           []IBuffer2SqlItem
+		sizeThreshold int64
+		batchFunc     genBatchFunc
+	}
+	err1 := errors.WithStack(errors.New("test1"))
+	err2 := errors.Wrapf(err1, "test2")
+	tests := []struct {
+		name        string
+		fields      fields
+		isNilBuffer bool
+		want        bool
+	}{
+		{
+			name: "empty/nil",
+			fields: fields{
+				Reminder:      batchpipe.NewConstantClock(time.Hour),
+				buf:           []IBuffer2SqlItem{},
+				sizeThreshold: KB,
+				batchFunc:     nil,
+			},
+			isNilBuffer: true,
+			want:        false,
+		},
+		{
+			name: "empty/normal",
+			fields: fields{
+				Reminder:      batchpipe.NewConstantClock(time.Hour),
+				buf:           []IBuffer2SqlItem{},
+				sizeThreshold: KB,
+				batchFunc:     genErrorBatchSql,
+			},
+			isNilBuffer: false,
+			want:        false,
+		},
+		{
+			name: "not_empty",
+			fields: fields{
+				Reminder: batchpipe.NewConstantClock(time.Hour),
+				buf: []IBuffer2SqlItem{
+					&MOErrorHolder{Error: err1, Timestamp: uint64(0)},
+					&MOErrorHolder{Error: err2, Timestamp: uint64(time.Millisecond + time.Microsecond)},
+				},
+				sizeThreshold: 512 * B,
+				batchFunc:     genErrorBatchSql,
+			},
+			isNilBuffer: false,
+			want:        true,
+		},
+	}
+	// The verbose formatter makes the rendered errors (with stacks) large
+	// enough to cross the 512 B threshold in the "not_empty" case.
+	errorFormatter.Store("%+v")
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			b := newBuffer2Sql(
+				bufferWithReminder(tt.fields.Reminder),
+				bufferWithSizeThreshold(tt.fields.sizeThreshold),
+				bufferWithGenBatchFunc(tt.fields.batchFunc),
+			)
+			t.Logf("ShouldFlush() get %p buffer", b)
+			// Bug fix: the previous guard compared the assertion result against
+			// tt.isNilBuffer and then OR-ed tt.isNilBuffer, which returned early
+			// for every valid buffer — ShouldFlush() was never exercised.
+			// Skip only when the buffer is nil or expected to be nil.
+			if !assert.NotNil(t, b, "ShouldFlush() get %p buffer", b) || tt.isNilBuffer {
+				return
+			}
+			for _, i := range tt.fields.buf {
+				b.Add(i)
+			}
+			if got := b.ShouldFlush(); got != tt.want {
+				t.Errorf("ShouldFlush() = %v, want %v, length: %d", got, tt.want, b.Size())
+			}
+		})
+	}
+}
+
+// Test_nanoSec2Datetime checks the nanosecond-to-types.Datetime conversion at
+// sub-second boundaries; the "1 hour + 1ms" case relies on the seconds part
+// being packed into the high bits (<<20) of types.Datetime.
+func Test_nanoSec2Datetime(t *testing.T) {
+	type args struct {
+		t util.TimeMono
+	}
+	tests := []struct {
+		name string
+		args args
+		want types.Datetime
+	}{
+		{
+			name: "1 ns",
+			args: args{t: util.TimeNano(1)},
+			want: types.Datetime(0),
+		},
+		{
+			name: "1 us",
+			args: args{t: util.TimeNano(time.Microsecond)},
+			want: types.Datetime(1),
+		},
+		{
+			name: "1 ms",
+			args: args{t: util.TimeNano(time.Millisecond)},
+			want: types.Datetime(1000),
+		},
+		{
+			name: "1 hour + 1ms",
+			args: args{t: util.TimeNano(time.Millisecond + time.Hour)},
+			want: types.Datetime(((time.Hour / time.Second) << 20) + 1000),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := nanoSec2Datetime(tt.args.t); got != tt.want {
+				t.Errorf("nanoSec2Datetime() = %d, want %d", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_quote checks that quote escapes single quotes, double quotes and
+// backslash sequences for safe embedding in SQL string literals.
+func Test_quote(t *testing.T) {
+	type args struct {
+		value string
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{name: "'", args: args{value: `'`}, want: "\\'"},
+		{name: `"`, args: args{value: `"`}, want: "\\\""},
+		{name: `\n`, args: args{value: `\n`}, want: "\\n"},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := quote(tt.args.value); got != tt.want {
+				t.Errorf("quote() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+	// Informational only: show how a stack-bearing error renders once quoted.
+	var err1 = errors.WithStack(errors.New("test1"))
+	t.Logf("show quote(err): \"%s\"", quote(fmt.Sprintf("%+v", err1)))
+}
+
+// Test_withGenBatchFunc checks that the bufferWithGenBatchFunc option stores
+// the given generator on buffer2Sql; function identity is compared via
+// reflect code pointers since func values do not support ==.
+func Test_withGenBatchFunc(t *testing.T) {
+	type args struct {
+		f genBatchFunc
+	}
+	tests := []struct {
+		name string
+		args args
+		want genBatchFunc
+	}{
+		{name: "genSpanBatchSql", args: args{f: genSpanBatchSql}, want: genSpanBatchSql},
+		{name: "genLogBatchSql", args: args{f: genLogBatchSql}, want: genLogBatchSql},
+		{name: "genStatementBatchSql", args: args{f: genStatementBatchSql}, want: genStatementBatchSql},
+		{name: "genErrorBatchSql", args: args{f: genErrorBatchSql}, want: genErrorBatchSql},
+	}
+	buf := &buffer2Sql{}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			bufferWithGenBatchFunc(tt.args.f).apply(buf)
+			got := buf.genBatchFunc
+			if reflect.ValueOf(got).Pointer() != reflect.ValueOf(tt.want).Pointer() {
+				t.Errorf("bufferWithGenBatchFunc() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_withSizeThreshold checks that the bufferWithSizeThreshold option
+// stores the byte count unchanged (B/KB/MB/GB constants and a mixed sum).
+func Test_withSizeThreshold(t *testing.T) {
+	type args struct {
+		size int64
+	}
+	tests := []struct {
+		name string
+		args args
+		want int64
+	}{
+		{name: "1  B", args: args{size: B}, want: 1},
+		{name: "1 KB", args: args{size: KB}, want: 1 << 10},
+		{name: "1 MB", args: args{size: MB}, want: 1 << 20},
+		{name: "1 GB", args: args{size: GB}, want: 1 << 30},
+		{name: "1.001 GB", args: args{size: GB + MB}, want: 1<<30 + 1<<20},
+	}
+	buf := &buffer2Sql{}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			bufferWithSizeThreshold(tt.args.size).apply(buf)
+			if got := buf.sizeThreshold; got != tt.want {
+				t.Errorf("bufferWithSizeThreshold() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+// Test_batchSqlHandler_NewItemBatchHandler checks that the handler returned
+// by NewItemBatchHandler forwards each batch to the SQL executor: a consumer
+// goroutine drains tt.fields.ch and asserts each received batch string.
+func Test_batchSqlHandler_NewItemBatchHandler(t1 *testing.T) {
+	type fields struct {
+		defaultOpts []buffer2SqlOption
+		ch          chan string
+	}
+	type args struct {
+		batch string
+	}
+
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   func(batch any)
+	}{
+		{
+			name: "nil",
+			fields: fields{
+				defaultOpts: []buffer2SqlOption{bufferWithSizeThreshold(GB)},
+				ch:          make(chan string, 10),
+			},
+			args: args{
+				batch: "batch",
+			},
+			want: func(batch any) {},
+		},
+	}
+	for _, tt := range tests {
+		t1.Run(tt.name, func(t1 *testing.T) {
+			// Route executor output into tt.fields.ch so the consumer below
+			// can observe every executed batch.
+			gTracerProvider = newMOTracerProvider(WithSQLExecutor(newExecutorFactory(tt.fields.ch)))
+			// NOTE(review): `t` here is the handler under test, not a
+			// *testing.T — consider renaming to avoid confusion.
+			t := batchSqlHandler{
+				defaultOpts: tt.fields.defaultOpts,
+			}
+
+			wg := sync.WaitGroup{}
+			startedC := make(chan struct{}, 1)
+			wg.Add(1)
+			go func() {
+				startedC <- struct{}{}
+			loop:
+				for {
+					batch, ok := <-tt.fields.ch
+					if ok {
+						require.Equal(t1, tt.args.batch, batch)
+					} else {
+						t1.Log("exec sql Done.")
+						break loop
+					}
+				}
+				wg.Done()
+			}()
+			// Wait until the consumer goroutine is running before producing.
+			<-startedC
+			got := t.NewItemBatchHandler()
+			got(tt.args.batch)
+			// Closing the channel ends the consumer loop; Wait ensures all
+			// assertions ran before the subtest returns.
+			close(tt.fields.ch)
+			wg.Wait()
+		})
+	}
+}
diff --git a/pkg/util/trace/context.go b/pkg/util/trace/context.go
new file mode 100644
index 000000000..ec33b78f6
--- /dev/null
+++ b/pkg/util/trace/context.go
@@ -0,0 +1,39 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import "context"
+
+// traceContextKeyType is an unexported key type, preventing collisions with
+// context keys defined in other packages.
+type traceContextKeyType int
+
+// currentSpanKey is the context key under which the current Span is stored.
+const currentSpanKey traceContextKeyType = iota
+
+// ContextWithSpan returns a copy of parent carrying span as the current Span.
+func ContextWithSpan(parent context.Context, span Span) context.Context {
+	return context.WithValue(parent, currentSpanKey, span)
+}
+
+// ContextWithSpanContext wraps sc in a non-recording span and attaches it to
+// parent, so remote span identity can propagate without recording locally.
+func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context {
+	return ContextWithSpan(parent, &nonRecordingSpan{sc: sc})
+}
+
+// SpanFromContext returns the current Span stored in ctx. It returns a no-op
+// span when ctx is nil or carries no span, so callers never need a nil check.
+func SpanFromContext(ctx context.Context) Span {
+	if ctx == nil {
+		return noopSpan{}
+	}
+	if span, ok := ctx.Value(currentSpanKey).(Span); ok {
+		return span
+	}
+	return noopSpan{}
+}
diff --git a/pkg/util/trace/example/main.go b/pkg/util/trace/example/main.go
new file mode 100644
index 000000000..a82e75615
--- /dev/null
+++ b/pkg/util/trace/example/main.go
@@ -0,0 +1,237 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	goErrors "errors"
+	"github.com/lni/dragonboat/v4/logger"
+	"github.com/matrixorigin/matrixone/pkg/logutil"
+	"github.com/matrixorigin/matrixone/pkg/logutil/logutil2"
+	"github.com/matrixorigin/matrixone/pkg/util/errors"
+	ie "github.com/matrixorigin/matrixone/pkg/util/internalExecutor"
+	"github.com/matrixorigin/matrixone/pkg/util/trace"
+	"go.uber.org/zap"
+	"time"
+)
+
+// Compile-time check that logOutputExecutor satisfies ie.InternalExecutor.
+var _ ie.InternalExecutor = &logOutputExecutor{}
+
+// logOutputExecutor is a stand-in SQL executor for this example: instead of
+// executing statements it just logs them.
+type logOutputExecutor struct{}
+
+// Exec logs the statement and reports success.
+func (l logOutputExecutor) Exec(s string, s2 ie.SessionOverrideOptions) error {
+	logutil.Info(s)
+	return nil
+}
+
+// Query logs the statement; no result set is produced in this example.
+func (l logOutputExecutor) Query(s string, _ ie.SessionOverrideOptions) ie.InternalExecResult {
+	logutil.Info(s)
+	return nil
+}
+
+// ApplySessionOverride is a no-op for the example executor.
+func (l logOutputExecutor) ApplySessionOverride(ie.SessionOverrideOptions) {}
+
+// bootstrap sets up the logger and initializes the trace/log/error framework
+// with the example's in-process executor; it returns the root context that
+// all subsequent example calls should derive from.
+func bootstrap(ctx context.Context) (context.Context, error) {
+	logutil.SetupMOLogger(&logutil.LogConfig{Format: "console"})
+	// init trace/log/error framework & BatchProcessor
+	return trace.Init(ctx,
+		trace.WithMOVersion("v0.6.0"),
+		// nodeType like CN/DN/LogService; id maybe in config.
+		trace.WithNode(0, trace.NodeTypeNode),
+		// config[enableTrace], default: true
+		trace.EnableTracer(true),
+		// config[traceBatchProcessor], distributed node should use "FileService" in system_vars_config.toml
+		// "FileService" is not implement yet
+		trace.WithBatchProcessMode("InternalExecutor"),
+		// WithSQLExecutor for config[traceBatchProcessor] = "InternalExecutor"
+		trace.WithSQLExecutor(func() ie.InternalExecutor {
+			return &logOutputExecutor{}
+		}),
+	)
+
+}
+
+// traceUsage demonstrates span creation patterns: a root span, child spans
+// created through the returned context, and a sibling span created from the
+// original context.
+func traceUsage(ctx context.Context) {
+	// Case 1: start new span, which calculate duration of function traceUsage()
+	newCtx, span1 := trace.Start(ctx, "traceUsage")
+	// calling End() will calculate running duration(us)
+	defer span1.End()
+	logutil.Info("1st span with TraceId & SpanID", trace.SpanField(span1.SpanContext()))
+
+	// case 2: calling another function, please pass newCtx
+	traceUsageDepth_1(newCtx)
+	// case 4: calling again, will have new span
+	traceUsageDepth_1(newCtx)
+
+	// case 5: new span with same parent of span_1, you should use in-args ctx
+	// span2 will be brother with span1
+	newCtx2, span2 := trace.Start(ctx, "traceUsage_2")
+	traceUsageDepth_1_1(newCtx2)
+	span2.End()
+}
+
+// traceUsageDepth_1 starts a child span from the caller's context.
+func traceUsageDepth_1(ctx context.Context) {
+	// case 3: start new spanChild using ctx in-args,
+	// spanChild like a children of span1
+	// NOTE(review): span name "traceUsage" looks copy-pasted from the caller —
+	// confirm whether it should read "traceUsageDepth_1".
+	depth1Ctx, spanChild := trace.Start(ctx, "traceUsage")
+	defer spanChild.End()
+	logutil.Info("2rd spanChild has same TraceId & new SpanID", trace.SpanField(spanChild.SpanContext()))
+	logutil.Info("ctx contain the spanChild info", trace.ContextField(depth1Ctx))
+	logutil.Infof("2rd spanChild has parent spanChild info, like parent span_id: %d", spanChild.ParentSpanContext().SpanID)
+}
+
+// traceUsageDepth_1_1 logs without creating a new span of its own.
+func traceUsageDepth_1_1(ctx context.Context) {
+	logutil.Info("traceUsageDepth_1_1 working")
+}
+
+// logUsage demonstrates the supported logging styles: context-free logutil,
+// context-as-field logutil, context-first logutil2, and a third-party
+// (dragonboat) logger routed through DragonboatFactory.
+func logUsage(ctx context.Context) {
+	// case 1: use logutil.Info/Infof/..., without context.Context
+	// it will store log into db, related to Node
+	logutil.Info("use without ctx")
+
+	// case 2: use logutil.Info/Infof/..., with context.Context
+	// it will store log into db, related to span, which save in ctx
+	// Suggestion: trace.ContextField should be 1st Field arg, which will help log to find span info faster.
+	logutil.Info("use with ctx", trace.ContextField(ctx), zap.Int("int", 1))
+
+	// case 3: use logutil2.Info/Infof/..., with contex.Context
+	// it will store log into db, related to span, which save in ctx
+	logutil2.Info(ctx, "use with ctx as 1st arg")
+	logutil2.Infof(ctx, "use with ctx as 1st arg, hello %s", "jack")
+
+	// case4: 3rd lib like dragonboat, could use logutil.DragonboatFactory, like
+	logger.SetLoggerFactory(logutil.DragonboatFactory)
+	plog := logger.GetLogger("dragonboat.logger")
+	plog.Infof("log with DragonboatFactory, now: %s", time.Now())
+}
+
+// outputError prints one error through every supported format verb
+// (%s, %+s, %v, %+v and Error()) to show how each rendering differs.
+func outputError(msg string, err error) {
+	logutil.Infof("%s %%s: %s", msg, err)
+	logutil.Infof("%s %%+s: %+s", msg, err)
+	logutil.Infof("%s %%v: %v", msg, err)
+	logutil.Infof("%s %%+v: %+v", msg, err)
+	logutil.Infof("%s Error(): %v", msg, err.Error()) // just like raw error
+	logutil.Info("---")
+}
+
+// errorUsage demonstrates each constructor of the util/errors package and the
+// rendering each one produces; the WithContext/New variants additionally
+// report the error through the framework (store + log).
+func errorUsage(ctx context.Context) {
+	newCtx, span := trace.Start(ctx, "errorUsage")
+	defer span.End()
+
+	base := goErrors.New("base error")
+	logutil.Infof("base err: %v", base)
+
+	// case 1: WithMessage
+	outputError("WithMessage", errors.WithMessage(base, "new message"))
+	outputError("WithMessagef", errors.WithMessagef(base, "new %s", "world"))
+
+	// case 2: WithStack
+	outputError("WithStack", errors.WithStack(base))
+
+	// case 3: WithContext, store db & log
+	logutil.Info("WithContext with default action: 1) store in db; 2) gen log")
+	outputError("WithContext", errors.WithContext(newCtx, base))
+	outputError("Wrapf", errors.Wrapf(base, "extra message"))
+
+	// case 4: New, store db & log
+	logutil.Info("errors.New without ctx, with default action: 1) store in db; 2) gen log")
+	outputError("New", errors.New("new"))
+
+	// case 5: NewWithContext, store db & log
+	logutil.Info("errors.New with ctx, with default action: 1) store in db; 2) gen log")
+	outputError("New", errors.NewWithContext(newCtx, "new with ctx"))
+
+}
+
+// rpcRequest carries the caller's trace identity across the (simulated) RPC
+// boundary so the server can continue the same trace.
+type rpcRequest struct {
+	TraceId trace.TraceID
+	SpanId  trace.SpanID
+}
+
+// rpcResponse is the simulated server reply.
+type rpcResponse struct {
+	message string
+}
+
+// rpcServer is the simulated remote endpoint.
+type rpcServer struct {
+}
+
+// rpcUsage demonstrates propagating trace identity over RPC: it extracts the
+// current TraceID/SpanID from ctx, embeds them in the request, and calls the
+// (simulated) remote function.
+func rpcUsage(ctx context.Context) {
+	traceId, spanId := trace.SpanFromContext(ctx).SpanContext().GetIDs()
+	req := &rpcRequest{
+		TraceId: traceId,
+		SpanId:  spanId,
+	}
+	_ = remoteCallFunction(ctx, req)
+}
+
+// remoteCallFunction simulates the client side of an RPC: it invokes the
+// server's Function and logs the response message.
+func remoteCallFunction(ctx context.Context, req *rpcRequest) error {
+	s := &rpcServer{}
+	resp, err := s.Function(ctx, req)
+	// Bug fix: check err before touching resp — the original dereferenced
+	// resp.message unconditionally, which panics if Function fails and
+	// returns a nil response.
+	if err != nil {
+		return err
+	}
+	logutil2.Infof(ctx, "resp: %s", resp.message)
+	return nil
+}
+
+// Function is the server side of the simulated RPC: it rebuilds the remote
+// span context from the request's TraceId/SpanId, starts a child span under
+// it, and answers with a success message.
+func (s *rpcServer) Function(ctx context.Context, req *rpcRequest) (*rpcResponse, error) {
+	rootCtx := trace.ContextWithSpanContext(ctx, trace.SpanContextWithIDs(req.TraceId, req.SpanId))
+	newCtx, span := trace.Start(rootCtx, "Function")
+	defer span.End()
+
+	logutil2.Info(newCtx, "do Function")
+	return &rpcResponse{message: "success"}, nil
+}
+
+// mixUsage demonstrates combining tracing, logging and error reporting in one
+// call path: a span wraps the work, a nested call attaches the span context
+// to its error, and ReportError records the wrapped result.
+func mixUsage(ctx context.Context) {
+	newCtx, span := trace.Start(ctx, "mixUsage")
+	defer span.End()
+
+	logutil.Info("message", trace.ContextField(newCtx))
+
+	err := childFunc(newCtx)
+	trace.ReportError(newCtx, errors.Wrapf(err, "extra %s", "message"))
+
+}
+
+// childFunc returns an error bound to the caller's span context.
+func childFunc(ctx context.Context) error {
+	err := goErrors.New("example: not found Database")
+	return errors.WithContext(ctx, err)
+}
+
+// shutdown logs the shutdown and stops the trace framework, flushing any
+// pending batches.
+func shutdown(ctx context.Context) {
+	logutil2.Warn(ctx, "shutdown")
+	trace.Shutdown(ctx)
+}
+
+// main runs every example in sequence: bootstrap the framework, then
+// exercise tracing, logging, error handling, RPC propagation and the mixed
+// scenario, and finally shut down (which flushes pending batches).
+func main() {
+	ctx := context.Background()
+
+	// rootCtx should be root Context of Server running, you can get it also by trace.DefaultContext()
+	rootCtx, err := bootstrap(ctx)
+	if err != nil {
+		panic(err)
+	}
+	// show rootCtx in zap.logger format
+	logutil.Info("root ctx", trace.ContextField(rootCtx))
+	logutil.Info("default ctx", trace.ContextField(trace.DefaultContext()))
+
+	traceUsage(rootCtx)
+
+	logUsage(rootCtx)
+
+	errorUsage(rootCtx)
+
+	rpcUsage(rootCtx)
+
+	mixUsage(rootCtx)
+
+	logutil2.Warn(rootCtx, "wait 5s to see insert sql")
+
+	shutdown(rootCtx)
+}
diff --git a/pkg/util/trace/mo_trace.go b/pkg/util/trace/mo_trace.go
new file mode 100644
index 000000000..9e72a678f
--- /dev/null
+++ b/pkg/util/trace/mo_trace.go
@@ -0,0 +1,259 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"sync"
+	"unsafe"
+
+	"github.com/matrixorigin/matrixone/pkg/util"
+	"github.com/matrixorigin/matrixone/pkg/util/batchpipe"
+)
+
+// TracerConfig is a group of options for a Tracer.
+type TracerConfig struct {
+	Name     string
+	reminder batchpipe.Reminder // WithReminder
+}
+
+// TracerOption applies an option to a TracerConfig.
+type TracerOption interface {
+	apply(*TracerConfig)
+}
+
+type tracerOptionFunc func(*TracerConfig)
+
+func (f tracerOptionFunc) apply(cfg *TracerConfig) {
+	f(cfg)
+}
+
+func WithReminder(r batchpipe.Reminder) tracerOptionFunc {
+	return tracerOptionFunc(func(cfg *TracerConfig) {
+		cfg.reminder = r
+	})
+}
+
+const (
+	// FlagsSampled is a bitmask with the sampled bit set. A SpanContext
+	// with the sampling bit set means the span is sampled.
+	FlagsSampled = TraceFlags(0x01)
+)
+
+// TraceFlags contains flags that can be set on a SpanContext.
+type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
+
+// IsSampled returns if the sampling bit is set in the TraceFlags.
+func (tf TraceFlags) IsSampled() bool {
+	return tf&FlagsSampled == FlagsSampled
+}
+
+// WithSampled sets the sampling bit in a new copy of the TraceFlags.
+func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive  // sampled is not a control flag.
+	if sampled {
+		return tf | FlagsSampled
+	}
+
+	return tf &^ FlagsSampled
+}
+
+// String returns the hex string representation form of TraceFlags.
+func (tf TraceFlags) String() string {
+	return hex.EncodeToString([]byte{byte(tf)}[:])
+}
+
+var _ Tracer = &MOTracer{}
+
+// MOTracer is the creator of Spans.
+type MOTracer struct {
+	TracerConfig
+	provider *MOTracerProvider
+}
+
+func (t *MOTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
+	span := newMOSpan()
+	span.init(name, opts...)
+	span.tracer = t
+
+	parent := SpanFromContext(ctx)
+
+	if span.NewRoot {
+		span.TraceID, span.SpanID = t.provider.idGenerator.NewIDs()
+	} else {
+		span.TraceID, span.SpanID = parent.SpanContext().TraceID, t.provider.idGenerator.NewSpanID()
+		span.parent = parent
+	}
+
+	return ContextWithSpan(ctx, span), span
+}
+
+var _ zapcore.ObjectMarshaler = (*SpanContext)(nil)
+
+const SpanFieldKey = "span"
+
+func SpanField(sc SpanContext) zap.Field {
+	return zap.Object(SpanFieldKey, &sc)
+}
+
+func IsSpanField(field zapcore.Field) bool {
+	return field.Key == SpanFieldKey
+}
+
+// SpanContext contains identifying trace information about a Span.
+type SpanContext struct {
+	TraceID TraceID `json:"trace_id"`
+	SpanID  SpanID  `json:"span_id"`
+}
+
+func (c SpanContext) GetIDs() (TraceID, SpanID) {
+	return c.TraceID, c.SpanID
+}
+
+func (c *SpanContext) Reset() {
+	c.TraceID = 0
+	c.SpanID = 0
+}
+
+func (c *SpanContext) IsEmpty() bool {
+	return c.TraceID == 0
+}
+
+func (c *SpanContext) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+	enc.AddUint64("TraceId", uint64(c.TraceID))
+	enc.AddUint64("SpanId", uint64(c.SpanID))
+	return nil
+}
+
+func SpanContextWithID(id TraceID) SpanContext {
+	return SpanContext{TraceID: id}
+}
+
+func SpanContextWithIDs(tid TraceID, sid SpanID) SpanContext {
+	return SpanContext{TraceID: tid, SpanID: sid}
+}
+
+// SpanConfig is a group of options for a Span.
+type SpanConfig struct {
+	SpanContext
+
+	// NewRoot identifies a Span as the root Span for a new trace. This is
+	// commonly used when an existing trace crosses trust boundaries and the
+	// remote parent span context should be ignored for security.
+	NewRoot bool `json:"NewRoot"` // see WithNewRoot
+	parent  Span `json:"-"`
+}
+
+// SpanStartOption applies an option to a SpanConfig. These options are applicable
+// only when the span is created.
+type SpanStartOption interface {
+	applySpanStart(*SpanConfig)
+}
+
+type SpanEndOption interface {
+	applySpanEnd(*SpanConfig)
+}
+
+// SpanOption applies an option to a SpanConfig.
+type SpanOption interface {
+	SpanStartOption
+	SpanEndOption
+}
+
+type spanOptionFunc func(*SpanConfig)
+
+func (f spanOptionFunc) applySpanEnd(cfg *SpanConfig) {
+	f(cfg)
+}
+
+func (f spanOptionFunc) applySpanStart(cfg *SpanConfig) {
+	f(cfg)
+}
+
+func WithNewRoot(newRoot bool) spanOptionFunc {
+	return spanOptionFunc(func(cfg *SpanConfig) {
+		cfg.NewRoot = newRoot
+	})
+}
+
+var _ Span = &MOSpan{}
+var _ IBuffer2SqlItem = &MOSpan{}
+
+type MOSpan struct {
+	SpanConfig
+	Name        bytes.Buffer  `json:"name"`
+	StartTimeNS util.TimeNano `json:"start_time"`
+	EndTimeNS   util.TimeNano `json:"end_time"`
+	Duration    util.TimeNano `json:"duration"`
+
+	tracer *MOTracer `json:"-"`
+}
+
+var spanPool = &sync.Pool{New: func() any {
+	return &MOSpan{}
+}}
+
+func newMOSpan() *MOSpan {
+	return spanPool.Get().(*MOSpan)
+}
+
+func (s *MOSpan) init(name string, opts ...SpanOption) {
+	s.Name.WriteString(name)
+	s.StartTimeNS = util.NowNS()
+	for _, opt := range opts {
+		opt.applySpanStart(&s.SpanConfig)
+	}
+}
+
+func (s *MOSpan) Size() int64 {
+	return int64(unsafe.Sizeof(*s)) + int64(s.Name.Cap())
+}
+
+func (s *MOSpan) Free() {
+	s.Name.Reset()
+	s.Duration = 0
+	s.tracer = nil
+	s.StartTimeNS = 0
+	s.EndTimeNS = 0
+	spanPool.Put(s)
+}
+
+func (s *MOSpan) GetName() string {
+	return MOSpanType
+}
+
+func (s *MOSpan) End(options ...SpanEndOption) {
+	s.EndTimeNS = util.NowNS()
+	s.Duration = s.EndTimeNS - s.StartTimeNS
+
+	for _, sp := range s.tracer.provider.spanProcessors {
+		sp.OnEnd(s)
+	}
+}
+
+func (s *MOSpan) SpanContext() SpanContext {
+	return s.SpanConfig.SpanContext
+}
+
+func (s *MOSpan) ParentSpanContext() SpanContext {
+	return s.SpanConfig.parent.SpanContext()
+}
+
+func (s MOSpan) GetItemType() string {
+	return "MOSpan"
+}
diff --git a/pkg/util/trace/noop_trace.go b/pkg/util/trace/noop_trace.go
new file mode 100644
index 000000000..959a8bddf
--- /dev/null
+++ b/pkg/util/trace/noop_trace.go
@@ -0,0 +1,64 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+)
+
+var _ Tracer = &noopTracer{}
+var _ Span = &noopSpan{}
+
+// noopTracer is an implementation of Tracer that performs no operations.
+type noopTracer struct{}
+
+// Start carries forward a non-recording Span, if one is present in the context, otherwise it
+// creates a no-op Span.
+func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanOption) (context.Context, Span) {
+	span := SpanFromContext(ctx)
+	if _, ok := span.(noopSpan); !ok {
+		// span is likely already a noopSpan, but let's be sure
+		span = noopSpan{}
+	}
+	return ContextWithSpan(ctx, span), span
+}
+
+// noopSpan is an implementation of Span that performs no operations.
+type noopSpan struct{}
+
+var _ Span = noopSpan{}
+
+// SpanContext returns an empty span context.
+func (noopSpan) SpanContext() SpanContext { return SpanContext{} }
+
+func (noopSpan) ParentSpanContext() SpanContext { return SpanContext{} }
+
+// End does nothing.
+func (noopSpan) End(...SpanEndOption) {}
+
+// SetName does nothing.
+func (noopSpan) SetName(string) {}
+
+// TracerProvider returns a no-op TracerProvider.
+func (noopSpan) TracerProvider() TracerProvider { return gTracerProvider }
+
+// nonRecordingSpan keeps the SpanContext{TraceID, SpanID}
+type nonRecordingSpan struct {
+	noopSpan
+	sc SpanContext
+}
+
+func (s *nonRecordingSpan) SpanContext() SpanContext       { return s.sc }
+func (s *nonRecordingSpan) ParentSpanContext() SpanContext { return SpanContext{} }
diff --git a/pkg/util/trace/noop_trace_test.go b/pkg/util/trace/noop_trace_test.go
new file mode 100644
index 000000000..5acdf786f
--- /dev/null
+++ b/pkg/util/trace/noop_trace_test.go
@@ -0,0 +1,214 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func Test_nonRecordingSpan_ParentSpanContext(t *testing.T) {
+	type fields struct {
+		noopSpan noopSpan
+		sc       SpanContext
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		want   SpanContext
+	}{
+		{
+			name:   "normal",
+			fields: fields{noopSpan: noopSpan{}, sc: SpanContext{}},
+			want:   SpanContext{},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			s := &nonRecordingSpan{
+				noopSpan: tt.fields.noopSpan,
+				sc:       tt.fields.sc,
+			}
+			require.Equal(t, tt.want, s.ParentSpanContext())
+		})
+	}
+}
+
+func Test_nonRecordingSpan_SpanContext(t *testing.T) {
+	type fields struct {
+		noopSpan noopSpan
+		sc       SpanContext
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		want   SpanContext
+	}{
+		{
+			name:   "normal",
+			fields: fields{noopSpan: noopSpan{}, sc: SpanContext{}},
+			want:   SpanContext{},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			s := &nonRecordingSpan{
+				noopSpan: tt.fields.noopSpan,
+				sc:       tt.fields.sc,
+			}
+			assert.Equalf(t, tt.want, s.SpanContext(), "SpanContext()")
+		})
+	}
+}
+
+func Test_noopSpan_End(t *testing.T) {
+	type args struct {
+		in0 []SpanEndOption
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			no := noopSpan{}
+			no.End(tt.args.in0...)
+		})
+	}
+}
+
+func Test_noopSpan_ParentSpanContext(t *testing.T) {
+	tests := []struct {
+		name string
+		want SpanContext
+	}{
+		{
+			name: "normal",
+			want: SpanContext{},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			no := noopSpan{}
+			assert.Equalf(t, tt.want, no.ParentSpanContext(), "ParentSpanContext()")
+		})
+	}
+}
+
+func Test_noopSpan_SetName(t *testing.T) {
+	type args struct {
+		in0 string
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "normal",
+			args: args{in0: "name"},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			no := noopSpan{}
+			no.SetName(tt.args.in0)
+		})
+	}
+}
+
+func Test_noopSpan_SpanContext(t *testing.T) {
+	tests := []struct {
+		name string
+		want SpanContext
+	}{
+		{name: "normal", want: SpanContext{}},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			no := noopSpan{}
+			assert.Equalf(t, tt.want, no.SpanContext(), "SpanContext()")
+		})
+	}
+}
+
+func Test_noopSpan_TracerProvider(t *testing.T) {
+	tests := []struct {
+		name string
+		want TracerProvider
+	}{
+		{
+			name: "normal",
+			want: gTracerProvider,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			no := noopSpan{}
+			assert.Equalf(t, tt.want, no.TracerProvider(), "TracerProvider()")
+		})
+	}
+}
+
+func Test_noopTracer_Start(t1 *testing.T) {
+	type args struct {
+		ctx   context.Context
+		name  string
+		in2   []SpanOption
+		endIn []SpanEndOption
+	}
+	tests := []struct {
+		name  string
+		args  args
+		want  context.Context
+		want1 Span
+	}{
+		{
+			name: "normal",
+			args: args{
+				ctx:   context.Background(),
+				name:  "noopTracer_Start",
+				in2:   []SpanOption{WithNewRoot(true)},
+				endIn: []SpanEndOption{},
+			},
+			want:  ContextWithSpan(context.Background(), noopSpan{}),
+			want1: noopSpan{},
+		},
+		{
+			name: "nonRecording",
+			args: args{
+				ctx:   ContextWithSpan(context.Background(), &nonRecordingSpan{}),
+				name:  "noopTracer_Start",
+				in2:   []SpanOption{WithNewRoot(true)},
+				endIn: []SpanEndOption{},
+			},
+			want:  ContextWithSpan(ContextWithSpan(context.Background(), &nonRecordingSpan{}), noopSpan{}),
+			want1: noopSpan{},
+		},
+	}
+	for _, tt := range tests {
+		t1.Run(tt.name, func(t1 *testing.T) {
+			t := noopTracer{}
+			got, got1 := t.Start(tt.args.ctx, tt.args.name, tt.args.in2...)
+			assert.Equalf(t1, tt.want, got, "Start(%v, %v, %v)", tt.args.ctx, tt.args.name, tt.args.in2)
+			assert.Equalf(t1, tt.want1, got1, "Start(%v, %v, %v)", tt.args.ctx, tt.args.name, tt.args.in2)
+			got1.End(tt.args.endIn...)
+		})
+	}
+}
diff --git a/pkg/util/trace/ptrace.go b/pkg/util/trace/ptrace.go
new file mode 100644
index 000000000..deec11ef1
--- /dev/null
+++ b/pkg/util/trace/ptrace.go
@@ -0,0 +1,180 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"github.com/matrixorigin/matrixone/pkg/util"
+	ie "github.com/matrixorigin/matrixone/pkg/util/internalExecutor"
+)
+
+/*
+import (
+	"sync/atomic"
+)
+
+var (
+	gTracer = defaultTracerValue()
+)
+
+type tracerProviderHolder struct {
+	tp TracerProvider
+}
+
+func defaultTracerValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(tracerProviderHolder{tp: &MOTracerProvider{}})
+	return v
+}
+
+func GetTracerProvider() TracerProvider {
+	return gTracer.Load().(tracerProviderHolder).tp
+}
+
+func SetTracerProvider(tp TracerProvider) {
+	gTracer.Store(tracerProviderHolder{tp: tp})
+}
+*/
+
+// tracerProviderConfig.
+type tracerProviderConfig struct {
+	// spanProcessors contains collection of SpanProcessors that are processing pipeline
+	// for spans in the trace signal.
+	// SpanProcessors registered with a TracerProvider and are called at the start
+	// and end of a Span's lifecycle, and are called in the order they are
+	// registered.
+	spanProcessors []SpanProcessor
+
+	enableTracer bool // see EnableTracer
+
+	// idGenerator is used to generate all Span and Trace IDs when needed.
+	idGenerator IDGenerator
+
+	// resource contains attributes representing an entity that produces telemetry.
+	resource *Resource // see WithMOVersion, WithNode,
+
+	// TODO: can check span's END
+	debugMode bool // see DebugMode
+
+	batchProcessMode string // see WithBatchProcessMode
+
+	sqlExecutor func() ie.InternalExecutor // see WithSQLExecutor
+}
+
+func (cfg tracerProviderConfig) getNodeResource() *MONodeResource {
+	if val, has := cfg.resource.Get("Node"); !has {
+		return &MONodeResource{}
+	} else {
+		return val.(*MONodeResource)
+	}
+}
+
+// TracerProviderOption configures a TracerProvider.
+type TracerProviderOption interface {
+	apply(*tracerProviderConfig)
+}
+
+type tracerProviderOptionFunc func(config *tracerProviderConfig)
+
+func (f tracerProviderOptionFunc) apply(config *tracerProviderConfig) {
+	f(config)
+}
+
+func WithMOVersion(v string) tracerProviderOptionFunc {
+	return func(config *tracerProviderConfig) {
+		config.resource.Put("version", v)
+	}
+}
+
+// WithNode gives id as NodeId, t as NodeType
+func WithNode(id int64, t NodeType) tracerProviderOptionFunc {
+	return func(cfg *tracerProviderConfig) {
+		cfg.resource.Put("Node", &MONodeResource{
+			NodeID:   id,
+			NodeType: t,
+		})
+	}
+}
+
+func EnableTracer(enable bool) tracerProviderOptionFunc {
+	return func(cfg *tracerProviderConfig) {
+		cfg.enableTracer = enable
+	}
+}
+
+func DebugMode(debug bool) tracerProviderOptionFunc {
+	return func(cfg *tracerProviderConfig) {
+		cfg.debugMode = debug
+	}
+}
+
+func WithBatchProcessMode(mode string) tracerProviderOptionFunc {
+	return func(cfg *tracerProviderConfig) {
+		cfg.batchProcessMode = mode
+	}
+}
+
+func WithSQLExecutor(f func() ie.InternalExecutor) tracerProviderOptionFunc {
+	return func(cfg *tracerProviderConfig) {
+		cfg.sqlExecutor = f
+	}
+}
+
+var _ IDGenerator = &MOTraceIdGenerator{}
+
+type MOTraceIdGenerator struct{}
+
+func (M MOTraceIdGenerator) NewIDs() (TraceID, SpanID) {
+	return TraceID(util.Fastrand64()), SpanID(util.Fastrand64())
+}
+
+func (M MOTraceIdGenerator) NewSpanID() SpanID {
+	return SpanID(util.Fastrand64())
+}
+
+var _ TracerProvider = &MOTracerProvider{}
+
+type MOTracerProvider struct {
+	tracerProviderConfig
+}
+
+func newMOTracerProvider(opts ...TracerProviderOption) *MOTracerProvider {
+	pTracer := &MOTracerProvider{
+		tracerProviderConfig{
+			enableTracer:     false,
+			resource:         newResource(),
+			idGenerator:      &MOTraceIdGenerator{},
+			batchProcessMode: InternalExecutor,
+		},
+	}
+	for _, opt := range opts {
+		opt.apply(&pTracer.tracerProviderConfig)
+	}
+	return pTracer
+}
+
+func (p *MOTracerProvider) Tracer(instrumentationName string, opts ...TracerOption) Tracer {
+	if !p.enableTracer {
+		return noopTracer{}
+	}
+
+	tracer := &MOTracer{
+		TracerConfig: TracerConfig{Name: instrumentationName},
+		provider:     p,
+	}
+	for _, opt := range opts {
+		opt.apply(&tracer.TracerConfig)
+	}
+	return tracer
+}
diff --git a/pkg/util/trace/report_error.go b/pkg/util/trace/report_error.go
new file mode 100644
index 000000000..0784818ab
--- /dev/null
+++ b/pkg/util/trace/report_error.go
@@ -0,0 +1,64 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+	"fmt"
+	"github.com/matrixorigin/matrixone/pkg/logutil"
+	"go.uber.org/zap"
+	"unsafe"
+
+	"github.com/matrixorigin/matrixone/pkg/util"
+	"github.com/matrixorigin/matrixone/pkg/util/export"
+
+	"github.com/cockroachdb/errors/errbase"
+)
+
+var _ IBuffer2SqlItem = &MOErrorHolder{}
+
+type MOErrorHolder struct {
+	Error     error         `json:"error"`
+	Timestamp util.TimeNano `json:"timestamp"`
+}
+
+func (h MOErrorHolder) GetName() string {
+	return MOErrorType
+}
+
+func (h MOErrorHolder) Size() int64 {
+	return int64(32*8) + int64(unsafe.Sizeof(h))
+}
+func (h MOErrorHolder) Free() {}
+
+func (h *MOErrorHolder) Format(s fmt.State, verb rune) { errbase.FormatError(h.Error, s, verb) }
+
+// ReportError sends the error to the BatchProcessor
+func ReportError(ctx context.Context, err error) {
+	if ctx == nil {
+		ctx = DefaultContext()
+	}
+	e := &MOErrorHolder{Error: err, Timestamp: util.NowNS()}
+	export.GetGlobalBatchProcessor().Collect(ctx, e)
+}
+
+// HandleError api for pkg/util/errors as errorReporter
+func HandleError(ctx context.Context, err error, depth int) {
+	if ctx == nil {
+		ctx = DefaultContext()
+	}
+	ReportError(ctx, err)
+	logutil.GetGlobalLogger().WithOptions(zap.AddCallerSkip(depth+1)).Info("error", ContextField(ctx), zap.Error(err))
+}
diff --git a/pkg/util/trace/report_log.go b/pkg/util/trace/report_log.go
new file mode 100644
index 000000000..c5597592c
--- /dev/null
+++ b/pkg/util/trace/report_log.go
@@ -0,0 +1,163 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+	"fmt"
+	"go.uber.org/zap/buffer"
+	"sync/atomic"
+	"time"
+	"unsafe"
+
+	"github.com/matrixorigin/matrixone/pkg/util"
+	"github.com/matrixorigin/matrixone/pkg/util/batchpipe"
+	"github.com/matrixorigin/matrixone/pkg/util/export"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+func init() {
+	SetLogLevel(zapcore.DebugLevel)
+}
+
+var _ batchpipe.HasName = &MOLog{}
+var _ IBuffer2SqlItem = &MOLog{}
+
+type MOLog struct {
+	StatementId TraceID       `json:"statement_id"`
+	SpanId      SpanID        `json:"span_id"`
+	Timestamp   util.TimeNano `json:"timestamp"`
+	Level       zapcore.Level `json:"level"`
+	Caller      util.Frame    `json:"caller"` // like "util/trace/trace.go:666"
+	Name        string        `json:"name"`
+	Message     string        `json:"Message"`
+	Extra       string        `json:"extra"` // like json text
+}
+
+func newMOLog() *MOLog {
+	return &MOLog{}
+}
+
+func (MOLog) GetName() string {
+	return MOLogType
+}
+
+func (l MOLog) Size() int64 {
+	return int64(unsafe.Sizeof(l)) + int64(len(l.Message))
+}
+
+func (l MOLog) Free() {}
+
+var logLevelEnabler atomic.Value
+
+func SetLogLevel(l zapcore.LevelEnabler) {
+	logLevelEnabler.Store(l)
+}
+
+func ReportLog(ctx context.Context, level zapcore.Level, depth int, formatter string, args ...any) {
+	if !logLevelEnabler.Load().(zapcore.LevelEnabler).Enabled(level) {
+		return
+	}
+	if !gTracerProvider.enableTracer {
+		return
+	}
+	_, newSpan := Start(DefaultContext(), "ReportLog")
+	defer newSpan.End()
+
+	span := SpanFromContext(ctx)
+	sc := span.SpanContext()
+	if sc.IsEmpty() {
+		span = SpanFromContext(DefaultContext())
+		sc = span.SpanContext()
+	}
+	log := newMOLog()
+	log.StatementId = sc.TraceID
+	log.SpanId = sc.SpanID
+	log.Timestamp = util.NowNS()
+	log.Level = level
+	log.Caller = util.Caller(depth + 1)
+	log.Message = fmt.Sprintf(formatter, args...)
+	log.Extra = "{}"
+	export.GetGlobalBatchProcessor().Collect(DefaultContext(), log)
+}
+
+func ContextField(ctx context.Context) zap.Field {
+	return SpanField(SpanFromContext(ctx).SpanContext())
+}
+
+var _ batchpipe.HasName = &MOZap{}
+var _ IBuffer2SqlItem = &MOZap{}
+
+type MOZap struct {
+	Level       zapcore.Level `json:"Level"`
+	SpanContext *SpanContext  `json:"span"`
+	Timestamp   time.Time     `json:"timestamp"`
+	LoggerName  string
+	Caller      string `json:"caller"` // like "util/trace/trace.go:666"
+	Message     string `json:"message"`
+	Extra       string `json:"extra"` // like json text
+}
+
+func newMOZap() *MOZap {
+	return &MOZap{}
+}
+
+func (m MOZap) GetName() string {
+	return MOZapType
+}
+
+// Size returns an approximate size of the item in bytes.
+func (m MOZap) Size() int64 {
+	return int64(unsafe.Sizeof(m) + unsafe.Sizeof(len(m.LoggerName)+len(m.Caller)+len(m.Message)+len(m.Extra)))
+}
+
+func (m MOZap) Free() {}
+
+const moInternalFiledKeyNoopReport = "moInternalFiledKeyNoopReport"
+
+func NoReportFiled() zap.Field {
+	return zap.Bool(moInternalFiledKeyNoopReport, false)
+}
+
+func ReportZap(jsonEncoder zapcore.Encoder, entry zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
+	var needReport = true
+	log := newMOZap()
+	log.LoggerName = entry.LoggerName
+	log.Level = entry.Level
+	log.Message = entry.Message
+	log.Caller = entry.Caller.TrimmedPath()
+	log.Timestamp = entry.Time
+	log.SpanContext = DefaultSpanContext()
+	for _, v := range fields {
+		if IsSpanField(v) {
+			log.SpanContext = v.Interface.(*SpanContext)
+			break
+		}
+		if v.Type == zapcore.BoolType && v.Key == moInternalFiledKeyNoopReport {
+			needReport = false
+			break
+		}
+	}
+	if !needReport {
+		log.Free()
+		return jsonEncoder.EncodeEntry(entry, []zap.Field{})
+	}
+	buffer, err := jsonEncoder.EncodeEntry(entry, fields)
+	log.Extra = buffer.String()
+	export.GetGlobalBatchProcessor().Collect(DefaultContext(), log)
+	return buffer, err
+}
diff --git a/pkg/util/trace/report_log_test.go b/pkg/util/trace/report_log_test.go
new file mode 100644
index 000000000..c4388a8bc
--- /dev/null
+++ b/pkg/util/trace/report_log_test.go
@@ -0,0 +1,113 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+	"github.com/matrixorigin/matrixone/pkg/util"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"testing"
+	"time"
+)
+
+func TestReportLog(t *testing.T) {
+	type args struct {
+		ctx       context.Context
+		level     zapcore.Level
+		depth     int
+		formatter string
+		args      []any
+	}
+	tests := []struct {
+		name         string
+		enableTracer bool
+		args         args
+	}{
+		{
+			name:         "close",
+			enableTracer: false,
+			args: args{
+				ctx:       ContextWithSpanContext(context.Background(), SpanContextWithIDs(0, 0)),
+				level:     zapcore.InfoLevel,
+				depth:     3,
+				formatter: "info message",
+				args:      []any{},
+			},
+		},
+		{
+			name:         "collect",
+			enableTracer: true,
+			args: args{
+				ctx:       ContextWithSpanContext(context.Background(), SpanContextWithIDs(0, 0)),
+				level:     zapcore.InfoLevel,
+				depth:     3,
+				formatter: "info message",
+				args:      []any{},
+			},
+		},
+	}
+	for _, tt := range tests {
+		gTracerProvider.enableTracer = tt.enableTracer
+		t.Run(tt.name, func(t *testing.T) {
+			ReportLog(tt.args.ctx, tt.args.level, tt.args.depth, tt.args.formatter, tt.args.args...)
+		})
+	}
+}
+
+func TestReportZap(t *testing.T) {
+	type args struct {
+		jsonEncoder zapcore.Encoder
+		entry       zapcore.Entry
+		fields      []zapcore.Field
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "normal",
+			args: args{
+				jsonEncoder: zapcore.NewJSONEncoder(
+					zapcore.EncoderConfig{
+						StacktraceKey:  "stacktrace",
+						LineEnding:     zapcore.DefaultLineEnding,
+						EncodeLevel:    zapcore.LowercaseLevelEncoder,
+						EncodeTime:     zapcore.EpochTimeEncoder,
+						EncodeDuration: zapcore.SecondsDurationEncoder,
+						EncodeCaller:   zapcore.ShortCallerEncoder,
+					}),
+				entry: zapcore.Entry{
+					Level:      zapcore.InfoLevel,
+					Time:       time.Unix(0, 0),
+					LoggerName: "test",
+					Message:    "info message",
+					Caller:     zapcore.NewEntryCaller(uintptr(util.Caller(3)), "file", 123, true),
+				},
+				fields: []zapcore.Field{zap.Int("key", 1)},
+			},
+			want: `{"key":1}` + "\n",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := ReportZap(tt.args.jsonEncoder, tt.args.entry, tt.args.fields)
+			require.Equal(t, nil, err)
+			require.Equal(t, tt.want, got.String())
+		})
+	}
+}
diff --git a/pkg/util/trace/report_statement.go b/pkg/util/trace/report_statement.go
new file mode 100644
index 000000000..907c94b6e
--- /dev/null
+++ b/pkg/util/trace/report_statement.go
@@ -0,0 +1,84 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+	"unsafe"
+
+	"github.com/matrixorigin/matrixone/pkg/util"
+	"github.com/matrixorigin/matrixone/pkg/util/export"
+)
+
+var _ IBuffer2SqlItem = &StatementInfo{}
+
+type StatementInfo struct {
+	StatementID          uint64              `json:"statement_id"`
+	TransactionID        uint64              `json:"transaction_id"`
+	SessionID            uint64              `json:"session_id"`
+	Account              string              `json:"account"`
+	User                 string              `json:"user"`
+	Host                 string              `json:"host"`
+	Database             string              `json:"database"`
+	Statement            string              `json:"statement"`
+	StatementFingerprint string              `json:"statement_fingerprint"`
+	StatementTag         string              `json:"statement_tag"`
+	RequestAt            util.TimeNano       `json:"request_at"` // see WithRequestAt
+	Status               StatementInfoStatus `json:"status"`
+	ExecPlan             string              `json:"exec_plan"`
+}
+
+func (s StatementInfo) GetName() string {
+	return MOStatementType
+}
+
+func (s StatementInfo) Size() int64 {
+	return int64(unsafe.Sizeof(s)) + int64(
+		len(s.Account)+len(s.User)+len(s.Host)+
+			len(s.Database)+len(s.Statement)+len(s.StatementFingerprint)+len(s.StatementTag),
+	)
+}
+
+func (s StatementInfo) Free() {}
+
+type StatementInfoStatus int
+
+const (
+	StatementStatusRunning StatementInfoStatus = iota
+	StatementStatusSuccess
+	StatementStatusFailed
+)
+
+func (s StatementInfoStatus) String() string {
+	switch s {
+	case StatementStatusSuccess:
+		return "Success"
+	case StatementStatusRunning:
+		return "Running"
+	case StatementStatusFailed:
+		return "Failed"
+	}
+	return "Unknown"
+}
+
+type StatementOption interface {
+	Apply(*StatementInfo)
+}
+
+type StatementOptionFunc func(*StatementInfo)
+
+func ReportStatement(ctx context.Context, s *StatementInfo) error {
+	return export.GetGlobalBatchProcessor().Collect(ctx, s)
+}
diff --git a/pkg/util/trace/resource.go b/pkg/util/trace/resource.go
new file mode 100644
index 000000000..d6ef27893
--- /dev/null
+++ b/pkg/util/trace/resource.go
@@ -0,0 +1,86 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import "encoding/json"
+
+// Item type names used as batch-pipe routing keys; each identifies one
+// kind of collected telemetry item (see GetName on the item types).
+const (
+	MOStatementType = "MOStatementType"
+	MOSpanType      = "MOSpan"
+	MOLogType       = "MOLog"
+	MOZapType       = "MOZap"
+	MOErrorType     = "MOError"
+)
+
+// Byte-size units (powers of 1024) used for buffer size thresholds.
+const (
+	B int64 = 1 << (iota * 10)
+	KB
+	MB
+	GB
+)
+
+// Resource is a loose key/value bag of static node information that is
+// attached to traces and serialized as JSON.
+//
+// NOTE(review): the map is unsynchronized; Put/Get appear intended for
+// single-goroutine initialization — confirm against callers.
+type Resource struct {
+	m map[string]any
+}
+
+// newResource returns an empty, ready-to-use Resource.
+func newResource() *Resource {
+	return &Resource{m: make(map[string]any)}
+}
+
+// Put stores val under key, overwriting any previous value.
+func (r *Resource) Put(key string, val any) {
+	r.m[key] = val
+}
+
+// Get returns the value stored under key and whether it was present.
+func (r *Resource) Get(key string) (any, bool) {
+	val, has := r.m[key]
+	return val, has
+}
+
+// String renders the resource as a JSON object. A non-serializable
+// value (e.g. a chan or func) now yields "{}" instead of the previous
+// silent empty string from an ignored Marshal error.
+func (r *Resource) String() string {
+	buf, err := json.Marshal(r.m)
+	if err != nil {
+		return "{}"
+	}
+	return string(buf)
+}
+
+// NodeType identifies the role of a MatrixOne node.
+type NodeType int
+
+const (
+	NodeTypeNode NodeType = iota
+	NodeTypeCN
+	NodeTypeDN
+	NodeTypeLogService
+)
+
+// nodeTypeNames maps each known NodeType to its display name.
+var nodeTypeNames = [...]string{
+	NodeTypeNode:       "Node",
+	NodeTypeCN:         "CN",
+	NodeTypeDN:         "DN",
+	NodeTypeLogService: "LogService",
+}
+
+// String renders the node type; values outside the known set yield
+// "Unknown".
+func (t NodeType) String() string {
+	if t < 0 || int(t) >= len(nodeTypeNames) {
+		return "Unknown"
+	}
+	return nodeTypeNames[t]
+}
+
+// MONodeResource identifies the node that produced a telemetry item:
+// its id plus its role (see NodeType). Field order is relied on by
+// positional literals elsewhere; do not reorder.
+type MONodeResource struct {
+	NodeID   int64    `json:"node_id"`
+	NodeType NodeType `json:"node_type"`
+}
diff --git a/pkg/util/trace/schema.go b/pkg/util/trace/schema.go
new file mode 100644
index 000000000..c6def8093
--- /dev/null
+++ b/pkg/util/trace/schema.go
@@ -0,0 +1,124 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"fmt"
+	"github.com/matrixorigin/matrixone/pkg/logutil/logutil2"
+	"time"
+
+	ie "github.com/matrixorigin/matrixone/pkg/util/internalExecutor"
+)
+
+// Database and table names used by the trace/log/error storage.
+const (
+	systemDBConst    = "system"
+	statsDatabase    = systemDBConst
+	spanInfoTbl      = "span_info"
+	logInfoTbl       = "log_info"
+	statementInfoTbl = "statement_info"
+	errorInfoTbl     = "error_info"
+)
+
+// DDL executed by InitSchemaByInnerExecutor to bootstrap the `system`
+// database and its four telemetry tables.
+const (
+	sqlCreateDBConst = `create database if not exists ` + statsDatabase
+
+	sqlCreateSpanInfoTable = `CREATE TABLE IF NOT EXISTS span_info(
+ span_id BIGINT UNSIGNED,
+ statement_id BIGINT UNSIGNED,
+ parent_span_id BIGINT UNSIGNED,
+ node_id BIGINT COMMENT "node uuid in MO",
+ node_type varchar(64) COMMENT "node type in MO, enum: DN, CN, LogService;",
+ name varchar(1024) COMMENT "span name, for example: step name of execution plan, function name in code, ...",
+ start_time datetime,
+ end_time datetime,
+ duration BIGINT COMMENT "execution time, unit: ns",
+ resource varchar(4096) COMMENT "json, static resource information /*should by json type*/"
+)`
+	sqlCreateLogInfoTable = `CREATE TABLE IF NOT EXISTS log_info(
+ statement_id BIGINT UNSIGNED,
+ span_id BIGINT UNSIGNED,
+ node_id BIGINT COMMENT "node uuid in MO",
+ node_type varchar(64) COMMENT "node type in MO, enum: DN, CN, LogService;",
+ timestamp datetime COMMENT "log timestamp",
+ name varchar(1024) COMMENT "logger name",
+ level varchar(32) COMMENT "log level, enum: debug, info, warn, error, panic, fatal",
+ caller varchar(4096) ,
+ message varchar(4096) COMMENT "log message/*TODO: text with max length*/",
+ extra varchar(4096) COMMENT "log extra fields, json"
+)`
+	sqlCreateStatementInfoTable = `CREATE TABLE IF NOT EXISTS statement_info(
+ statement_id BIGINT UNSIGNED,
+ transaction_id BIGINT UNSIGNED,
+ session_id BIGINT UNSIGNED,
+ ` + "`account`" + ` varchar(1024) COMMENT 'account name',
+ user varchar(1024) COMMENT 'user name',
+ host varchar(1024) COMMENT 'user client ip',
+ ` + "`database`" + ` varchar(1024) COMMENT 'database name',
+ statement varchar(10240) COMMENT 'sql statement/*TODO: should by TEXT, or BLOB */',
+ statement_tag varchar(1024),
+ statement_fingerprint varchar(40960) COMMENT 'sql statement fingerprint/*TYPE should by TEXT, longer*/',
+ node_id BIGINT COMMENT "node uuid in MO, which node accept this request",
+ node_type varchar(64) COMMENT "node type in MO, enum: DN, CN, LogService;",
+ request_at datetime,
+ status varchar(1024) COMMENT 'sql statement running status, enum: Running, Success, Failed',
+ exec_plan varchar(4096) COMMENT "sql execution plan; /*TODO: should be JSON type*/"
+)`
+	sqlCreateErrorInfoTable = `CREATE TABLE IF NOT EXISTS error_info(
+ statement_id BIGINT UNSIGNED,
+ span_id BIGINT UNSIGNED,
+ node_id BIGINT COMMENT "node uuid in MO, which node accept this request",
+ node_type varchar(64) COMMENT "node type in MO, enum: DN, CN, LogService;",
+ err_code varchar(1024),
+ stack varchar(4096),
+ timestamp datetime
+)`
+)
+
+// InitSchemaByInnerExecutor just for standalone version, which can access db itself by io.InternalExecutor on any Node.
+// It creates the `system` database and the span/log/statement/error
+// tables. Any DDL failure panics (bootstrap-fatal, same convention as
+// the metric package); a nil executor silently disables schema init.
+func InitSchemaByInnerExecutor(ieFactory func() ie.InternalExecutor) {
+	// fixme: need errors.Recover()
+	exec := ieFactory()
+	if exec == nil {
+		return
+	}
+	exec.ApplySessionOverride(ie.NewOptsBuilder().Database(statsDatabase).Internal(true).Finish())
+	mustExec := func(sql string) {
+		if err := exec.Exec(sql, ie.NewOptsBuilder().Finish()); err != nil {
+			// message said "[Metric] init metric tables" — copy-paste
+			// from the metric collector; this is the trace schema.
+			panic(fmt.Sprintf("[Trace] init trace tables error: %v, sql: %s", err, sql))
+		}
+	}
+
+	mustExec(sqlCreateDBConst)
+	var createCost time.Duration
+	defer func() {
+		logutil2.Debugf(
+			DefaultContext(),
+			"[Trace] init trace tables: create cost %d ms",
+			createCost.Milliseconds())
+	}()
+	instant := time.Now()
+
+	var initCollectors = []string{
+		sqlCreateStatementInfoTable,
+		sqlCreateSpanInfoTable,
+		sqlCreateLogInfoTable,
+		sqlCreateErrorInfoTable,
+	}
+	for _, sql := range initCollectors {
+		mustExec(sql)
+	}
+
+	createCost = time.Since(instant)
+}
diff --git a/pkg/util/trace/schema_test.go b/pkg/util/trace/schema_test.go
new file mode 100644
index 000000000..c0b89653e
--- /dev/null
+++ b/pkg/util/trace/schema_test.go
@@ -0,0 +1,96 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	ie "github.com/matrixorigin/matrixone/pkg/util/internalExecutor"
+	"sync"
+	"testing"
+)
+
+// Test_showSchema prints every DDL statement so the schema can be
+// eyeballed in the verbose test log.
+func Test_showSchema(t *testing.T) {
+	for _, ddl := range []string{
+		sqlCreateStatementInfoTable,
+		sqlCreateSpanInfoTable,
+		sqlCreateLogInfoTable,
+		sqlCreateErrorInfoTable,
+	} {
+		t.Logf("%s", ddl)
+	}
+}
+
+var _ ie.InternalExecutor = &dummySqlExecutor{}
+
+// dummySqlExecutor is a test double for ie.InternalExecutor: Exec
+// forwards each SQL statement to ch so the test can observe what would
+// have been executed.
+type dummySqlExecutor struct {
+	opts ie.SessionOverrideOptions
+	ch   chan<- string
+}
+
+// ApplySessionOverride is a no-op in this stub.
+func (e *dummySqlExecutor) ApplySessionOverride(opts ie.SessionOverrideOptions) {}
+// Query is unused by the schema-init path; it always returns nil.
+func (e *dummySqlExecutor) Query(s string, options ie.SessionOverrideOptions) ie.InternalExecResult {
+	return nil
+}
+// Exec publishes sql to the observation channel and reports success.
+func (e *dummySqlExecutor) Exec(sql string, opts ie.SessionOverrideOptions) error {
+	e.ch <- sql
+	return nil
+}
+
+// newExecutorFactory returns a factory producing dummySqlExecutor
+// instances wired to sqlch.
+// (adapted from pkg/util/metric/metric_collector_test.go)
+func newExecutorFactory(sqlch chan string) func() ie.InternalExecutor {
+	return func() ie.InternalExecutor {
+		return &dummySqlExecutor{
+			opts: ie.NewOptsBuilder().Finish(),
+			ch:   sqlch,
+		}
+	}
+}
+
+// TestInitSchemaByInnerExecutor runs schema creation against a dummy
+// executor and logs every SQL statement it would execute. A drainer
+// goroutine consumes the channel until the test closes it.
+func TestInitSchemaByInnerExecutor(t *testing.T) {
+	type args struct {
+		ieFactory func() ie.InternalExecutor
+	}
+	c := make(chan string, 10)
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "fake",
+			args: args{newExecutorFactory(c)},
+		},
+	}
+	wg := sync.WaitGroup{}
+	startedC := make(chan struct{}, 1)
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		startedC <- struct{}{}
+		// Idiomatic drain: range exits when c is closed, replacing the
+		// manual ok-checked receive loop with a label.
+		for sql := range c {
+			t.Logf("exec sql: %s", sql)
+		}
+		t.Log("exec sql Done.")
+	}()
+	<-startedC
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			InitSchemaByInnerExecutor(tt.args.ieFactory)
+		})
+	}
+	close(c)
+	wg.Wait()
+}
diff --git a/pkg/util/trace/trace.go b/pkg/util/trace/trace.go
new file mode 100644
index 000000000..3b4b48dbd
--- /dev/null
+++ b/pkg/util/trace/trace.go
@@ -0,0 +1,136 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+	goErrors "errors"
+	"sync/atomic"
+	"time"
+	"unsafe"
+
+	"github.com/matrixorigin/matrixone/pkg/logutil"
+	"github.com/matrixorigin/matrixone/pkg/logutil/logutil2"
+	"github.com/matrixorigin/matrixone/pkg/util/batchpipe"
+	"github.com/matrixorigin/matrixone/pkg/util/errors"
+	"github.com/matrixorigin/matrixone/pkg/util/export"
+
+	"go.uber.org/zap"
+)
+
+// Batch-process modes selecting how collected telemetry is exported.
+const (
+	InternalExecutor = "InternalExecutor"
+	FileService      = "FileService"
+)
+
+// TraceID identifies a whole trace; SpanID identifies one span in it.
+type TraceID uint64
+type SpanID uint64
+
+// Package-level trace state, written once by Init.
+var gTracerProvider *MOTracerProvider
+var gTracer Tracer
+var gTraceContext context.Context = context.Background()
+var gSpanContext atomic.Value // stores *SpanContext
+
+// Init wires the trace framework into logutil/errors/export, builds the
+// global TracerProvider/Tracer and the node-level default span context,
+// then starts the export pipeline. It returns the default trace context.
+// The registration order below is load-bearing: reporters must be set
+// before the provider/exporter start producing items.
+func Init(ctx context.Context, opts ...TracerProviderOption) (context.Context, error) {
+
+	// init tool dependence
+	logutil.SetLogReporter(&logutil.TraceReporter{ReportLog: ReportLog, ReportZap: ReportZap, LevelSignal: SetLogLevel, ContextField: ContextField})
+	logutil.SpanFieldKey.Store(SpanFieldKey)
+	errors.SetErrorReporter(HandleError)
+	export.SetDefaultContextFunc(DefaultContext)
+
+	// init TraceProvider
+	gTracerProvider = newMOTracerProvider(opts...)
+	config := &gTracerProvider.tracerProviderConfig
+
+	// init Tracer
+	gTracer = gTracerProvider.Tracer("MatrixOrigin",
+		WithReminder(batchpipe.NewConstantClock(5*time.Second)),
+	)
+
+	// init Node DefaultContext: trace id 0, span id = this node's id
+	sc := SpanContextWithIDs(TraceID(0), SpanID(config.getNodeResource().NodeID))
+	gSpanContext.Store(&sc)
+	gTraceContext = ContextWithSpanContext(ctx, sc)
+
+	initExport(config)
+
+	// NOTE(review): pushes a marker error through the error pipeline,
+	// presumably to record/exercise that init finished — confirm intent.
+	errors.WithContext(DefaultContext(), goErrors.New("finish trace init"))
+
+	return gTraceContext, nil
+}
+
+// initExport registers the per-item buffer pipes and starts the global
+// batch processor according to config.batchProcessMode. It is a no-op
+// when tracing is disabled.
+func initExport(config *tracerProviderConfig) {
+	if !config.enableTracer {
+		logutil2.Infof(context.TODO(), "initExport pass.")
+		return
+	}
+	var p export.BatchProcessor
+	// init BatchProcess for trace/log/error; idiomatic tagged switch
+	// replaces the `switch { case a == b }` form.
+	switch config.batchProcessMode {
+	case InternalExecutor:
+		// init schema
+		InitSchemaByInnerExecutor(config.sqlExecutor)
+		// register buffer pipe implements
+		export.Register(&MOSpan{}, NewBufferPipe2SqlWorker(
+			bufferWithSizeThreshold(MB),
+		))
+		export.Register(&MOLog{}, NewBufferPipe2SqlWorker())
+		export.Register(&MOZap{}, NewBufferPipe2SqlWorker())
+		export.Register(&StatementInfo{}, NewBufferPipe2SqlWorker())
+		export.Register(&MOErrorHolder{}, NewBufferPipe2SqlWorker())
+		logutil2.Infof(context.TODO(), "init GlobalBatchProcessor")
+		// init BatchProcessor for standalone mode.
+		p = export.NewMOCollector()
+		export.SetGlobalBatchProcessor(p)
+		p.Start()
+	case FileService:
+		// TODO: will write csv file.
+	}
+	if p != nil {
+		config.spanProcessors = append(config.spanProcessors, NewBatchSpanProcessor(p))
+		logutil2.Infof(context.TODO(), "trace span processor")
+		// NOTE(review): leftover debug log with placeholder values —
+		// consider removing before release.
+		logutil2.Info(context.TODO(), "[Debug]", zap.String("operation", "value1"), zap.String("operation_1", "value2"))
+	}
+}
+
+// Shutdown disables tracing, swaps the global tracer's contents for a
+// no-op tracer, and stops the global batch processor (flushing buffered
+// items). Safe to call when tracing was never enabled.
+func Shutdown(ctx context.Context) error {
+	if !gTracerProvider.enableTracer {
+		return nil
+	}
+
+	gTracerProvider.enableTracer = false
+	tracer := noopTracer{}
+	// NOTE(review): overwrites the first word of the *MOTracer via an
+	// unsafe atomic pointer swap; this assumes MOTracer's layout begins
+	// with a pointer-shaped field — fragile, confirm it is safe.
+	_ = atomic.SwapPointer((*unsafe.Pointer)(unsafe.Pointer(gTracer.(*MOTracer))), unsafe.Pointer(&tracer))
+
+	// fixme: need stop timeout
+	return export.GetGlobalBatchProcessor().Stop(true)
+}
+
+// Start opens a new span under the global tracer.
+func Start(ctx context.Context, spanName string, opts ...SpanOption) (context.Context, Span) {
+	return gTracer.Start(ctx, spanName, opts...)
+}
+
+// DefaultContext returns the node-level trace context created by Init.
+func DefaultContext() context.Context {
+	return gTraceContext
+}
+
+// DefaultSpanContext returns the node-level span context stored by Init.
+func DefaultSpanContext() *SpanContext {
+	return gSpanContext.Load().(*SpanContext)
+}
+
+// GetNodeResource returns this node's id/type resource record.
+func GetNodeResource() *MONodeResource {
+	return gTracerProvider.getNodeResource()
+}
diff --git a/pkg/util/trace/trace_test.go b/pkg/util/trace/trace_test.go
new file mode 100644
index 000000000..5d66176d9
--- /dev/null
+++ b/pkg/util/trace/trace_test.go
@@ -0,0 +1,120 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"context"
+	"fmt"
+	"github.com/matrixorigin/matrixone/pkg/config"
+	"github.com/matrixorigin/matrixone/pkg/util/export"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"reflect"
+	"testing"
+)
+
+// Test_initExport checks that initExport installs the expected global
+// batch processor per mode: noop when tracing is disabled, MOCollector
+// for the internal-executor mode.
+func Test_initExport(t *testing.T) {
+	type args struct {
+		config *tracerProviderConfig
+	}
+	ch := make(chan string, 10)
+	tests := []struct {
+		name  string
+		args  args
+		empty bool
+	}{
+		{
+			name: "disable",
+			args: args{
+				config: &tracerProviderConfig{enableTracer: false},
+			},
+			empty: true,
+		},
+		{
+			name: "enable_InternalExecutor",
+			args: args{
+				config: &tracerProviderConfig{
+					enableTracer: true, batchProcessMode: InternalExecutor, sqlExecutor: newExecutorFactory(ch),
+				}},
+			empty: false,
+		},
+	}
+	sysVar := &config.GlobalSystemVariables
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			sysVar.SetEnableTrace(tt.args.config.enableTracer)
+			export.ResetGlobalBatchProcessor()
+			initExport(tt.args.config)
+			want := "*export.MOCollector"
+			if tt.empty {
+				want = "*export.noopBatchProcessor"
+			}
+			require.Equal(t, want, fmt.Sprintf("%v", reflect.ValueOf(export.GetGlobalBatchProcessor()).Type()))
+			// require.NoError reads better than Equal(err, nil), which
+			// also had expected/actual swapped.
+			require.NoError(t, Shutdown(context.Background()))
+		})
+	}
+}
+
+// TestDefaultContext verifies the pre-Init default context carries the
+// zero trace/span ids.
+func TestDefaultContext(t *testing.T) {
+	want := ContextWithSpanContext(context.Background(), SpanContextWithIDs(0, 0))
+	t.Run("normal", func(t *testing.T) {
+		assert.Equalf(t, want, DefaultContext(), "DefaultContext()")
+	})
+}
+
+// TestDefaultSpanContext verifies the pre-Init default span context is
+// the zero-id span context.
+func TestDefaultSpanContext(t *testing.T) {
+	sc := SpanContextWithIDs(0, 0)
+	t.Run("normal", func(t *testing.T) {
+		assert.Equalf(t, &sc, DefaultSpanContext(), "DefaultSpanContext()")
+	})
+}
+
+// TestGetNodeResource verifies the default node resource: id 0 with the
+// standalone Node role.
+func TestGetNodeResource(t *testing.T) {
+	want := &MONodeResource{0, NodeTypeNode}
+	t.Run("normal", func(t *testing.T) {
+		assert.Equalf(t, want, GetNodeResource(), "GetNodeResource()")
+	})
+}
diff --git a/pkg/util/trace/type.go b/pkg/util/trace/type.go
new file mode 100644
index 000000000..e73d10fcd
--- /dev/null
+++ b/pkg/util/trace/type.go
@@ -0,0 +1,51 @@
+// Copyright 2022 Matrix Origin
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import "context"
+
+// TracerProvider creates named Tracer instances.
+type TracerProvider interface {
+	Tracer(instrumentationName string, opts ...TracerOption) Tracer
+}
+
+// Tracer creates spans.
+type Tracer interface {
+	// Start creates a span and a context.Context containing the newly-created span.
+	Start(ctx context.Context, spanName string, opts ...SpanOption) (context.Context, Span)
+}
+
+// Span is a single named, timed operation within a trace.
+type Span interface {
+	// End completes the Span. The Span is considered complete and ready to be
+	// delivered through the rest of the telemetry pipeline after this method
+	// is called. Therefore, updates to the Span are not allowed after this
+	// method has been called.
+	End(options ...SpanEndOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// ParentSpanContext returns the SpanContext of this span's parent.
+	ParentSpanContext() SpanContext
+}
+
+// SpanProcessor receives span lifecycle callbacks and is shut down with
+// the provider.
+type SpanProcessor interface {
+	OnStart(ctx context.Context, s Span)
+	OnEnd(s Span)
+	Shutdown(ctx context.Context) error
+}
+
+// IDGenerator produces trace and span ids.
+type IDGenerator interface {
+	NewIDs() (TraceID, SpanID)
+	NewSpanID() SpanID
+}
diff --git a/test/result/database/create_database.result b/test/result/database/create_database.result
index 420479a58..515e0a262 100755
--- a/test/result/database/create_database.result
+++ b/test/result/database/create_database.result
@@ -12,6 +12,7 @@ show databases;
 Database
 mo_catalog
 information_schema
+system
 system_metrics
 create_database
 test01
@@ -33,7 +34,8 @@ show schemas;
 Database
 mo_catalog
 information_schema
+system
 system_metrics
 create_database
 db1
-drop database if exists db1;
\ No newline at end of file
+drop database if exists db1;
diff --git a/test/result/database/drop_database.result b/test/result/database/drop_database.result
index 8958aad59..c9890abfa 100755
--- a/test/result/database/drop_database.result
+++ b/test/result/database/drop_database.result
@@ -6,4 +6,5 @@ show databases;
 Database
 mo_catalog
 information_schema
+system
 system_metrics
diff --git a/test/result/database/new_database.result b/test/result/database/new_database.result
index b46fff5e5..e38bc8bce 100644
--- a/test/result/database/new_database.result
+++ b/test/result/database/new_database.result
@@ -10,6 +10,7 @@ show databases;
 Database
 mo_catalog
 information_schema
+system
 system_metrics
 new_database
 test01
@@ -29,6 +30,7 @@ show databases;
 Database
 mo_catalog
 information_schema
+system
 system_metrics
 new_database
 ABCDE
-- 
GitLab