Merge branch 'dev_performance' of https://gitee.com/he3db/he3pg

This commit is contained in:
peitingwei 2023-07-05 15:05:12 +08:00
commit e8741f6214
155 changed files with 19590 additions and 6220 deletions

View File

@@ -178,7 +178,7 @@ pg_prewarm(PG_FUNCTION_ARGS)
for (block = first_block; block <= last_block; ++block)
{
CHECK_FOR_INTERRUPTS();
smgrread(rel->rd_smgr, forkNumber, block, blockbuffer.data, GetXLogWriteRecPtr());
smgrread(rel->rd_smgr, forkNumber, block, blockbuffer.data);
++blocks_done;
}
}

View File

@@ -22,6 +22,7 @@
#include "storage/smgr.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"
#include "postmaster/bgwriter.h"
PG_MODULE_MAGIC;
@@ -385,6 +386,7 @@ pg_truncate_visibility_map(PG_FUNCTION_ARGS)
Relation rel;
ForkNumber fork;
BlockNumber block;
XLogRecPtr lsn = InvalidXLogRecPtr;
rel = relation_open(relid, AccessExclusiveLock);
@@ -394,13 +396,6 @@ pg_truncate_visibility_map(PG_FUNCTION_ARGS)
RelationOpenSmgr(rel);
rel->rd_smgr->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = InvalidBlockNumber;
block = visibilitymap_prepare_truncate(rel, 0);
if (BlockNumberIsValid(block))
{
fork = VISIBILITYMAP_FORKNUM;
smgrtruncate(rel->rd_smgr, &fork, 1, &block);
}
if (RelationNeedsWAL(rel))
{
xl_smgr_truncate xlrec;
@@ -411,8 +406,36 @@ pg_truncate_visibility_map(PG_FUNCTION_ARGS)
XLogBeginInsert();
XLogRegisterData((char *) &xlrec, sizeof(xlrec));
lsn = XLogInsert(RM_SMGR_ID, XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE);
XLogFlush(lsn);
if (IsBootstrapProcessingMode() != true && InitdbSingle != true) {
RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT
| CHECKPOINT_FLUSH_ALL);
}
}
XLogInsert(RM_SMGR_ID, XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE);
block = visibilitymap_prepare_truncate(rel, 0);
if (BlockNumberIsValid(block))
{
fork = VISIBILITYMAP_FORKNUM;
/*
* Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
* just drop them without bothering to write the contents.
*/
DropRelFileNodeBuffers(rel->rd_smgr, &fork, 1, &block);
/*
* Send a shared-inval message to force other backends to close any smgr
* references they may have for this rel. This is useful because they
* might have open file pointers to segments that got removed, and/or
* smgr_targblock variables pointing past the new rel end. (The inval
* message will come back to our backend, too, causing a
* probably-unnecessary local smgr flush. But we don't expect that this
* is a performance-critical path.) As in the unlink code, we want to be
* sure the message is sent before we start changing things on-disk.
*/
CacheInvalidateSmgr(rel->rd_smgr->smgr_rnode);
smgrtruncatelsn(rel->rd_smgr, &fork, 1, &block, lsn);
}
/*

View File

@@ -0,0 +1,107 @@
# Deployment
## 1 Start native PG as the primary
### 1.1 Build and install PG 14.2 from source
```shell
./configure --enable-depend --enable-cassert --enable-debug CFLAGS="-ggdb -O0" --prefix=/home/postgres/psql14_pg
make && make install
```
For the configure options, see [CONFIGURE-OPTIONS](https://www.postgresql.org/docs/current/install-procedure.html#CONFIGURE-OPTIONS).
### 1.2 Initialize the data directory
```shell
cd /home/postgres/psql14_pg
./bin/initdb -D /home/postgres/pgdata_14
```
### 1.3 Edit the configuration file
```shell
vim /home/postgres/pgdata_14/postgresql.conf
port=15432
wal_level = replica
wal_recycle=off
```
Edit the access control file
```shell
vim /home/postgres/pgdata_14/pg_hba.conf
host repl all 0.0.0.0/0 trust
```
### 1.4 Start the server
```shell
./bin/pg_ctl -D /home/postgres/pgdata_14 start -l logfile
```
### 1.5 Create a streaming replication user
```shell
./bin/psql -h127.0.0.1 -p15432
postgres=# CREATE ROLE repl login replication encrypted password 'repl';
```
## 2 Start He3DB as the standby
### 2.1 Build and install He3DB (PostgreSQL)
```shell
# The build depends on the static library he3pg/src/backend/storage/file/librust_log.a
./configure --enable-depend --enable-cassert --enable-debug CFLAGS="-ggdb -O0" --prefix=/home/postgres/psqlhe3_mirror
make && make install
```
### 2.2 Take a base backup from the primary
```shell
cd /home/postgres/psqlhe3_mirror
./bin/pg_basebackup -h 127.0.0.1 -p 15432 -U repl -R -Fp -Xs -Pv -D /home/postgres/pgdata_mirror
```
### 2.3 Edit the postgresql.conf configuration
```shell
vim /home/postgres/pgdata_mirror/postgresql.conf
# Append the following settings at the end of the configuration file
primary_conninfo = 'application_name=pushstandby user=repl host=127.0.0.1 port=15432 sslmode=disable sslcompression=0 gssencmode=disable target_session_attrs=any'
hot_standby=on
port = 5434
push_standby=on
wal_recycle=off
fsync=off
wal_keep_size=10000
full_page_writes=off
he3mirror=true
```
### 2.4 Start the server
```shell
./bin/pg_ctl -D /home/postgres/pgdata_mirror start -l logfile
```
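Before moving on, a quick sanity check (a sketch reusing the port configured above) is to confirm the standby is actually in recovery:
```shell
./bin/psql -h127.0.0.1 -p5434 -c 'select pg_is_in_recovery();'
```
It should return `t`.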
## 3 Verification
### 3.1 Connect to the primary and insert data
```shell
./bin/psql -h127.0.0.1 -p15432
postgres=# create table "t1" (id int);
CREATE TABLE
postgres=# insert into t1 values(1);
INSERT 0 1
```
### 3.2 Verify the data on the standby
```shell
./bin/psql -h127.0.0.1 -p5434
postgres=# select * from t1;
id
----
1
(1 row)
```
### 3.3 Connect to the primary and insert more data
```shell
./bin/psql -h127.0.0.1 -p15432
postgres=# insert into t1 values(2);
INSERT 0 1
```
### 3.4 Verify the data on the standby
```shell
./bin/psql -h127.0.0.1 -p5434
postgres=# select * from t1;
id
----
1
2
(2 rows)
```
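To confirm the standby is streaming from the primary, you can also query `pg_stat_replication` on the primary side (a sketch reusing the connection settings above):
```shell
./bin/psql -h127.0.0.1 -p15432 -c 'select application_name, state, sync_state from pg_stat_replication;'
```
The `pushstandby` entry configured in `primary_conninfo` should appear with state `streaming`.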

hbr-raw/cmd/archive-raw.go (new file, 118 lines)
View File

@@ -0,0 +1,118 @@
package cmd
import (
"bytes"
"fmt"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/store/tikv"
"github.com/spf13/cobra"
)
var archiveCmd = &cobra.Command{
Use: "archive",
Short: "Archive He3DB Xlog KV",
Long: "Welcome to use hbr for He3DB xlog archive",
Run: runArchive,
}
func init() {
rootCmd.AddCommand(archiveCmd)
}
func runArchive(cmd *cobra.Command, args []string) {
var sem = make(chan bool, concurrency)
archiveStart := time.Now()
access_key, _ := cmd.Flags().GetString("access_key")
secret_key, _ := cmd.Flags().GetString("secret_key")
endpoint, _ := cmd.Flags().GetString("endpoint")
region, _ := cmd.Flags().GetString("region")
bucket, _ := cmd.Flags().GetString("bucket")
pd, _ := cmd.Flags().GetString("pd")
backup_name, _ := cmd.Flags().GetString("name")
archive_start_time_line, _ := cmd.Flags().GetString("archive_start_time_line")
archive_start_lsn, _ := cmd.Flags().GetString("archive_start_lsn")
if access_key == "" || secret_key == "" || endpoint == "" || region == "" || bucket == "" || pd == "" || backup_name == "" || archive_start_time_line == "" || archive_start_lsn == "" {
fmt.Printf("PARAMETER ERROR!\n")
return
}
client, err := tikv.NewRawKVClient([]string{pd}, config.Security{})
if err != nil {
fmt.Printf("Connect Tikv Error!\n%v\n", err)
return
}
sess, err := session.NewSession(&aws.Config{
Region: aws.String(region),
Endpoint: aws.String(endpoint),
Credentials: credentials.NewStaticCredentials(access_key, secret_key, ""),
S3ForcePathStyle: aws.Bool(true),
})
if err != nil {
fmt.Printf("Connect S3 Error!\n%v\n", err)
return
}
s3_client := s3.New(sess)
var filename string = ""
wlCount := 0
// archive wal kv
fmt.Printf("archive wal kv!\n")
for id := 0; id < 8; id++ {
//06000000000000000100000000000000070000000000000000
//Because an id field was added to the key, cross-timeline archiving is currently not supported
retStartString := fmt.Sprintf("06%s000000000000000%d%s", archive_start_time_line, id, archive_start_lsn)
//retEndString := fmt.Sprintf("06ffffffffffffffff000000000000000%dffffffffffffffff", id)
retEndString := fmt.Sprintf("06%s000000000000000%dffffffffffffffff", archive_start_time_line, id)
retStart := make([]byte, 25)
retEnd := make([]byte, 25)
index := 0
for i := 0; i < len(retStartString); i += 2 {
value, _ := strconv.ParseUint(retStartString[i:i+2], 16, 8)
retStart[index] = byte(0xff & value)
value, _ = strconv.ParseUint(retEndString[i:i+2], 16, 8)
retEnd[index] = byte(0xff & value)
index++
}
fmt.Printf("%x\n", retStart)
fmt.Printf("%x\n", retEnd)
limit := 10240
for {
keys, values, _ := client.Scan(retStart, retEnd, limit)
for k := range keys {
fmt.Printf("%x\n", keys[k])
filename = fmt.Sprintf("%x", keys[k])
wg.Add(1)
sem <- true
go s3PutKV(s3_client, bucket, backup_name, filename, values[k], sem)
if bytes.Compare(retStart, keys[k]) < 0 {
retStart = keys[k]
}
wlCount++
}
if len(keys) < limit {
break
}
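// The next Scan restarts from the last key returned (the start key is inclusive),
// so that key is fetched and counted again on the following pass; the decrement
// below presumably offsets that re-count.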
wlCount--
}
}
wg.Wait()
client.Close()
fmt.Printf("wal kv count:%v\n", wlCount)
fmt.Println("backup time:", time.Since(archiveStart))
}
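A possible invocation of the archive subcommand, using the persistent flags registered in help.go (assuming the module has been built into an hbr binary; the endpoint, bucket, and credential values below are placeholders, not taken from the source):
```shell
./hbr archive --pd http://127.0.0.1:2379 \
  --access_key <ACCESS_KEY> --secret_key <SECRET_KEY> \
  --endpoint http://127.0.0.1:9000 --region us-east-1 --bucket he3-backup \
  --name backup-20230705 \
  --archive_start_time_line 0000000000000001 --archive_start_lsn 0000000000000000 \
  --concurrency 100
```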

hbr-raw/cmd/help.go (new file, 110 lines)
View File

@@ -0,0 +1,110 @@
package cmd
import (
"bytes"
//"context"
"fmt"
"io/ioutil"
"strconv"
"sync"
"os"
//"time"
"github.com/aws/aws-sdk-go/aws"
//"github.com/aws/aws-sdk-go/aws/credentials"
//"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
//"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/store/tikv"
"github.com/spf13/cobra"
)
var rootCmd = &cobra.Command{
Use: "hbr",
Short: "He3DB backup&restore",
Long: "Welcome to use hbr for He3DB backup&restore",
Run: runRoot,
}
var wg sync.WaitGroup
var concurrency int
func init() {
rootCmd.PersistentFlags().String("access_key", "", "S3 Access Key")
rootCmd.PersistentFlags().String("secret_key", "", "S3 Secret Key")
rootCmd.PersistentFlags().String("endpoint", "", "S3 endpoint")
rootCmd.PersistentFlags().String("region", "", "S3 region")
rootCmd.PersistentFlags().String("bucket", "", "S3 bucket")
rootCmd.PersistentFlags().String("pd", "http://127.0.0.1:2379", "Tikv placement driber")
rootCmd.PersistentFlags().String("name", "", "Backup name")
rootCmd.PersistentFlags().String("archive_start_file", "000000010000000000000001", "start key of archive[included]")
rootCmd.PersistentFlags().String("archive_start_time_line", "0000000000000001", "start time line of archive[included]")
rootCmd.PersistentFlags().String("archive_start_lsn", "0000000000000000", "start lsn of archive[included]")
rootCmd.PersistentFlags().IntVar(&concurrency, "concurrency", 100, "concurrency")
}
func Execute() {
if err := rootCmd.Execute(); err != nil {
panic(err)
}
}
func runRoot(cmd *cobra.Command, args []string) {
fmt.Printf("Welcome to use hbr for He3DB backup&restore\n")
}
func s3PutKV(s3_client *s3.S3, bucket string, backup_name string, filename string, v []byte, sem chan bool) {
defer wg.Done()
defer func() {
<-sem
}()
_, err := s3_client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(backup_name + "/" + filename),
Body: bytes.NewReader(v),
})
if err != nil {
fmt.Printf("S3 PutObject Error!\n%v\n", err)
os.Exit(1)
}
//fmt.Printf("S3 PutObject!\n")
}
func s3RestoreKVRaw(s3_client *s3.S3, bucket string, backup_name string, keys *s3.Object, client *tikv.RawKVClient, sem chan bool) {
defer wg.Done()
defer func() {
<-sem
}()
out, err := s3_client.GetObject(&s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(*keys.Key),
})
if err != nil {
fmt.Printf("S3 GetObject Error!\n%v\n", err)
os.Exit(1)
}
defer out.Body.Close()
data, err := ioutil.ReadAll(out.Body)
if err != nil {
fmt.Printf("out.Body.Read!\n%v\n", err)
os.Exit(1)
}
fmt.Printf("filename:%s\n", (*keys.Key)[len(backup_name)+1:])
ret := make([]byte, (len(*keys.Key)-len(backup_name)-1)/2)
index := 0
for i := len(backup_name) + 1; i < len(*keys.Key); i += 2 {
value, _ := strconv.ParseUint((*keys.Key)[i:i+2], 16, 8)
ret[index] = byte(0xff & value)
index++
}
if err := client.Put(ret, data); err != nil {
fmt.Printf("Tikv Set Error!\n%v\n", err)
os.Exit(1)
}
}
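The cmd package is wired up through rootCmd/Execute; a minimal build sketch, assuming the module's main package lives at the repository root and calls cmd.Execute():
```shell
cd hbr-raw
go build -o hbr .
./hbr --help
```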

View File

@@ -0,0 +1,95 @@
package cmd
import (
//"context"
"fmt"
//"io/ioutil"
//"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/spf13/cobra"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/store/tikv"
)
var restoreCmd = &cobra.Command{
Use: "restore",
Short: "Restore He3DB",
Long: "Welcome to use hbr for He3DB restore",
Run: runRestore,
}
func init() {
rootCmd.AddCommand(restoreCmd)
}
func runRestore(cmd *cobra.Command, args []string) {
var sem = make(chan bool, concurrency)
restoreStart := time.Now()
access_key, _ := cmd.Flags().GetString("access_key")
secret_key, _ := cmd.Flags().GetString("secret_key")
endpoint, _ := cmd.Flags().GetString("endpoint")
region, _ := cmd.Flags().GetString("region")
bucket, _ := cmd.Flags().GetString("bucket")
pd, _ := cmd.Flags().GetString("pd")
backup_name, _ := cmd.Flags().GetString("name")
if access_key == "" || secret_key == "" || endpoint == "" || region == "" || bucket == "" || pd == "" || backup_name == "" {
fmt.Printf("PARAMETER ERROR!\n")
return
}
client, err := tikv.NewRawKVClient([]string{pd}, config.Security{})
if err != nil {
fmt.Printf("Connect Tikv Error!\n%v\n", err)
return
}
sess, err := session.NewSession(&aws.Config{
Region: aws.String(region),
Endpoint: aws.String(endpoint),
Credentials: credentials.NewStaticCredentials(access_key, secret_key, ""),
S3ForcePathStyle: aws.Bool(true),
})
if err != nil {
fmt.Printf("Connect S3 Error!\n%v\n", err)
return
}
s3_client := s3.New(sess)
count := 0
input := &s3.ListObjectsInput{
Bucket: aws.String(bucket),
Prefix: aws.String(backup_name),
}
for {
resp, err := s3_client.ListObjects(input)
if err != nil {
fmt.Printf("S3 ListObjects Error!\n%v\n", err)
return
}
for _, keys := range resp.Contents {
wg.Add(1)
sem <- true
go s3RestoreKVRaw(s3_client, bucket, backup_name, keys, client, sem)
count++
}
if resp.NextMarker == nil {
fmt.Printf("Done!\n")
break
}
input.Marker = resp.NextMarker
}
wg.Wait()
fmt.Printf("N:%v\n", count)
fmt.Println("restore time:", time.Since(restoreStart))
}
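A matching restore invocation (again with placeholder credentials and endpoint) that copies every object under the backup prefix back into TiKV:
```shell
./hbr restore --pd http://127.0.0.1:2379 \
  --access_key <ACCESS_KEY> --secret_key <SECRET_KEY> \
  --endpoint http://127.0.0.1:9000 --region us-east-1 --bucket he3-backup \
  --name backup-20230705 --concurrency 100
```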

hbr-raw/cmd/scan.go (new file, 92 lines)
View File

@@ -0,0 +1,92 @@
package cmd
import (
"bytes"
"fmt"
"strconv"
"time"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/store/tikv"
"github.com/spf13/cobra"
)
var archive3Cmd = &cobra.Command{
Use: "scan",
Short: "Archive He3DB Xlog KV",
Long: "Welcome to use hbr for He3DB xlog archive",
Run: runArchive3,
}
func init() {
rootCmd.AddCommand(archive3Cmd)
}
func runArchive3(cmd *cobra.Command, args []string) {
archiveStart := time.Now()
access_key, _ := cmd.Flags().GetString("access_key")
secret_key, _ := cmd.Flags().GetString("secret_key")
endpoint, _ := cmd.Flags().GetString("endpoint")
region, _ := cmd.Flags().GetString("region")
bucket, _ := cmd.Flags().GetString("bucket")
pd, _ := cmd.Flags().GetString("pd")
backup_name, _ := cmd.Flags().GetString("name")
archive_start_time_line, _ := cmd.Flags().GetString("archive_start_time_line")
archive_start_lsn, _ := cmd.Flags().GetString("archive_start_lsn")
if access_key == "" || secret_key == "" || endpoint == "" || region == "" || bucket == "" || pd == "" || backup_name == "" || archive_start_time_line == "" || archive_start_lsn == "" {
fmt.Printf("PARAMETER ERROR!\n")
return
}
client, err := tikv.NewRawKVClient([]string{pd}, config.Security{})
if err != nil {
fmt.Printf("Connect Tikv Error!\n%v\n", err)
return
}
wlCount := 0
// archive wal kv
fmt.Printf("archive wal kv!\n")
//0600000000000000010000000000000000
retStartString := fmt.Sprintf("06%s%s", archive_start_time_line, archive_start_lsn)
retEndString := "06ffffffffffffffffffffffffffffffff"
retStart := make([]byte, 17)
retEnd := make([]byte, 17)
index := 0
for i := 0; i < len(retStartString); i += 2 {
value, _ := strconv.ParseUint(retStartString[i:i+2], 16, 8)
retStart[index] = byte(0xff & value)
value, _ = strconv.ParseUint(retEndString[i:i+2], 16, 8)
retEnd[index] = byte(0xff & value)
index++
}
fmt.Printf("%x\n", retStart)
fmt.Printf("%x\n", retEnd)
limit := 10240
for {
keys, _, _ := client.Scan(retStart, retEnd, limit)
for k := range keys {
fmt.Printf("%x\n", keys[k])
if bytes.Compare(retStart, keys[k]) < 0 {
retStart = keys[k]
}
wlCount++
}
if len(keys) < limit {
break
}
wlCount--
}
//wg.Wait()
client.Close()
fmt.Printf("wal kv count:%v\n", wlCount)
fmt.Println("backup time:", time.Since(archiveStart))
}

hbr-raw/cmd/version.go (new file, 21 lines)
View File

@@ -0,0 +1,21 @@
package cmd
import (
"fmt"
"github.com/spf13/cobra"
)
var versionCmd = &cobra.Command{
Use: "version",
Short: "Show Version",
Run: runVersion,
}
func init() {
rootCmd.AddCommand(versionCmd)
}
func runVersion(cmd *cobra.Command, args []string) {
fmt.Println("Version 1.0.0 ")
}

hbr-raw/go.mod (new file, 62 lines)
View File

@@ -0,0 +1,62 @@
module hbr-raw
go 1.18
require (
github.com/aws/aws-sdk-go v1.30.24
github.com/pingcap/tidb v1.1.0-beta.0.20210419034717-00632fb3c710
github.com/spf13/cobra v1.0.0
)
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37 // indirect
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
github.com/go-ole/go-ole v1.2.4 // indirect
github.com/gogo/protobuf v1.3.1 // indirect
github.com/golang/protobuf v1.3.4 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/uuid v1.1.1 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.3.0 // indirect
github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 // indirect
github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce // indirect
github.com/pingcap/kvproto v0.0.0-20201126113434-70db5fb4b0dc // indirect
github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8 // indirect
github.com/pingcap/parser v0.0.0-20210107054750-53e33b4018fe // indirect
github.com/pingcap/tipb v0.0.0-20200618092958-4fad48b4c8c3 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.5.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.9.1 // indirect
github.com/prometheus/procfs v0.0.8 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 // indirect
github.com/shirou/gopsutil v2.20.3+incompatible // indirect
github.com/sirupsen/logrus v1.6.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/tikv/pd v0.0.0-20210105112549-e5be7fd38659 // indirect
github.com/uber/jaeger-client-go v2.22.1+incompatible // indirect
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.16.0 // indirect
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc // indirect
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.3.4 // indirect
google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f // indirect
google.golang.org/grpc v1.26.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
)

hbr-raw/go.sum (new file, 784 lines)
View File

@@ -0,0 +1,784 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Jeffail/gabs/v2 v2.5.1/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/mysqlerr v0.0.0-20200629151747-c28746d985dd/go.mod h1:f3HiCrHjHBdcm6E83vGaXh1KomZMA2P6aeo3hKx/wg0=
github.com/Xeoncross/go-aesctr-with-hmac v0.0.0-20200623134604-12b17a7ff502/go.mod h1:pmnBM9bxWSiHvC/gSWunUIyDvGn33EkP2CUjxFKtTTM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
github.com/appleboy/gin-jwt/v2 v2.6.3/go.mod h1:MfPYA4ogzvOcVkRwAxT7quHOtQmVKDpTwxyUrC2DNw0=
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/aws/aws-sdk-go v1.30.24 h1:y3JPD51VuEmVqN3BEDVm4amGpDma2cKJcDPuAU1OR58=
github.com/aws/aws-sdk-go v1.30.24/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU=
github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets=
github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb/v3 v3.0.4 h1:QZEPYOj2ix6d5oEg63fbHmpolrnNiwjUsk+h74Yt4bM=
github.com/cheggaaa/pb/v3 v3.0.4/go.mod h1:7rgWxLrAUcFMkvJuv09+DYi7mMUYi8nO9iOWcvGJPfw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/corona10/goimagehash v1.0.2/go.mod h1:/l9umBhvcHQXVtQO1V6Gp1yD20STawkhRnnX0D1bvVI=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1/go.mod h1:2B43mz36vGZNZEwkWi8ayRSSUXLfjL8OkbzwW4NcPMM=
github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 h1:LpMLYGyy67BoAFGda1NeOBQwqlv7nUXpm+rIVHGxZZ4=
github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
github.com/cznic/y v0.0.0-20170802143616-045f81c6662a/go.mod h1:1rk5VM7oSnA4vjp+hrLQ3HWHa+Y4yPCa3/CsJrcNnvs=
github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37 h1:X6mKGhCFOxrKeeHAjv/3UvT6e5RRxW6wRdlqlV6/H4w=
github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37/go.mod h1:DC3JtzuG7kxMvJ6dZmf2ymjNyoXwgtklr7FN+Um2B0U=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w=
github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y=
github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/goccy/go-graphviz v0.0.5/go.mod h1:wXVsXxmyMQU6TN3zGRttjNn3h+iCAS7xQFC6TlNvLhk=
github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200407044318-7d83b28da2e9 h1:K+lX49/3eURCE1IjlaZN//u6c+9nfDAMnyQ9E2dsJbY=
github.com/google/pprof v0.0.0-20200407044318-7d83b28da2e9/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg=
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
github.com/grpc-ecosystem/grpc-gateway v1.14.3 h1:OCJlWkOUoTnl0neNGlf4fUm3TmbEtguw7vR+nGtnDjY=
github.com/grpc-ecosystem/grpc-gateway v1.14.3/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0=
github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69/go.mod h1:YLEMZOtU+AZ7dhN9T/IpGhXVGly2bvkJQ+zxj3WeVQo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hypnoglow/gormzap v0.3.0/go.mod h1:5Wom8B7Jl2oK0Im9hs6KQ+Kl92w4Y7gKCrj66rhyvw0=
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8=
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/joomcode/errorx v1.0.1/go.mod h1:kgco15ekB6cs+4Xjzo7SPeXzx38PbJzBwbnu9qfVNHQ=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=
github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
github.com/mgechev/revive v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/montanaflynn/stats v0.5.0 h1:2EkzeTSqBB4V4bJwWrt5gIIrZmpJBcoIRGS2kWLgzmk=
github.com/montanaflynn/stats v0.5.0/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nfnt/resize v0.0.0-20160724205520-891127d8d1b5/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c=
github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI=
github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k=
github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8=
github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/oleiade/reflections v1.0.0/go.mod h1:RbATFBbKYkVdqmSFtx13Bb/tVhR0lgOBXunWTZKeL4w=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d h1:U+PMnTlV2tu7RuMK5etusZG3Cf+rpow5hqQByeCzJ2g=
github.com/phf/go-queue v0.0.0-20170504031614-9abe38d0371d/go.mod h1:lXfE4PvvTW5xOjO6Mba8zDPyw8M93B6AQ7frTGnMlA8=
github.com/pingcap-incubator/tidb-dashboard v0.0.0-20201126111827-6c8be2240067/go.mod h1:EONGys2gM5n14pII2vjmU/5VG3Dtj6kpqUT1GUZ4ysw=
github.com/pingcap/br v4.0.9-0.20201215065036-804aa9087197+incompatible h1:Ceeu3/hX1LSdKpcaI8Sc6STOAxurxa9tDo0mqHmQ/Yc=
github.com/pingcap/br v4.0.9-0.20201215065036-804aa9087197+incompatible/go.mod h1:ymVmo50lQydxib0tmK5hHk4oteB7hZ0IMCArunwy3UQ=
github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
github.com/pingcap/check v0.0.0-20191107115940-caf2b9e6ccf4/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc=
github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc=
github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 h1:R8gStypOBmpnHEx1qi//SaqxJVI4inOqljg/Aj5/390=
github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc=
github.com/pingcap/errcode v0.3.0/go.mod h1:4b2X8xSqxIroj/IZ9MX/VGZhAwc11wB9wRIzHvz6SeM=
github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pingcap/errors v0.11.5-0.20201029093017-5a7df2af2ac7/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 h1:LllgC9eGfqzkfubMgjKIDyZYaa609nNWAyNZtpy2B3M=
github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3/go.mod h1:G7x87le1poQzLB/TqvTJI2ILrSgobnq4Ut7luOwvfvI=
github.com/pingcap/failpoint v0.0.0-20191029060244-12f4ac2fd11d/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI=
github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce h1:Y1kCxlCtlPTMtVcOkjUcuQKh+YrluSo7+7YMCQSzy30=
github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce/go.mod h1:w4PEZ5y16LeofeeGwdgZB4ddv9bLyDuIX+ljstgKZyk=
github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059/go.mod h1:fMRU1BA1y+r89AxUoaAar4JjrhUkVDt0o0Np6V8XbDQ=
github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 h1:surzm05a8C9dN8dIUmo4Be2+pMRb6f55i+UIYrluu2E=
github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w=
github.com/pingcap/kvproto v0.0.0-20200411081810-b85805c9476c/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
github.com/pingcap/kvproto v0.0.0-20200907074027-32a3a0accf7d/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
github.com/pingcap/kvproto v0.0.0-20201126113434-70db5fb4b0dc h1:BtszN3YR5EScxiGGTD3tAf4CQE90bczkOY0lLa07EJA=
github.com/pingcap/kvproto v0.0.0-20201126113434-70db5fb4b0dc/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI=
github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8 h1:M+DNpOu/I3uDmwee6vcnoPd6GgSMqND4gxvDQ/W584U=
github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8=
github.com/pingcap/parser v0.0.0-20210107054750-53e33b4018fe h1:sukVKRva68HNGZ4nuPvQS/wMvH7NMxTXV2NIhmoYP4Y=
github.com/pingcap/parser v0.0.0-20210107054750-53e33b4018fe/go.mod h1:GbEr2PgY72/4XqPZzmzstlOU/+il/wrjeTNFs6ihsSE=
github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI=
github.com/pingcap/sysutil v0.0.0-20201130064824-f0c8aa6a6966 h1:JI0wOAb8aQML0vAVLHcxTEEC0VIwrk6gtw3WjbHvJLA=
github.com/pingcap/sysutil v0.0.0-20201130064824-f0c8aa6a6966/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI=
github.com/pingcap/tidb v1.1.0-beta.0.20210419034717-00632fb3c710 h1:PlH7u1SkJNXlUtFzh+NHkM8fgXoDsT7BIzX/7+sOZcg=
github.com/pingcap/tidb v1.1.0-beta.0.20210419034717-00632fb3c710/go.mod h1:WbISBEy4rQRvGhvFJsjK3WHYl14OpZeqchjrlQbIeHc=
github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible h1:ceznmu/lLseGHP/jKyOa/3u/5H3wtLLLqkH2V3ssSjg=
github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM=
github.com/pingcap/tipb v0.0.0-20200618092958-4fad48b4c8c3 h1:ESL3eIt1kUt8IMvR1011ejZlAyDcOzw89ARvVHvpD5k=
github.com/pingcap/tipb v0.0.0-20200618092958-4fad48b4c8c3/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA=
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237 h1:HQagqIiBmr8YXawX/le3+O26N+vPPC1PtjaF3mwnook=
github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil v2.19.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil v2.20.3+incompatible h1:0JVooMPsT7A7HqEYdydp/OfjSOYSjhXV7w1hkKj/NPQ=
github.com/shirou/gopsutil v2.20.3+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E=
github.com/swaggo/gin-swagger v1.2.0/go.mod h1:qlH2+W7zXGZkczuL+r2nEBR2JTT+/lX05Nn6vPhc7OI=
github.com/swaggo/http-swagger v0.0.0-20200308142732-58ac5e232fba/go.mod h1:O1lAbCgAAX/KZ80LM/OXwtWFI/5TvZlwxSg8Cq08PV0=
github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y=
github.com/swaggo/swag v1.6.3/go.mod h1:wcc83tB4Mb2aNiL/HP4MFeQdpHUrca+Rp/DRNgWAUio=
github.com/swaggo/swag v1.6.6-0.20200529100950-7c765ddd0476/go.mod h1:xDhTyuFIujYiN3DKWC/H/83xcfHp+UE/IzWWampG7Zc=
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285 h1:uSDYjYejelKyceA6DiCsngFof9jAyeaSyX9XC5a1a7Q=
github.com/syndtr/goleveldb v1.0.1-0.20190625010220-02440ea7a285/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
github.com/thoas/go-funk v0.7.0/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q=
github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU=
github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tikv/pd v0.0.0-20210105112549-e5be7fd38659 h1:k7pQD4T2iTVphdaYRjRhv7lZ+dlUpsdAK+ogDVYkBbk=
github.com/tikv/pd v0.0.0-20210105112549-e5be7fd38659/go.mod h1:Zh9gNK7Q02Q0DByC05P+HJETLelP8R8RMYeyQ1EMMJA=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/uber-go/atomic v1.4.0 h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=
github.com/uber-go/atomic v1.4.0/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
github.com/uber/jaeger-client-go v2.22.1+incompatible h1:NHcubEkVbahf9t3p75TOCR83gdUHXjRJvjoBh1yACsM=
github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/unrolled/render v1.0.1/go.mod h1:gN9T0NhL4Bfbwu8ann7Ry/TGHYfosul+J0obPf6NBdM=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ=
github.com/urfave/negroni v0.3.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
github.com/vmihailenco/msgpack/v5 v5.0.0-beta.1/go.mod h1:xlngVLeyQ/Qi05oQxhQ+oTuqa03RjMwMfk/7/TCs+QI=
github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 h1:lWF4f9Nypl1ZqSb4gLeh/DGvBYVaUYHuiB93teOmwgc=
go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU=
go.uber.org/dig v1.8.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw=
go.uber.org/fx v1.10.0/go.mod h1:vLRicqpG/qQEzno4SYU86iCwfT95EZza+Eba0ItuxqY=
go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4=
go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM=
go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc h1:zK/HqS5bZxDptfPJNq8v7vJfXtkU7r9TLIoSr1bXaP4=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200819171115-d785dc25833f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191030062658-86caa796c7ab/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191114200427-caa0b0f7d508/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200225230052-807dcd883420/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200410194907-79a7a3126eef/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200527183253-8e7acdbce89d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200820010801-b793a1359eac h1:DugppSxw0LSF8lcjaODPJZoDzq0ElTGskTst3ZaBkHI=
golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.1 h1:5mMS6mYvK5LVB8+ujVBC33Y8gltBo/kT6HBm6kU80G4=
google.golang.org/api v0.15.1/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f h1:2wh8dWY8959cBGQvk1RD+/eQBgRYYDaZ+hT0/zsARoA=
google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/oleiade/reflections.v1 v1.0.0/go.mod h1:SpA8pv+LUnF0FbB2hyRxc8XSng78D6iLBZ11PDb8Z5g=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.6 h1:W18jzjh8mfPez+AwGLxmOImucz/IFjpNlrKVnaj2YVc=
honnef.co/go/tools v0.0.1-2020.1.6/go.mod h1:pyyisuGw24ruLjrr1ddx39WE0y9OooInRzEYLhQB2YY=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 h1:ucqkfpjg9WzSUubAO62csmucvxl4/JeW3F4I4909XkM=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k=

9
hbr-raw/main.go Normal file
View File

@ -0,0 +1,9 @@
package main
import (
"hbr-raw/cmd"
)
func main() {
cmd.Execute()
}

313
hbr/cmd/archive.go Normal file
View File

@ -0,0 +1,313 @@
package cmd
import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"fmt"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/spf13/cobra"
"github.com/tikv/client-go/txnkv"
)
var archiveCmd = &cobra.Command{
Use: "archive",
Short: "Archive He3DB Xlog",
Long: "Welcome to use hbr for He3DB xlog archive",
Run: runArchive,
}
func init() {
rootCmd.AddCommand(archiveCmd)
}
type Inode struct {
Ino uint64 `json:"ino"`
}
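// Unt64ToBytes returns the 8-byte big-endian encoding of n; inode numbers
// are embedded into TiKV keys in this form.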
func Unt64ToBytes(n uint64) []byte {
x := uint64(n)
bytesBuffer := bytes.NewBuffer([]byte{})
binary.Write(bytesBuffer, binary.BigEndian, x)
return bytesBuffer.Bytes()
}
func runArchive(cmd *cobra.Command, args []string) {
access_key, _ := cmd.Flags().GetString("access_key")
secret_key, _ := cmd.Flags().GetString("secret_key")
endpoint, _ := cmd.Flags().GetString("endpoint")
region, _ := cmd.Flags().GetString("region")
bucket, _ := cmd.Flags().GetString("bucket")
pd, _ := cmd.Flags().GetString("pd")
backup_name, _ := cmd.Flags().GetString("name")
archive_start_file, _ := cmd.Flags().GetString("archive_start_file")
if access_key == "" || secret_key == "" || endpoint == "" || region == "" || bucket == "" || pd == "" || backup_name == "" || archive_start_file == "" {
fmt.Printf("PARAMETER ERROR!\n")
return
}
client, err := txnkv.NewClient([]string{pd})
if err != nil {
fmt.Printf("Connect Tikv Error!\n%v\n", err)
return
}
txn, err := client.Begin()
if err != nil {
fmt.Printf("Tikv Transaction Begin Error!\n%v\n", err)
return
}
sess, err := session.NewSession(&aws.Config{
Region: aws.String(region),
Endpoint: aws.String(endpoint),
Credentials: credentials.NewStaticCredentials(access_key, secret_key, ""),
S3ForcePathStyle: aws.Bool(true),
})
if err != nil {
fmt.Printf("Connect S3 Error!\n%v\n", err)
return
}
s3_client := s3.New(sess)
var filename string = ""
allCount := 0
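// The archive walks the TiKV entries backing pg_wal and uploads each one to
// S3 under <backup_name>/<hex-encoded key>: the filesystem meta record, the
// pg_wal inode, the pg_wal directory block, and, for every new xlog file at
// or after archive_start_file, its file-index entry, its inode and its data blocks.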
//1:meta
fmt.Printf("Backup meta!\n")
ret := make([]byte, 1)
value, _ := strconv.ParseUint("00", 16, 8)
ret[0] = byte(0xff & value)
metaValue, err := txn.Get(context.TODO(), ret)
if err != nil {
fmt.Printf("Client Get meta Error!\n%v\n", err)
return
}
filename = fmt.Sprintf("%x", ret)
fmt.Printf("filename:%s\n", filename)
_, err = s3_client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(backup_name + "/" + filename),
Body: bytes.NewReader(metaValue),
})
if err != nil {
fmt.Printf("S3 Put meta Error!\n%v\n", err)
return
}
allCount++
//2:pg_wal inode
fmt.Printf("Backup pg_wal inode!\n")
pwValue, err := txn.Get(context.TODO(), []byte("pg_wal"))
if err != nil {
fmt.Printf("Client Get pg_wal Error!\n%v\n", err)
return
}
var pwi Inode
json.Unmarshal(pwValue, &pwi)
pwiKeyString := fmt.Sprintf("01%x", Unt64ToBytes(pwi.Ino))
fmt.Printf("%v\n", pwiKeyString)
ret = make([]byte, 9)
index := 0
for i := 0; i < len(pwiKeyString); i += 2 {
value, _ := strconv.ParseUint(pwiKeyString[i:i+2], 16, 8)
ret[index] = byte(0xff & value)
index++
}
pwiValue, err := txn.Get(context.TODO(), ret)
if err != nil {
fmt.Printf("Client Get pwiValue Error!\n%v\n", err)
return
}
filename = fmt.Sprintf("%x", ret)
fmt.Printf("filename:%s\n", filename)
_, err = s3_client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(backup_name + "/" + filename),
Body: bytes.NewReader(pwiValue),
})
if err != nil {
fmt.Printf("S3 Put pwiValue Error!\n%v\n", err)
return
}
allCount++
//3:pg_wal directory block
fmt.Printf("Backup pg_wal directory block!\n")
pwbKeyString := fmt.Sprintf("02%x0000000000000000", Unt64ToBytes(pwi.Ino))
ret = make([]byte, 17)
index = 0
for i := 0; i < len(pwbKeyString); i += 2 {
value, _ := strconv.ParseUint(pwbKeyString[i:i+2], 16, 8)
ret[index] = byte(0xff & value)
index++
}
pwbValue, err := txn.Get(context.TODO(), ret)
if err != nil {
fmt.Printf("Client Get pwbValue Error!\n%v\n", err)
return
}
filename = fmt.Sprintf("%x", ret)
fmt.Printf("filename:%s\n", filename)
_, err = s3_client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket), // bucket name
Key: aws.String(backup_name + "/" + filename),
Body: bytes.NewReader(pwbValue),
})
if err != nil {
fmt.Printf("S3 Put pwbValue Error!\n%v\n", err)
return
}
allCount++
//4:new file index
fmt.Printf("Backup new file index!\n")
pwiPrefixString := fmt.Sprintf("04%x", Unt64ToBytes(pwi.Ino))
retStart := []byte(fmt.Sprintf("400000002%s", archive_start_file))
retEnd := []byte("400000002ffffffffffffffffffffffff")
index = 0
for i := 0; i < len(pwiPrefixString); i += 2 {
value, _ := strconv.ParseUint(pwiPrefixString[i:i+2], 16, 8)
retStart[index] = byte(0xff & value)
retEnd[index] = byte(0xff & value)
index++
}
fiiter, err := txn.Iter(retStart, retEnd)
if err != nil {
fmt.Printf("new file index Iter Error!\n%v\n", err)
return
}
newFileIndexCount := 0
for fiiter.Valid() {
k, v := fiiter.Key(), fiiter.Value()
filename = fmt.Sprintf("%x", k)
fmt.Printf("filename:%s\n", filename)
_, err = s3_client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(backup_name + "/" + filename),
Body: bytes.NewReader(v),
})
if err != nil {
fmt.Printf("S3 PutObject Error!\n%v\n", err)
return
}
if err := fiiter.Next(); err != nil {
fmt.Printf("Iter Next Error!\n%v\n", err)
return
}
newFileIndexCount++
allCount++
//5:new xlog inode
fmt.Printf("Backup new xlog inode!\n")
var wali Inode
json.Unmarshal(v, &wali)
waliKeyString := fmt.Sprintf("01%x", Unt64ToBytes(wali.Ino))
ret = make([]byte, 9)
index = 0
for i := 0; i < len(waliKeyString); i += 2 {
value, _ := strconv.ParseUint(waliKeyString[i:i+2], 16, 8)
ret[index] = byte(0xff & value)
index++
}
waliValue, err := txn.Get(context.TODO(), ret)
if err != nil {
fmt.Printf("Client Get waliValue Error!\n%v\n", err)
return
}
filename = fmt.Sprintf("%x", ret)
fmt.Printf("filename:%s\n", filename)
_, err = s3_client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(backup_name + "/" + filename),
Body: bytes.NewReader(waliValue),
})
if err != nil {
fmt.Printf("S3 Put waliValue Error!\n%v\n", err)
return
}
allCount++
//6:new file block
fmt.Printf("Backup new file block!\n")
walbPrefixString := fmt.Sprintf("02%x", Unt64ToBytes(wali.Ino))
retStartString := fmt.Sprintf("%s0000000000000000", walbPrefixString)
retEndString := fmt.Sprintf("%s0000000000000100", walbPrefixString)
retStart := make([]byte, 17)
retEnd := make([]byte, 17)
index = 0
for i := 0; i < len(retStartString); i += 2 {
value, _ := strconv.ParseUint(retStartString[i:i+2], 16, 8)
retStart[index] = byte(0xff & value)
value, _ = strconv.ParseUint(retEndString[i:i+2], 16, 8)
retEnd[index] = byte(0xff & value)
index++
}
walbIter, err := txn.Iter(retStart, retEnd)
if err != nil {
fmt.Printf("walbIter Error!\n%v\n", err)
return
}
walbCount := 0
for walbIter.Valid() {
k1, v1 := walbIter.Key(), walbIter.Value()
filename = fmt.Sprintf("%x", k1)
fmt.Printf("filename:%s\n", filename)
_, err = s3_client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket), // bucket name
Key: aws.String(backup_name + "/" + filename),
Body: bytes.NewReader(v1),
})
if err != nil {
fmt.Printf("S3 PutObject Error!\n%v\n", err)
return
}
if err := walbIter.Next(); err != nil {
fmt.Printf("walbIter Next Error!\n%v\n", err)
return
}
walbCount++
allCount++
}
walbIter.Close()
fmt.Printf("walbCount:%v\n", walbCount)
}
if err := txn.Commit(context.TODO()); err != nil {
fmt.Printf("Tikv Transaction Commit Error!\n%v\n", err)
return
}
fiiter.Close()
fmt.Printf("new file index count:%v\n", newFileIndexCount)
fmt.Printf("allCount:%v\n", allCount)
client.Close()
}

100
hbr/cmd/backup.go Normal file
View File

@ -0,0 +1,100 @@
package cmd
import (
"bytes"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/spf13/cobra"
"github.com/tikv/client-go/txnkv"
)
var backupCmd = &cobra.Command{
Use: "backup",
Short: "Backup He3DB",
Long: "Welcome to use hbr for He3DB backup",
Run: runBackup,
}
func init() {
rootCmd.AddCommand(backupCmd)
}
func runBackup(cmd *cobra.Command, args []string) {
access_key, _ := cmd.Flags().GetString("access_key")
secret_key, _ := cmd.Flags().GetString("secret_key")
endpoint, _ := cmd.Flags().GetString("endpoint")
region, _ := cmd.Flags().GetString("region")
bucket, _ := cmd.Flags().GetString("bucket")
pd, _ := cmd.Flags().GetString("pd")
backup_name, _ := cmd.Flags().GetString("name")
if access_key == "" || secret_key == "" || endpoint == "" || region == "" || bucket == "" || pd == "" || backup_name == "" {
fmt.Printf("PARAMETER ERROR!\n")
return
}
client, err := txnkv.NewClient([]string{pd})
if err != nil {
fmt.Printf("Connect Tikv Error!\n%v\n", err)
return
}
defer func() {
client.Close()
}()
sess, err := session.NewSession(&aws.Config{
Region: aws.String(region),
Endpoint: aws.String(endpoint),
Credentials: credentials.NewStaticCredentials(access_key, secret_key, ""),
S3ForcePathStyle: aws.Bool(true),
})
if err != nil {
fmt.Printf("Connect S3 Error!\n%v\n", err)
return
}
s3_client := s3.New(sess)
ts, err := client.CurrentTimestamp("global")
if err != nil {
fmt.Printf("Set Timestamp Error!\n%v\n", err)
return
}
snapshot := client.GetSnapshot(ts)
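// Full backup: iterate the entire keyspace at this snapshot and upload every
// key/value pair to S3 as <backup_name>/<hex-encoded key>.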
iter, err := snapshot.Iter([]byte(""), []byte(""))
if err != nil {
fmt.Printf("Iter Error!\n%v\n", err)
return
}
defer iter.Close()
count := 0
for iter.Valid() {
k, v := iter.Key(), iter.Value()
var filename string = ""
filename = fmt.Sprintf("%x", k)
fmt.Printf("filename:%s\n", filename)
_, err = s3_client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(bucket), // bucket name
Key: aws.String(backup_name + "/" + filename),
Body: bytes.NewReader(v),
})
if err != nil {
fmt.Printf("S3 PutObject Error!\n%v\n", err)
return
}
if err := iter.Next(); err != nil {
fmt.Printf("Iter Next Error!\n%v\n", err)
return
}
count++
}
fmt.Printf("N:%v\n", count)
}

35
hbr/cmd/help.go Normal file
View File

@ -0,0 +1,35 @@
package cmd
import (
"fmt"
"github.com/spf13/cobra"
)
var rootCmd = &cobra.Command{
Use: "hbr",
Short: "He3DB backup&restore",
Long: "Welcome to use hbr for He3DB backup&restore",
Run: runRoot,
}
func init() {
rootCmd.PersistentFlags().String("access_key", "", "S3 Access Key")
rootCmd.PersistentFlags().String("secret_key", "", "S3 Secret Key")
rootCmd.PersistentFlags().String("endpoint", "", "S3 endpoint")
rootCmd.PersistentFlags().String("region", "", "S3 region")
rootCmd.PersistentFlags().String("bucket", "", "S3 bucket")
rootCmd.PersistentFlags().String("pd", "http://127.0.0.1:2379", "Tikv placement driber")
rootCmd.PersistentFlags().String("name", "", "Backup name")
rootCmd.PersistentFlags().String("archive_start_file", "000000010000000000000001", "start key of archive[included]")
}
func Execute() {
if err := rootCmd.Execute(); err != nil {
panic(err)
}
}
func runRoot(cmd *cobra.Command, args []string) {
fmt.Printf("Welcome to use hbr for He3DB backup&restore\n")
}

128
hbr/cmd/restore.go Normal file
View File

@ -0,0 +1,128 @@
package cmd
import (
"context"
"fmt"
"io/ioutil"
"strconv"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/spf13/cobra"
"github.com/tikv/client-go/txnkv"
)
var restoreCmd = &cobra.Command{
Use: "restore",
Short: "Restore He3DB",
Long: "Welcome to use hbr for He3DB restore",
Run: runRestore,
}
func init() {
rootCmd.AddCommand(restoreCmd)
}
func runRestore(cmd *cobra.Command, args []string) {
access_key, _ := cmd.Flags().GetString("access_key")
secret_key, _ := cmd.Flags().GetString("secret_key")
endpoint, _ := cmd.Flags().GetString("endpoint")
region, _ := cmd.Flags().GetString("region")
bucket, _ := cmd.Flags().GetString("bucket")
pd, _ := cmd.Flags().GetString("pd")
backup_name, _ := cmd.Flags().GetString("name")
if access_key == "" || secret_key == "" || endpoint == "" || region == "" || bucket == "" || pd == "" || backup_name == "" {
fmt.Printf("PARAMETER ERROR!\n")
return
}
client, err := txnkv.NewClient([]string{pd})
if err != nil {
fmt.Printf("Connect Tikv Error!\n%v\n", err)
return
}
defer func() {
client.Close()
}()
sess, err := session.NewSession(&aws.Config{
Region: aws.String(region),
Endpoint: aws.String(endpoint),
Credentials: credentials.NewStaticCredentials(access_key, secret_key, ""),
S3ForcePathStyle: aws.Bool(true),
})
if err != nil {
fmt.Printf("Connect S3 Error!\n%v\n", err)
return
}
s3_client := s3.New(sess)
count := 0
input := &s3.ListObjectsInput{
Bucket: aws.String(bucket),
Prefix: aws.String(backup_name),
}
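// Page through the backup's objects with ListObjects; each object is decoded
// and written back into TiKV in its own transaction.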
for {
resp, err := s3_client.ListObjects(input)
if err != nil {
fmt.Printf("S3 ListObjects Error!\n%v\n", err)
return
}
for _, keys := range resp.Contents {
out, err := s3_client.GetObject(&s3.GetObjectInput{
Bucket: aws.String(bucket),
Key: aws.String(*keys.Key),
})
if err != nil {
fmt.Printf("S3 GetObject Error!\n%v\n", err)
return
}
defer out.Body.Close()
data, err := ioutil.ReadAll(out.Body)
if err != nil {
fmt.Printf("out.Body.Read!\n%v\n", err)
return
}
txn, err := client.Begin()
if err != nil {
fmt.Printf("Tikv Transaction Begin Error!\n%v\n", err)
return
}
fmt.Printf("filename:%s\n", (*keys.Key)[len(backup_name)+1:])
ret := make([]byte, (len(*keys.Key)-len(backup_name)-1)/2)
index := 0
for i := len(backup_name) + 1; i < len(*keys.Key); i += 2 {
value, _ := strconv.ParseUint((*keys.Key)[i:i+2], 16, 8)
ret[index] = byte(0xff & value)
index++
}
if err := txn.Set(ret, data); err != nil {
fmt.Printf("Tikv Set Error!\n%v\n", err)
return
}
if err := txn.Commit(context.TODO()); err != nil {
fmt.Printf("Tikv Transaction Commit Error!\n%v\n", err)
return
}
count++
}
if resp.NextMarker == nil {
fmt.Printf("Done!\n")
break
}
input.Marker = resp.NextMarker
}
fmt.Printf("N:%v\n", count)
fmt.Printf("Done!\n")
}

21
hbr/cmd/version.go Normal file
View File

@ -0,0 +1,21 @@
package cmd
import (
"fmt"
"github.com/spf13/cobra"
)
var versionCmd = &cobra.Command{
Use: "version",
Short: "Show Version",
Run: runVersion,
}
func init() {
rootCmd.AddCommand(versionCmd)
}
func runVersion(cmd *cobra.Command, args []string) {
fmt.Println("Version 1.0.0 ")
}

9
hbr/main.go Normal file
View File

@ -0,0 +1,9 @@
package main
import (
"hbr/cmd"
)
func main() {
cmd.Execute()
}

View File

@ -93,7 +93,7 @@ max_connections = 100 # (change requires restart)
# - Authentication -
#authentication_timeout = 1min # 1s-600s
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos

View File

@ -93,7 +93,7 @@ max_connections = 100 # (change requires restart)
# - Authentication -
#authentication_timeout = 1min # 1s-600s
password_encryption = md5 # scram-sha-256 or md5
#db_user_namespace = off
# GSSAPI using Kerberos

View File

@ -0,0 +1,46 @@
#!/bin/bash
export PATH=/home/postgres/psql14/bin:$PATH
export PGDATABASE=postgres
export PGHOST=127.0.0.1
export PGUSER=postgres
export PGPORT=15433
export PGPASSWORD=123456
slaveDataDir=/home/postgres/slavedata/pgdata
slaveConninfo='application_name=pushstandby user=repl password=123456 host=127.0.0.1 port=15433 sslmode=disable sslcompression=0 gssencmode=disable target_session_attrs=any'
pushDataDir=/home/postgres/data/pgdata
pushImdbPageDirectory=/tmp/pushpagedb
pushImdbWalDirectory=/tmp/pushwaldb
pushLogfile=/home/postgres/logfile2
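# Switchover: stop the push instance, repoint its primary_conninfo at the slave,
# promote the slave, then clear the push instance's lmdb page/wal directories
# and restart it.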
pg_ctl -D $pushDataDir -l $pushLogfile stop
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB push instance stop failed!"
exit 1
fi
sed -i 's/^primary_conninfo/#primary_conninfo/g' $pushDataDir/postgresql.auto.conf
sed -i 's/^primary_conninfo/#primary_conninfo/g' $pushDataDir/postgresql.conf
echo -e "primary_conninfo = '$slaveConninfo'" >> $pushDataDir/postgresql.conf
sed -i 's/^hot_standby/#hot_standby/g' $slaveDataDir/postgresql.conf
sed -i 's/^primary_conninfo/#primary_conninfo/g' $slaveDataDir/postgresql.auto.conf
sed -i 's/^primary_conninfo/#primary_conninfo/g' $slaveDataDir/postgresql.conf
echo -e "hot_standby=off" >> $slaveDataDir/postgresql.conf
psql -c 'SELECT pg_promote(true, 30)'
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB slave instance promote failed!"
exit 1
fi
rm -rf $pushImdbPageDirectory $pushImdbWalDirectory
pg_ctl -D $pushDataDir -l $pushLogfile start
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB push instance start failed!"
exit 1
fi

View File

@ -0,0 +1,44 @@
#!/bin/bash
export PATH=/home/postgres/psql14/bin:$PATH
export PGDATABASE=postgres
export PGHOST=127.0.0.1
export PGUSER=postgres
export PGPORT=15433
export PGPASSWORD=123456
slaveDataDir=/home/postgres/slavedata/pgdata
slavepushDataDir=/home/postgres/slavepushdata/pgdata
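# Promote the slave to a he3share primary: turn he3share on and hot_standby off
# on the slave, enable mpush on the push instance, promote, then reload both.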
sed -i 's/^he3share/#he3share/g' $slaveDataDir/postgresql.auto.conf
sed -i 's/^he3share/#he3share/g' $slaveDataDir/postgresql.conf
sed -i 's/^hot_standby/#hot_standby/g' $slaveDataDir/postgresql.conf
sed -i 's/^primary_conninfo/#primary_conninfo/g' $slaveDataDir/postgresql.auto.conf
sed -i 's/^primary_conninfo/#primary_conninfo/g' $slaveDataDir/postgresql.conf
echo -e "he3share = on" >> $slaveDataDir/postgresql.conf
echo -e "hot_standby=off" >> $slaveDataDir/postgresql.conf
sed -i 's/^mpush/#mpush/g' $slavepushDataDir/postgresql.conf
sed -i 's/^mpush/#mpush/g' $slavepushDataDir/postgresql.auto.conf
echo -e "mpush=on" >> $slavepushDataDir/postgresql.conf
psql -c 'SELECT pg_promote(true, 30)'
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB slave instance promote failed!"
exit 1
fi
pg_ctl -D $slaveDataDir reload
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB slave instance reload failed!"
exit 1
fi
pg_ctl -D $slavepushDataDir reload
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB push instance reload failed!"
exit 1
fi

View File

@ -0,0 +1,75 @@
#!/bin/bash
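# Build a local primary from the push instance's data directory:
#   1. verify $primaryDataDir exists and is empty, then stop the push instance;
#   2. rsync the push data directory (excluding base/, global/ and standby/backup control files)
#      into $primaryDataDir and symlink base/ and global/ back to the push instance's copies;
#   3. rewrite both postgresql.conf files (conninfo, port, he3mirror, lmdb directories),
#      clear the lmdb page/wal directories, then start the primary followed by the push instance.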
export PATH=/home/postgres/psql14/bin:$PATH
primaryDataDir=/home/postgres/primary/pgdata
primaryImdbPageDirectory=/tmp/primarypagedb
primaryImdbWalDirectory=/tmp/primarywaldb
primaryLogfile=/home/postgres/primarylogfile
primaryPort=15432
primaryConninfo='application_name=pushstandby user=repl password=123456 host=127.0.0.1 port=15432 sslmode=disable sslcompression=0 gssencmode=disable target_session_attrs=any'
pushDataDir=/home/postgres/push/pgdata
pushImdbPageDirectory=/tmp/pushpagedb
pushImdbWalDirectory=/tmp/pushwaldb
pushLogfile=/home/postgres/pushlogfile
if [ ! -d "$primaryDataDir" ]; then
echo "$primaryDataDir does not exist!"
exit 1
fi
if [ "`ls -A $primaryDataDir`" != "" ]; then
echo "$primaryDataDir is not enpty!"
exit 1
fi
pg_ctl -D $pushDataDir -l $pushLogfile stop
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB stop failed!"
exit 1
fi
sed -i 's/^primary_conninfo/#primary_conninfo/g' $pushDataDir/postgresql.auto.conf
sed -i 's/^primary_conninfo/#primary_conninfo/g' $pushDataDir/postgresql.conf
sed -i 's/^he3mirror/#he3mirror/g' $pushDataDir/postgresql.conf
rsync -av --exclude base --exclude global --exclude standby.signal --exclude backup_label.old --exclude backup_manifest $pushDataDir/* $primaryDataDir/
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): sync data file failed!"
exit 1
fi
ln -s $pushDataDir/base $primaryDataDir/base
ln -s $pushDataDir/global $primaryDataDir/global
echo -e "primary_conninfo = '$primaryConninfo'" >> $pushDataDir/postgresql.conf
echo -e "he3mirror=false" >> $pushDataDir/postgresql.conf
sed -i 's/^push_standby/#push_standby/g' $primaryDataDir/postgresql.conf
sed -i 's/^hot_standby/#hot_standby/g' $primaryDataDir/postgresql.conf
sed -i 's/^port/#port/g' $primaryDataDir/postgresql.conf
sed -i 's/^lmdb_page_directory/#lmdb_page_directory/g' $primaryDataDir/postgresql.conf
sed -i 's/^lmdb_wal_directory/#lmdb_wal_directory/g' $primaryDataDir/postgresql.conf
echo -e "push_standby=off" >> $primaryDataDir/postgresql.conf
echo -e "hot_standby=off" >> $primaryDataDir/postgresql.conf
echo -e "port=$primaryPort" >> $primaryDataDir/postgresql.conf
echo -e "he3mirror=false" >> $primaryDataDir/postgresql.conf
echo -e "lmdb_page_directory='$primaryImdbPageDirectory'" >> $primaryDataDir/postgresql.conf
echo -e "lmdb_wal_directory='$primaryImdbWalDirectory'" >> $primaryDataDir/postgresql.conf
rm -rf $primaryImdbPageDirectory $primaryImdbWalDirectory $pushImdbPageDirectory $pushImdbWalDirectory
pg_ctl -D $primaryDataDir -l $primaryLogfile start
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB primary instance start failed!"
exit 1
fi
pg_ctl -D $pushDataDir -l $pushLogfile start
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB push instance start failed!"
exit 1
fi

View File

@ -0,0 +1,76 @@
#!/bin/bash
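# Build a local primary from the push instance's data directory (same steps as the
# script above), additionally enabling mpush=on on the push instance before restart.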
export PATH=/home/postgres/psql14/bin:$PATH
primaryDataDir=/home/postgres/primary/pgdata
primaryImdbPageDirectory=/tmp/primarypagedb
primaryImdbWalDirectory=/tmp/primarywaldb
primaryLogfile=/home/postgres/primarylogfile
primaryPort=15432
primaryConninfo='application_name=pushstandby user=repl password=123456 host=127.0.0.1 port=15432 sslmode=disable sslcompression=0 gssencmode=disable target_session_attrs=any'
pushDataDir=/home/postgres/push/pgdata
pushImdbPageDirectory=/tmp/pushpagedb
pushImdbWalDirectory=/tmp/pushwaldb
pushLogfile=/home/postgres/pushlogfile
if [ ! -d "$primaryDataDir" ]; then
echo "$primaryDataDir does not exist!"
exit 1
fi
if [ "`ls -A $primaryDataDir`" != "" ]; then
echo "$primaryDataDir is not enpty!"
exit 1
fi
pg_ctl -D $pushDataDir -l $pushLogfile stop
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB stop failed!"
exit 1
fi
sed -i 's/^primary_conninfo/#primary_conninfo/g' $pushDataDir/postgresql.auto.conf
sed -i 's/^primary_conninfo/#primary_conninfo/g' $pushDataDir/postgresql.conf
sed -i 's/^he3mirror/#he3mirror/g' $pushDataDir/postgresql.conf
rsync -av --exclude base --exclude global --exclude standby.signal --exclude backup_label.old --exclude backup_manifest $pushDataDir/* $primaryDataDir/
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): sync data file failed!"
exit 1
fi
ln -s $pushDataDir/base $primaryDataDir/base
ln -s $pushDataDir/global $primaryDataDir/global
echo -e "primary_conninfo = '$primaryConninfo'" >> $pushDataDir/postgresql.conf
echo -e "he3mirror=false" >> $pushDataDir/postgresql.conf
echo -e "mpush=on" >> $pushDataDir/postgresql.conf
sed -i 's/^push_standby/#push_standby/g' $primaryDataDir/postgresql.conf
sed -i 's/^hot_standby/#hot_standby/g' $primaryDataDir/postgresql.conf
sed -i 's/^port/#port/g' $primaryDataDir/postgresql.conf
sed -i 's/^lmdb_page_directory/#lmdb_page_directory/g' $primaryDataDir/postgresql.conf
sed -i 's/^lmdb_wal_directory/#lmdb_wal_directory/g' $primaryDataDir/postgresql.conf
echo -e "push_standby=off" >> $primaryDataDir/postgresql.conf
echo -e "hot_standby=off" >> $primaryDataDir/postgresql.conf
echo -e "port=$primaryPort" >> $primaryDataDir/postgresql.conf
echo -e "he3mirror=false" >> $primaryDataDir/postgresql.conf
echo -e "lmdb_page_directory='$primaryImdbPageDirectory'" >> $primaryDataDir/postgresql.conf
echo -e "lmdb_wal_directory='$primaryImdbWalDirectory'" >> $primaryDataDir/postgresql.conf
rm -rf $primaryImdbPageDirectory $primaryImdbWalDirectory $pushImdbPageDirectory $pushImdbWalDirectory
pg_ctl -D $primaryDataDir -l $primaryLogfile start
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB primary instance start failed!"
exit 1
fi
pg_ctl -D $pushDataDir -l $pushLogfile start
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB push instance start failed!"
exit 1
fi

View File

@ -0,0 +1,115 @@
#!/bin/bash
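# Provision a slave + push pair from a base backup of an existing instance:
#   1. pg_basebackup into $bakDataDir and copy it into the push data directory;
#   2. derive the slave data directory from the push one (base/ and global/ become symlinks),
#      enabling he3_point_in_time_recovery for the initial start;
#   3. rewrite both configurations (ports, lmdb directories, push_standby, primary_conninfo),
#      start the slave and then the push instance, and finally comment the
#      he3_point_in_time_recovery setting back out.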
export PATH=/home/postgres/psql14/bin:$PATH
export PGPASSWORD=123456
bakDataDir=/home/postgres/bak1
bakInstancePort=15432
bakInstanceUser=repl
bakInstanceHost=127.0.0.1
slaveDataDir=/home/postgres/slavedata2/pgdata
slaveImdbPageDirectory=/tmp/slave2pagedb
slaveImdbWalDirectory=/tmp/slave2waldb
slaveLogfile=/home/postgres/slave2logfile
slavePort=15433
slaveConninfo='application_name=pushstandby2 user=repl password=123456 host=127.0.0.1 port=15433 sslmode=disable sslcompression=0 gssencmode=disable target_session_attrs=any'
pushDataDir=/home/postgres/pushdata2/pgdata
pushImdbPageDirectory=/tmp/push2pagedb
pushImdbWalDirectory=/tmp/push2waldb
pushLogfile=/home/postgres/push2logfile
pushPort=15434
if [ ! -d "$bakDataDir" ]; then
echo "$bakDataDir does not exist!"
exit 1
fi
if [ "`ls -A $bakDataDir`" != "" ]; then
echo "$bakDataDir is not enpty!"
exit 1
fi
if [ ! -d "$slaveDataDir" ]; then
echo "$slaveDataDir does not exist!"
exit 1
fi
if [ "`ls -A $slaveDataDir`" != "" ]; then
echo "$slaveDataDir is not enpty!"
exit 1
fi
if [ ! -d "$pushDataDir" ]; then
echo "$pushDataDir does not exist!"
exit 1
fi
if [ "`ls -A $pushDataDir`" != "" ]; then
echo "$pushDataDir is not enpty!"
exit 1
fi
pg_basebackup -F p --progress -X none -h $bakInstanceHost -p $bakInstancePort -U $bakInstanceUser -v -D $bakDataDir
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB pg_basebackup failed!"
exit 1
fi
rsync -av $bakDataDir/* $pushDataDir/
sed -i 's/^push_standby/#push_standby/g' $pushDataDir/postgresql.conf
sed -i 's/^port/#port/g' $pushDataDir/postgresql.conf
sed -i 's/^lmdb_page_directory/#lmdb_page_directory/g' $pushDataDir/postgresql.conf
sed -i 's/^lmdb_wal_directory/#lmdb_wal_directory/g' $pushDataDir/postgresql.conf
sed -i 's/^he3share/#he3share/g' $pushDataDir/postgresql.conf
sed -i 's/^mpush/#mpush/g' $pushDataDir/postgresql.conf
echo -e "he3_point_in_time_recovery = on" >> $pushDataDir/postgresql.auto.conf
rsync -av --exclude base --exclude global $pushDataDir/* $slaveDataDir/
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): sync data file failed!"
exit 1
fi
ln -s $pushDataDir/base $slaveDataDir/base
ln -s $pushDataDir/global $slaveDataDir/global
echo -e "push_standby=off" >> $slaveDataDir/postgresql.conf
echo -e "port=$slavePort" >> $slaveDataDir/postgresql.conf
echo -e "lmdb_page_directory='$slaveImdbPageDirectory'" >> $slaveDataDir/postgresql.conf
echo -e "lmdb_wal_directory='$slaveImdbWalDirectory'" >> $slaveDataDir/postgresql.conf
echo -e "he3share=off" >> $slaveDataDir/postgresql.conf
sed -i 's/^primary_conninfo/#primary_conninfo/g' $pushDataDir/postgresql.auto.conf
sed -i 's/^primary_conninfo/#primary_conninfo/g' $pushDataDir/postgresql.conf
echo -e "primary_conninfo = '$slaveConninfo'" >> $pushDataDir/postgresql.conf
echo -e "push_standby=on" >> $pushDataDir/postgresql.conf
echo -e "port=$pushPort" >> $pushDataDir/postgresql.conf
echo -e "lmdb_page_directory='$pushImdbPageDirectory'" >> $pushDataDir/postgresql.conf
echo -e "lmdb_wal_directory='$pushImdbWalDirectory'" >> $pushDataDir/postgresql.conf
rm -rf $slaveImdbPageDirectory $slaveImdbWalDirectory $pushImdbPageDirectory $pushImdbWalDirectory
chmod 0750 $slaveDataDir -R
chmod 0750 $pushDataDir -R
pg_ctl -D $slaveDataDir -l $slaveLogfile start
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB slave instance start failed!"
exit 1
fi
pg_ctl -D $pushDataDir -l $pushLogfile start
if [ $? -ne 0 ]
then
echo "$(date "+%F %T"): He3DB push instance start failed!"
exit 1
fi
sed -i 's/^he3_point_in_time_recovery/#he3_point_in_time_recovery/g' $slaveDataDir/postgresql.auto.conf
sed -i 's/^he3_point_in_time_recovery/#he3_point_in_time_recovery/g' $pushDataDir/postgresql.auto.conf

View File

@ -120,10 +120,10 @@ $(top_builddir)/src/port/libpgport_srv.a: | submake-libpgport
# The postgres.o target is needed by the rule in Makefile.global that
# creates the exports file when MAKE_EXPORTS = true.
LIBS += $(libpq)
libredis = -L$(top_builddir)/src/backend/access/transam/ -lhiredis
LIBS += $(libredis)
libfs = -L$(top_builddir)/src/backend/storage/file/ -lfs -lz -lpthread -lm
LIBS += $(libfs)
librust_log = -L$(top_builddir)/src/backend/storage/file/ -lrust_log -lstdc++ -lm -ldl -lpthread -lfuse3 -Wl,-gc-section
LIBS += $(librust_log)
libglib = -L/usr/lib/x86_64-linux-gnu/ -lglib-2.0 -I/usr/include/glib-2.0/ -I/usr/lib/x86_64-linux-gnu/glib-2.0/include/ -lpthread -llmdb
LIBS += $(libglib)
postgres.o: $(OBJS)
$(CC) $(LDREL) $(call expand_subsys,$^) $(LDFLAGS) $(LDFLAGS_EX) $(LIBS) -o $@

View File

@ -211,9 +211,6 @@ brin_xlog_update(XLogReaderState *record)
XLogRecPtr lsn = record->EndRecPtr;
xl_brin_update *xlrec = (xl_brin_update *) XLogRecGetData(record);
Buffer buffer;
RelFileNode rnode;
BlockNumber blkno;
ForkNumber forknum;
XLogRedoAction action;
//XLogRecGetBlockTag(record, 0, &rnode, &forknum, &blkno);
@ -504,59 +501,10 @@ he3_brin_xlog_desummarize_page(XLogReaderState *record)
}
}
static void
brinRedoCommon(XLogReaderState *record){
XLogRecPtr lsn = record->currRecPtr; //the LSN we ultimately want to advance to
Buffer buffer; //buffer of the page this LSN modifies
uint8 blockNum = (uint8) XLogRecGetBlockNum(record);
RelFileNode rnode;
ForkNumber forkNum;
BlockNumber blkno;
Page page;
XLogRecPtr procLsn; //intermediate LSN between the page's current LSN and the target LSN
char *errormsg;
/*
* the page this LSN modifies
*/
XLogRecGetBlockTag(record, blockNum, &rnode, &forkNum, &blkno);
buffer = XLogReadBufferExtended(rnode, forkNum, blkno,RBM_NORMAL);
page = BufferGetPage(buffer);
procLsn = PageGetLSN(page); //get the page's current LSN
XLogBeginRead(record, procLsn);
while (procLsn < lsn) {
BlockNumber tmpBlockNo; //block number modified by the record following the page's LSN
uint8 tmpBlockId; //block_id of the record following the page's LSN
//locate the page's LSN
XLogReadRecord(record, &errormsg);
tmpBlockId = (uint8) XLogRecGetBlockNum(record);
tmpBlockNo = record->blocks[tmpBlockId].blkno;
//check whether the first record after the page's LSN still modifies this page
if (tmpBlockNo != blkno) {
procLsn = record->EndRecPtr;
continue;
}else{
brin_redo(record);
}
}
}
void
brin_redo(XLogReaderState *record)
{
if (data_buffer_for_replay(record) == false) {
return;
}
uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
switch (info & XLOG_BRIN_OPMASK)

View File

@ -972,7 +972,6 @@ ginRedoDeleteListPages(XLogReaderState *record)
Buffer metabuffer;
Page metapage;
int i;
uint8 blocknum = XLogRecGetBlockNum(record);
metabuffer = XLogInitBufferForRedo(record, 0);
Assert(BufferGetBlockNumber(metabuffer) == GIN_METAPAGE_BLKNO);
metapage = BufferGetPage(metabuffer);
@ -1052,10 +1051,6 @@ he3GinRedoDeleteListPages(XLogReaderState *record)
void
gin_redo(XLogReaderState *record)
{
if (data_buffer_for_replay(record) == false) {
return;
}
uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
MemoryContext oldCtx;

View File

@ -276,7 +276,6 @@ he3gistRedoPageSplitRecord(XLogReaderState *record)
XLogRecPtr lsn = record->EndRecPtr;
gistxlogPageSplit *xldata = (gistxlogPageSplit *) XLogRecGetData(record);
Buffer firstbuffer = InvalidBuffer;
Buffer buffer;
Page page;
bool isrootsplit = false;
@ -319,7 +318,7 @@ he3gistRedoPageSplitRecord(XLogReaderState *record)
nextblkno = decodeNextBlockNumber(data, datalen);
} else {
offset = sizeof(Buffer);
firstbuffer = decodeFirstBuffer(data,datalen);
decodeFirstBuffer(data,datalen);
}
tuples = decodePageSplitRecord(data+offset, datalen-offset, &num);
@ -427,10 +426,6 @@ gistRedoPageReuse(XLogReaderState *record)
void
gist_redo(XLogReaderState *record)
{
if (data_buffer_for_replay(record) == false) {
return;
}
uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
MemoryContext oldCxt;

View File

@ -34,7 +34,7 @@ hash_xlog_init_meta_page(XLogReaderState *record)
XLogRecPtr lsn = record->EndRecPtr;
Page page;
Buffer metabuf;
ForkNumber forknum;
// ForkNumber forknum;
xl_hash_init_meta_page *xlrec = (xl_hash_init_meta_page *) XLogRecGetData(record);
@ -53,9 +53,9 @@ hash_xlog_init_meta_page(XLogReaderState *record)
* special handling for init forks as create index operations don't log a
* full page image of the metapage.
*/
XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
if (forknum == INIT_FORKNUM)
FlushOneBuffer(metabuf);
// XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
// if (forknum == INIT_FORKNUM)
// FlushOneBuffer(metabuf);
/* all done */
UnlockReleaseBuffer(metabuf);
@ -97,8 +97,8 @@ he3hash_xlog_init_bitmap_page(XLogReaderState *record)
* full page image of the metapage.
*/
XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
if (forknum == INIT_FORKNUM)
FlushOneBuffer(bitmapbuf);
// if (forknum == INIT_FORKNUM)
// FlushOneBuffer(bitmapbuf);
UnlockReleaseBuffer(bitmapbuf);
break;
}
@ -122,9 +122,9 @@ he3hash_xlog_init_bitmap_page(XLogReaderState *record)
PageSetLSN(page, lsn);
MarkBufferDirty(metabuf);
XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
if (forknum == INIT_FORKNUM)
FlushOneBuffer(metabuf);
// XLogRecGetBlockTag(record, 0, NULL, &forknum, NULL);
// if (forknum == INIT_FORKNUM)
// FlushOneBuffer(metabuf);
}
if (BufferIsValid(metabuf))
UnlockReleaseBuffer(metabuf);
@ -408,8 +408,6 @@ he3hash_xlog_split_allocate_page(XLogReaderState *record)
newbuf = XLogInitBufferForRedo(record, 0);
_hash_initbuf(newbuf, xlrec->new_bucket, xlrec->new_bucket,
xlrec->new_bucket_flag, true);
if (!IsBufferCleanupOK(newbuf))
elog(PANIC, "hash_xlog_split_allocate_page: failed to acquire cleanup lock");
MarkBufferDirty(newbuf);
PageSetLSN(BufferGetPage(newbuf), lsn);
@ -749,6 +747,10 @@ he3hash_xlog_squeeze_page(XLogReaderState *record)
* is to ensure a cleanup lock on primary bucket page.
*/
(void) XLogReadBufferForRedoExtended(record, 0, mode, true, &bucketbuf);
Page writepage;
writepage = (Page) BufferGetPage(bucketbuf);
PageSetLSN(writepage, lsn);
MarkBufferDirty(bucketbuf);
}
if (mode != RBM_NORMAL_VALID && BufferIsValid(bucketbuf))
UnlockReleaseBuffer(bucketbuf);
@ -765,6 +767,7 @@ he3hash_xlog_squeeze_page(XLogReaderState *record)
action = XLogReadBufferForRedoExtended(record, 0, mode, true, &writebuf);
else
{
mode = RBM_NORMAL;
action = XLogReadBufferForRedo(record, 0, &writebuf);
}
@ -962,7 +965,6 @@ he3hash_xlog_delete(XLogReaderState *record)
Buffer deletebuf = InvalidBuffer;
Page page;
XLogRedoAction action;
Buffer buffer = InvalidBuffer;
RelFileNode rnode;
BlockNumber blkno;
ForkNumber forknum;
@ -1007,6 +1009,7 @@ he3hash_xlog_delete(XLogReaderState *record)
action = XLogReadBufferForRedoExtended(record, 0, mode, true, &deletebuf);
else
{
mode = RBM_NORMAL;
action = XLogReadBufferForRedo(record, 0, &deletebuf);
}
/* replay the record for deleting entries in bucket page */
@ -1209,10 +1212,6 @@ he3hash_xlog_vacuum_one_page(XLogReaderState *record)
void
hash_redo(XLogReaderState *record)
{
if (data_buffer_for_replay(record) == false) {
return;
}
uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
switch (info)

View File

@ -6065,7 +6065,7 @@ heap_inplace_update(Relation relation, HeapTuple tuple)
/* inplace updates aren't decoded atm, don't log the origin */
recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
XLogFlush(recptr);
// XLogFlush(recptr);
PageSetLSN(page, recptr);
}
@ -8491,6 +8491,11 @@ heap_xlog_prune(XLogReaderState *record)
}
if (mode != RBM_NORMAL_VALID && BufferIsValid(buffer))
{
UnlockReleaseBuffer(buffer);
}
/*if (mode != RBM_NORMAL_VALID && BufferIsValid(buffer))
{
Size freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
@ -8505,8 +8510,8 @@ heap_xlog_prune(XLogReaderState *record)
* Do this regardless of a full-page image being applied, since the
* FSM data is not in the page anyway.
*/
XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
}
/*XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
}*/
}
/*
@ -8560,6 +8565,11 @@ heap_xlog_vacuum(XLogReaderState *record)
}
if (BufferIsValid(buffer))
{
UnlockReleaseBuffer(buffer);
}
/*if (BufferIsValid(buffer))
{
Size freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
RelFileNode rnode;
@ -8577,8 +8587,8 @@ heap_xlog_vacuum(XLogReaderState *record)
* Do this regardless of a full-page image being applied, since the
* FSM data is not in the page anyway.
*/
XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
}
/*XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
}*/
}
/*
@ -8642,6 +8652,8 @@ heap_xlog_visible(XLogReaderState *record)
PageSetAllVisible(page);
MarkBufferDirty(buffer);
PageSetLSN(page, lsn);
}
else if (action == BLK_RESTORED)
{
@ -8653,6 +8665,11 @@ heap_xlog_visible(XLogReaderState *record)
}
if (BufferIsValid(buffer))
{
UnlockReleaseBuffer(buffer);
}
/*if (BufferIsValid(buffer))
{
Size space = PageGetFreeSpace(BufferGetPage(buffer));
@ -8675,9 +8692,9 @@ heap_xlog_visible(XLogReaderState *record)
* Do this regardless of a full-page image being applied, since the
* FSM data is not in the page anyway.
*/
if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
/*if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
XLogRecordPageWithFreeSpace(rnode, blkno, space);
}
}*/
/*
* Even if we skipped the heap page update due to the LSN interlock, it's
@ -8842,6 +8859,11 @@ he3_heap_xlog_visible(XLogReaderState *record)
}
if (BufferIsValid(buffer))
{
UnlockReleaseBuffer(buffer);
}
/*if (BufferIsValid(buffer))
{
Size space = PageGetFreeSpace(BufferGetPage(buffer));
@ -8864,18 +8886,11 @@ he3_heap_xlog_visible(XLogReaderState *record)
* Do this regardless of a full-page image being applied, since the
* FSM data is not in the page anyway.
*/
if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
/*if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
XLogRecordPageWithFreeSpace(rnode, blkno, space);
}
}*/
break;
}
}
}
@ -9156,8 +9171,8 @@ heap_xlog_insert(XLogReaderState *record)
* don't bother to update the FSM in that case, it doesn't need to be
* totally accurate anyway.
*/
if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(target_node, blkno, freespace);
// if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
// XLogRecordPageWithFreeSpace(target_node, blkno, freespace);
}
/*
@ -9312,8 +9327,8 @@ heap_xlog_multi_insert(XLogReaderState *record)
* don't bother to update the FSM in that case, it doesn't need to be
* totally accurate anyway.
*/
if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
// if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
// XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
}
/*
@ -9592,8 +9607,8 @@ heap_xlog_update(XLogReaderState *record, bool hot_update)
* don't bother to update the FSM in that case, it doesn't need to be
* totally accurate anyway.
*/
if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(rnode, newblk, freespace);
// if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
// XLogRecordPageWithFreeSpace(rnode, newblk, freespace);
break;
}
@ -9886,10 +9901,6 @@ heap_xlog_inplace(XLogReaderState *record)
void
heap_redo(XLogReaderState *record)
{
if (data_buffer_for_replay(record) == false) {
return;
}
uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
/*
@ -9936,10 +9947,6 @@ heap_redo(XLogReaderState *record)
void
heap2_redo(XLogReaderState *record)
{
if (data_buffer_for_replay(record) == false) {
return;
}
uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
switch (info & XLOG_HEAP_OPMASK)

View File

@ -91,6 +91,7 @@
#include "access/xlog.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "postmaster/secondbuffer.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
@ -254,8 +255,8 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
#endif
Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
Assert(InRecovery || BufferIsValid(heapBuf));
//Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
//Assert(InRecovery || BufferIsValid(heapBuf));
Assert(flags & VISIBILITYMAP_VALID_BITS);
/* Check that we have the right heap page pinned, if present */
@ -656,6 +657,18 @@ vm_extend(Relation rel, BlockNumber vm_nblocks)
smgrextend(rel->rd_smgr, VISIBILITYMAP_FORKNUM, vm_nblocks_now,
pg.data, false);
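/*
* On a regular He3DB instance (not initdb/bootstrap, not the push standby and
* not in mirror mode), also hand the newly extended, zero-filled VM page to
* the second buffer.
*/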
if (!(InitdbSingle || IsBootstrapProcessingMode() == true) && !push_standby && !he3mirror)
{
PageKey pageKey;
pageKey.relfileNode.dbNode = rel->rd_smgr->smgr_rnode.node.dbNode;
pageKey.relfileNode.relNode = rel->rd_smgr->smgr_rnode.node.relNode;
pageKey.blkNo = vm_nblocks_now;
pageKey.forkNo = VISIBILITYMAP_FORKNUM;
pageKey.pageLsn = 0;
ReceivePageFromDataBuffer(&pageKey, (uint8_t *) pg.data);
}
vm_nblocks_now++;
}

View File

@ -681,8 +681,9 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno)
else
{
/* overwriting a block we zero-filled before */
smgrwrite(wstate->index->rd_smgr, MAIN_FORKNUM, blkno,
(char *) page, true);
XLogRecPtr lsn = PageGetLSN(page);
he3dbsmgrwrite(wstate->index->rd_smgr, MAIN_FORKNUM, blkno,
(char *) page, true, lsn);
}
pfree(page);

View File

@ -1505,9 +1505,6 @@ spgRedoVacuumRedirect(XLogReaderState *record)
void
spg_redo(XLogReaderState *record)
{
if (data_buffer_for_replay(record) == false) {
return;
}
uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
MemoryContext oldCxt;

View File

@ -11,7 +11,7 @@
subdir = src/backend/access/transam
top_builddir = ../../../..
include $(top_builddir)/src/Makefile.global
override CPPFLAGS := -I$(srcdir) -I$(libpq_srcdir) -lhiredis $(CPPFLAGS)
override CPPFLAGS := -I$(srcdir) -I/usr/include/glib-2.0/ -I/usr/lib/x86_64-linux-gnu/glib-2.0/include/ -L/usr/lib/x86_64-linux-gnu/ -lglib-2.0 -lpthread -I$(libpq_srcdir) $(CPPFLAGS)
OBJS = \
clog.o \
@ -34,7 +34,11 @@ OBJS = \
xlogfuncs.o \
xloginsert.o \
xlogreader.o \
xlogutils.o
xlogutils.o \
pagehashqueue.o \
ringbuffer.o \
pthreadpool.o \
pg_mirror.o
include $(top_srcdir)/src/backend/common.mk

View File

@ -477,9 +477,6 @@ applyPageRedo(Page page, const char *delta, Size deltaSize)
void
generic_redo(XLogReaderState *record)
{
if (data_buffer_for_replay(record) == false) {
return;
}
XLogRecPtr lsn = record->EndRecPtr;
Buffer buffers[MAX_GENERIC_XLOG_PAGES];
uint8 block_id;

View File

@ -0,0 +1,909 @@
#include "access/pagehashqueue.h"
#include <stdlib.h>
#include "utils/palloc.h"
#include "utils/hsearch.h"
#include "storage/pmsignal.h"
#include "miscadmin.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "postmaster/interrupt.h"
#include "libpq/pqsignal.h"
#include "storage/ipc.h"
#include "utils/wait_event.h"
#include "c.h"
#include "utils/ps_status.h"
#include "storage/procsignal.h"
#include "utils/memutils.h"
#include "postmaster/fork_process.h"
#include "postmaster/postmaster.h"
#include "storage/proc.h"
#include "access/pushpage.h"
#include "storage/buf_internals.h"
#include "utils/guc.h"
#include "storage/he3db_logindex.h"
#include "utils/hfs.h"
static void WakeupFlushWork(void);
XLogRecPtr *g_redoStartLsn;
static HTAB *PageLogindexHash = NULL;
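/* Round num up to the next power of two; non-positive input yields 1. */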
static int MaxNum(int num) {
if (num <= 0) return 1;
if ((num & (num - 1)) == 0) return num;
num |= num >> 1;
num |= num >> 2;
num |= num >> 4;
num |= num >> 8;
num |= num >> 16;
return num + 1;
}
#define FREELISTBRUCKET 32
typedef struct FreelistManage {
slock_t mutex;
int curNum;
lsn_list_t*head;
}FreelistManage;
static FreelistManage* FreeList;
const int multiple = 1;
static Size freesize = 0;
#define FREELISTSIZE (freesize?freesize:(freesize = MaxNum((NBuffers + NUM_BUFFER_PARTITIONS) * multiple)))
static Size
LogindexFreeListShmemSize(void) {
Size size;
size = 0;
size = add_size(size, mul_size(FREELISTSIZE, sizeof(lsn_list_t)));
return size;
}
static Size
FreeListManageShmemSize(void) {
Size size;
size = 0;
size = add_size(size,sizeof(FreelistManage));
return size;
}
static Size LogindexFreeListAllShmemSize(void) {
Size size;
size = 0;
for (int i = 0; i < FREELISTBRUCKET;i++) {
size = add_size(size, LogindexFreeListShmemSize());
size = add_size(size, FreeListManageShmemSize());
}
return size;
}
static Size LogindexHashShmemSize(void) {
return hash_estimate_size(NBuffers + NUM_BUFFER_PARTITIONS,sizeof(page_head_list_t));
}
Size LogindexHashAllShmemSize(void) {
Size size;
return LogindexFreeListAllShmemSize() + LogindexHashShmemSize();
}
static void
LogindexFreeListShmemInit(void)
{
Size size = LogindexFreeListAllShmemSize();
bool found;
FreeList = (FreelistManage*)
ShmemInitStruct("LogindexSpace",
size,
&found);
if (!found)
{
for (Size i = 0; i < FREELISTBRUCKET;i++) {
FreelistManage* FreePos = (FreelistManage*)(((char*)FreeList)+ i * (LogindexFreeListShmemSize()+FreeListManageShmemSize()));
lsn_list_t* begin = (lsn_list_t*)(((char*)FreePos) + FreeListManageShmemSize());
FreePos->head = begin;
FreePos->curNum = 0;
SpinLockInit(&FreePos->mutex);
int j = 0;
for (;j < FREELISTSIZE-1; j++) {
begin[j].next = &begin[j+1];
}
begin[j].next = NULL;
}
}
}
static FreelistManage* getFreeList(uint32 hashcode) {
uint32 idx = hashcode % FREELISTBRUCKET;
return (FreelistManage*)(((char*)FreeList) + ((Size)idx) * (LogindexFreeListShmemSize()+FreeListManageShmemSize()));
}
static int popLsnListElem(uint32 hashcode,lsn_list_t**data) {
FreelistManage* curFreelist = getFreeList(hashcode);
SpinLockAcquire(&curFreelist->mutex);
if (curFreelist->curNum == FREELISTSIZE) {
SpinLockRelease(&curFreelist->mutex);
return 0;
}
curFreelist->curNum++;
*data = curFreelist->head;
curFreelist->head = curFreelist->head->next;
SpinLockRelease(&curFreelist->mutex);
return 1;
}
static int pushLsnListElemArr(uint32 hashcode,lsn_list_t*head,lsn_list_t*tail,int num) {
FreelistManage* curFreelist = getFreeList(hashcode);
SpinLockAcquire(&curFreelist->mutex);
if (curFreelist->curNum == 0) {
SpinLockRelease(&curFreelist->mutex);
return 0;
}
curFreelist->curNum -= num;
tail->next = curFreelist->head;
curFreelist->head = head;
SpinLockRelease(&curFreelist->mutex);
return 1;
}
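/*
* Insert an (lsn, endlsn) entry for the given buffer tag into the page log
* index, taking the list node from the per-bucket free list and creating the
* hash entry on first use. Returns NULL when no free node is available.
*/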
page_head_list_t*
PageLogindexInsert(BufferTag *tagPtr, uint32 hashcode, XLogRecPtr lsn,XLogRecPtr endlsn)
{
page_head_list_t *result;
bool found;
lsn_list_t *data = NULL;
int re;
re = popLsnListElem(hashcode,&data);
if (re == 0) {
return NULL;
}
data->lsn = lsn;
data->endlsn = endlsn;
data->next = NULL;
result = (page_head_list_t *)
hash_search_with_hash_value(PageLogindexHash,
(void *) tagPtr,
hashcode,
HASH_ENTER,
&found);
if (found && result->tail->lsn >= lsn) {
return result;
}
if (found) {
result->count++;
result->tail->next = data;
result->tail = data;
} else {
result->count = 1;
result->tail = data;
result->head = data;
}
return result;
}
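/*
* Drop all log-index entries for this buffer tag whose lsn is below the given
* lsn, returning them to the free list; remove the hash entry entirely once
* the list becomes empty.
*/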
void
PageLogindexDelete(BufferTag *tagPtr, uint32 hashcode,XLogRecPtr lsn)
{
page_head_list_t *result;
result = (page_head_list_t *)
hash_search_with_hash_value(PageLogindexHash,
(void *) tagPtr,
hashcode,
HASH_FIND,
NULL);
if (!result) {
return;
} else {
lsn_list_t* tail,*next;
next = result->head;
int delNum = 0;
while(next != NULL && next->lsn < lsn) {
delNum++;
tail = next;
next = tail->next;
result->count--;
}
if (delNum != 0) {
pushLsnListElemArr(hashcode,result->head,tail,delNum);
}
if (next == NULL) {
result = (page_head_list_t *)
hash_search_with_hash_value(PageLogindexHash,
(void *) tagPtr,
hashcode,
HASH_REMOVE,
NULL);
if (!result) /* shouldn't happen */
elog(ERROR, "PageLogindexHash hash table corrupted");
} else {
result->head = next;
}
}
}
uint32
PageLogindexHashCode(BufferTag *tagPtr)
{
return get_hash_value(PageLogindexHash, (void *) tagPtr);
}
void
InitLogindexHashBrucket(void)
{
HASHCTL info;
long init_table_size,
max_table_size;
info.keysize = sizeof(BufferTag);
info.entrysize = sizeof(page_head_list_t);
init_table_size = NBuffers + NUM_BUFFER_PARTITIONS;
max_table_size = NBuffers + NUM_BUFFER_PARTITIONS;
info.num_partitions = NUM_BUFFER_PARTITIONS;
PageLogindexHash = ShmemInitHash("PageLogindexHash",
init_table_size,
max_table_size,
&info,
HASH_ELEM | HASH_BLOBS| HASH_PARTITION);
LogindexFreeListShmemInit();
}
page_head_list_t *
PageLogindexLookup(BufferTag *tagPtr,uint32_t hashcode)
{
page_head_list_t *result;
result = (page_head_list_t *)
hash_search_with_hash_value(PageLogindexHash,
(void *) tagPtr,
hashcode,
HASH_FIND,
NULL);
return result;
}
void cleanOneList(BufferTag *tagPtr,XLogRecPtr cleanLsn) {
uint32 hashcode = PageLogindexHashCode(tagPtr);
LWLock *partition_lock = LOGIndexPartitionLock(hashcode);
LWLockAcquire(partition_lock, LW_EXCLUSIVE);
PageLogindexDelete(tagPtr,hashcode,cleanLsn);
LWLockRelease(partition_lock);
}
static void threadCleanLogIndex(XLogRecPtr cleanLsn)
{
HASH_SEQ_STATUS scan_status;
page_head_list_t *item;
hash_seq_init(&scan_status, PageLogindexHash);
while ((item = (page_head_list_t *) hash_seq_search(&scan_status)) != NULL)
{
uint32 hash = PageLogindexHashCode(&item->tag);
LWLock *partition_lock = LOGIndexPartitionLock(hash);
LWLockAcquire(partition_lock, LW_EXCLUSIVE);
PageLogindexDelete(&item->tag,hash,cleanLsn);
LWLockRelease(partition_lock);
}
}
static void
LogIndexProcShutdownHandler(SIGNAL_ARGS)
{
int save_errno = errno;
ShutdownRequestPending = true;
SetLatch(MyLatch);
errno = save_errno;
}
/*
* CleanLogIndexMain
*/
void
CleanLogIndexMain(int argc, char *argv[])
{
sigjmp_buf local_sigjmp_buf;
MyBackendType = B_CLEAN_LOGINDEX;
MemoryContext CleanLogIndex_context;
init_ps_display(NULL);
SetProcessingMode(InitProcessing);
/*
* Set up signal handlers. We operate on databases much like a regular
* backend, so we use the same signal handling. See equivalent code in
* tcop/postgres.c.
*/
pqsignal(SIGHUP, SIG_IGN);
/*
* SIGINT is used to signal canceling the current table's vacuum; SIGTERM
* means abort and exit cleanly, and SIGQUIT means abandon ship.
*/
pqsignal(SIGINT, SIG_IGN);
pqsignal(SIGTERM, LogIndexProcShutdownHandler);
/* SIGQUIT handler was already set up by InitPostmasterChild */
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, procsignal_sigusr1_handler);
pqsignal(SIGUSR2, SIG_IGN);
pqsignal(SIGCHLD, SIG_DFL);
/*
* Create a memory context that we will do all our work in. We do this so
* that we can reset the context during error recovery and thereby avoid
* possible memory leaks. Formerly this code just ran in
* TopMemoryContext, but resetting that would be a really bad idea.
*/
CleanLogIndex_context = AllocSetContextCreate(TopMemoryContext,
"CleanLogIndexFlush",
ALLOCSET_DEFAULT_SIZES);
MemoryContextSwitchTo(CleanLogIndex_context);
/*
* If an exception is encountered, processing resumes here.
*
* Unlike most auxiliary processes, we don't attempt to continue
* processing after an error; we just clean up and exit. The autovac
* launcher is responsible for spawning another worker later.
*
* Note that we use sigsetjmp(..., 1), so that the prevailing signal mask
* (to wit, BlockSig) will be restored when longjmp'ing to here. Thus,
* signals other than SIGQUIT will be blocked until we exit. It might
* seem that this policy makes the HOLD_INTERRUPTS() call redundant, but
* it is not since InterruptPending might be set already.
*/
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
{
/* since not using PG_TRY, must reset error stack by hand */
error_context_stack = NULL;
/* Prevents interrupts while cleaning up */
HOLD_INTERRUPTS();
/* Report the error to the server log */
EmitErrorReport();
/*
* We can now go away. Note that because we called InitProcess, a
* callback was registered to do ProcKill, which will clean up
* necessary state.
*/
proc_exit(0);
}
/* We can now handle ereport(ERROR) */
PG_exception_stack = &local_sigjmp_buf;
PG_SETMASK(&UnBlockSig);
char strname[128];
char *prefix = "clean logindex ";
int n = pg_snprintf(strname,sizeof(strname),prefix,strlen(prefix));
/*
* Loop forever
*/
SetProcessingMode(NormalProcessing);
XLogRecPtr pushStandbyPoint = 0;
XLogRecPtr pushStandbyPrePoint = 0;
for (;;)
{
/* Clear any already-pending wakeups */
ResetLatch(MyLatch);
if (ShutdownRequestPending)
proc_exit(0);
int hasData = 0;
pushStandbyPrePoint = pushStandbyPoint;
if (push_standby == true || EnableHotStandby == false || *isPromoteIsTriggered) {
pushStandbyPoint = GetXLogPushToDisk();
if (pushStandbyPrePoint < pushStandbyPoint) {
hasData++;
}
} else {
if (LastPushPoint == 0) {
PrevPushPoint = *g_redoStartLsn;
}
if (PrevPushPoint != 0) {
XLogRecPtr lastReplPtr = GetXLogReplayRecPtr(NULL);
// elog(LOG, "deal page from %x to %x", PrevPushPoint, lastReplPtr);
TagNode *tagList = GetBufTagByLsnRange(PrevPushPoint,lastReplPtr-1);
if (tagList->next != NULL && tagList->tag.lsn >= PrevPushPoint) {
LastPushPoint = tagList->tag.lsn;
TagNode *next = tagList->next;
int pageNum = 0;
while(next!=NULL) {
// elog(LOG,"add tag rel %d, fork %d, blk %d",
// next->tag.tag.rnode.relNode, next->tag.tag.forkNum, next->tag.tag.blockNum);
addFileKey(&next->tag.tag);
next = next->next;
pageNum++;
}
FreeTagNode(tagList);
pushSlaveReplayQueue(pageNum);
hasData++;
PrevPushPoint = LastPushPoint+1;
SetXLogPushToDisk(PrevPushPoint);
pushStandbyPoint = GetConsistLsn(PrevPushPoint);
} else {
LastPushPoint = PrevPushPoint = lastReplPtr;
if (pushStandbyPrePoint < PrevPushPoint) {
SetXLogPushToDisk(PrevPushPoint);
pushStandbyPoint = GetConsistLsn(PrevPushPoint+1);
}
}
}
}
int pos;
if (pushStandbyPrePoint < pushStandbyPoint) {
pos = pg_snprintf(strname+n,sizeof(strname)-n,"lsn from %X/%X to %X/%X tasking",LSN_FORMAT_ARGS(pushStandbyPrePoint),LSN_FORMAT_ARGS(pushStandbyPoint));
strname[n+pos] = '\0';
set_ps_display(strname);
}
if (pushStandbyPrePoint < pushStandbyPoint) {
elog(LOG,"start threadCleanLogIndex lsn from %X/%X to %X/%X",LSN_FORMAT_ARGS(pushStandbyPrePoint),LSN_FORMAT_ARGS(pushStandbyPoint));
CleanLogIndexByPage(pushStandbyPoint);
//threadCleanLogIndex(LastPushPoint);
elog(LOG,"end threadCleanLogIndex lsn from %X/%X to %X/%X",LSN_FORMAT_ARGS(pushStandbyPrePoint),LSN_FORMAT_ARGS(pushStandbyPoint));
}
if (hasData != 0) {
continue;
}
pos = pg_snprintf(strname+n,sizeof(strname)-n,"to lsn: %X/%X idle",LSN_FORMAT_ARGS(pushStandbyPoint));
strname[n+pos] = '\0';
set_ps_display(strname);
(void) WaitLatch(MyLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
50L /* convert to ms */ ,
WAIT_EVENT_CLEAN_LOGINDEX_MAIN);
}
}
void SignalStartCleanLogIndexWork(void) {
SendPostmasterSignal(PMSIGNAL_CLEAN_LOGINDEX_WORKER);
}
typedef struct PageValue {
BufferTag tag;
uint16_t num;
} PageValue;
static HTAB *PageCountHash = NULL;
static uint32_t curLatchPos = 0;
typedef struct {
slock_t mutex;
volatile uint32 gpushpos;
volatile bool ready;
volatile uint32 gpos;
pg_atomic_uint32 latchPos;
pg_atomic_uint32 taskNum;
uint32 modifyNum;
Latch pageFlushWakeupLatch[PARALLEL_NUM];
PageValue*gtag[G_QUEUE_LEN];
}PageHashQueueShmemStruct;
static PageHashQueueShmemStruct *PageHashQueueShmem;
void pushSlaveReplayQueue(int pageNum) {
if (PageHashQueueShmem->gpos != 0 && PageHashQueueShmem->ready == false) {
SpinLockAcquire(&PageHashQueueShmem->mutex);
PageHashQueueShmem->ready = true;
SpinLockRelease(&PageHashQueueShmem->mutex);
WakeupFlushWork();
}
while(pageNum > CompletedTaskNum()) {
pg_usleep(1000L);
}
cleanMap();
}
Latch* GetCurrentLatch(uint32_t pos) {
return &PageHashQueueShmem->pageFlushWakeupLatch[pos];
}
void WakeupOneFlushWork(uint32_t pos) {
SetLatch(&PageHashQueueShmem->pageFlushWakeupLatch[pos]);
}
static void WakeupFlushWork(void)
{
for (int i = 0;i<PARALLEL_NUM;i++) {
SetLatch(&PageHashQueueShmem->pageFlushWakeupLatch[i]);
}
}
uint32_t AssignLatchPos(void) {
return pg_atomic_fetch_add_u32(&PageHashQueueShmem->latchPos,1);
}
void ResetFlushLatch(uint32_t pos) {
ResetLatch(&PageHashQueueShmem->pageFlushWakeupLatch[pos]);
}
void OwnFlushLatch(uint32_t pos) {
OwnLatch(&PageHashQueueShmem->pageFlushWakeupLatch[pos]);
}
Size
PageHashQueueShmemSize(void)
{
Size size;
/*
* Currently, the size of the gtag[] array is arbitrarily set equal to
* G_QUEUE_LEN. This may prove too large or small ...
*/
size = offsetof(PageHashQueueShmemStruct, gtag);
size = add_size(size, mul_size(G_QUEUE_LEN, sizeof(PageValue*)));
return size;
}
void
PageHashQueueShmemInit(void)
{
Size size = PageHashQueueShmemSize();
bool found;
PageHashQueueShmem = (PageHashQueueShmemStruct *)
ShmemInitStruct("PageHashQueue",
size,
&found);
if (!found)
{
SpinLockInit(&PageHashQueueShmem->mutex);
SpinLockAcquire(&PageHashQueueShmem->mutex);
PageHashQueueShmem->ready = false;
PageHashQueueShmem->gpushpos = 0;
SpinLockRelease(&PageHashQueueShmem->mutex);
PageHashQueueShmem->gpos = 0;
pg_atomic_init_u32(&PageHashQueueShmem->taskNum,0);
pg_atomic_init_u32(&PageHashQueueShmem->latchPos, 0);
PageHashQueueShmem->modifyNum = 0;
for (int i = 0;i<PARALLEL_NUM;i++) {
InitSharedLatch(&PageHashQueueShmem->pageFlushWakeupLatch[i]);
}
}
}
static Size RedoStartPointSize(void) {
return sizeof(XLogRecPtr);
}
Size PageHashMapSize(void) {
return RedoStartPointSize() + hash_estimate_size(G_QUEUE_LEN,sizeof(PageValue));
}
void
InitBufferPoolHashMap(void)
{
HASHCTL info;
long init_table_size,
max_table_size;
info.keysize = sizeof(BufferTag);
info.entrysize = sizeof(PageValue);
init_table_size = G_QUEUE_LEN;
max_table_size = G_QUEUE_LEN;
PageCountHash = ShmemInitHash("PageHashCount",
init_table_size,
max_table_size,
&info,
HASH_ELEM | HASH_BLOBS);
bool found;
g_redoStartLsn = (XLogRecPtr*)ShmemInitStruct("redoStartPoint",
RedoStartPointSize(),
&found);
if (!found)
{
memset(g_redoStartLsn,0,RedoStartPointSize());
}
}
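/*
* Record that onePage was touched by replay: bump its counter in PageCountHash
* and, the first time the tag is seen, append it to the shared flush queue.
* Returns the running total of recorded modifications.
*/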
uint32_t addFileKey(BufferTag*onePage) {
PageValue *result;
bool found;
uint32_t newHash = get_hash_value(PageCountHash,onePage);
result = (PageValue*)
hash_search_with_hash_value(PageCountHash,
(void *) onePage,
newHash,
HASH_ENTER,
&found);
if (found == false) {
result->num = 0;
uint32_t gpos = PageHashQueueShmem->gpos++;
PageHashQueueShmem->gtag[gpos] = result;
}
result->num++;
PageHashQueueShmem->modifyNum++;
return PageHashQueueShmem->modifyNum;
}
void cleanMap(void) {
HASH_SEQ_STATUS scan_status;
PageValue *item;
hash_seq_init(&scan_status, PageCountHash);
while ((item = (PageValue *) hash_seq_search(&scan_status)) != NULL)
{
if (hash_search(PageCountHash, (const void *) &item->tag,
HASH_REMOVE, NULL) == NULL)
elog(ERROR, "hash table corrupted");
}
SpinLockAcquire(&PageHashQueueShmem->mutex);
PageHashQueueShmem->ready = false;
PageHashQueueShmem->gpushpos = 0;
SpinLockRelease(&PageHashQueueShmem->mutex);
PageHashQueueShmem->gpos = 0;
pg_atomic_init_u32(&PageHashQueueShmem->taskNum,0);
PageHashQueueShmem->modifyNum = 0;
}
uint32_t hashMapSize(void) {
return hash_get_num_entries(PageCountHash);
}
static int cmp(const void* a,const void* b) {
return (*((const PageValue**)b))->num - (*((const PageValue**)a))->num;
}
void SortPageQueue(void) {
if (PageHashQueueShmem->gpos != 0 && PageHashQueueShmem->ready == false) {
qsort(PageHashQueueShmem->gtag,PageHashQueueShmem->gpos,sizeof(PageValue*),cmp);
SpinLockAcquire(&PageHashQueueShmem->mutex);
PageHashQueueShmem->ready = true;
SpinLockRelease(&PageHashQueueShmem->mutex);
WakeupFlushWork();
return;
}
}
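/*
* Pop the next buffer tag from the shared flush queue; returns NULL when the
* queue is not yet marked ready or has already been drained.
*/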
BufferTag* QueuePushPage(void) {
uint32_t gpushpos;
bool hasData = false;
if (PageHashQueueShmem->ready == true) {
SpinLockAcquire(&PageHashQueueShmem->mutex);
if (PageHashQueueShmem->ready == true && PageHashQueueShmem->gpushpos < PageHashQueueShmem->gpos) {
hasData = true;
gpushpos = PageHashQueueShmem->gpushpos++;
}
SpinLockRelease(&PageHashQueueShmem->mutex);
}
if (hasData == false) {
return NULL;
} else {
return &(PageHashQueueShmem->gtag[gpushpos]->tag);
}
}
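/*
* Read the tagged page into the buffer pool; on the push standby, flush it to
* disk if dirty and schedule writeback. Bumps the shared completed-task
* counter when done.
*/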
void ProcFlushBufferToDisk(BufferTag*tag) {
Buffer buffer = XLogReadBufferExtended(tag->rnode, tag->forkNum, tag->blockNum,
RBM_NORMAL);
if (!BufferIsValid(buffer))
{
elog(PANIC,"ProcFlushBufferToDisk is invalid rel %d,flk %d,blk %d",tag->rnode.relNode,tag->forkNum,tag->blockNum);
pg_atomic_fetch_add_u32(&PageHashQueueShmem->taskNum,1);
return;
}
// elog(LOG, "replay rel %d, fork %d, blkno %d, pagelsn %X/%X", tag->rnode.relNode,
// tag->forkNum,tag->blockNum, LSN_FORMAT_ARGS(PageGetLSN(BufferGetPage(buffer))));
//the slave does not need to flush pages to disk
if (push_standby == true) {
BufferDesc *buf;
buf = GetBufferDescriptor(buffer-1);
uint32 buf_state = pg_atomic_read_u32(&buf->state);
if (buf_state & BM_DIRTY) {
LWLockAcquire(BufferDescriptorGetContentLock(buf),
LW_SHARED);
FlushOneBuffer(buffer);
LWLockRelease(BufferDescriptorGetContentLock(buf));
ScheduleBufferTagForWriteback(&BackendWritebackContext,
&buf->tag);
}
}
ReleaseBuffer(buffer);
pg_atomic_fetch_add_u32(&PageHashQueueShmem->taskNum,1);
}
uint32_t CompletedTaskNum(void) {
return pg_atomic_read_u32(&PageHashQueueShmem->taskNum);
}
static void
ParallelFlushProcShutdownHandler(SIGNAL_ARGS)
{
int save_errno = errno;
ShutdownRequestPending = true;
WakeupOneFlushWork(curLatchPos);
errno = save_errno;
}
/*
* PageFlushWorkerMain
*/
NON_EXEC_STATIC void
PageFlushWorkerMain(int argc, char *argv[])
{
sigjmp_buf local_sigjmp_buf;
InRecovery = true;
MyBackendType = B_PARALLEL_FLUSH;
MemoryContext parallelflush_context;
init_ps_display(NULL);
SetProcessingMode(InitProcessing);
/*
* Set up signal handlers. We operate on databases much like a regular
* backend, so we use the same signal handling. See equivalent code in
* tcop/postgres.c.
*/
pqsignal(SIGHUP, SIG_IGN);
/*
* SIGINT is used to signal canceling the current table's vacuum; SIGTERM
* means abort and exit cleanly, and SIGQUIT means abandon ship.
*/
pqsignal(SIGINT, SIG_IGN);
pqsignal(SIGTERM, ParallelFlushProcShutdownHandler);
/* SIGQUIT handler was already set up by InitPostmasterChild */
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, procsignal_sigusr1_handler);
pqsignal(SIGUSR2, SIG_IGN);
pqsignal(SIGCHLD, SIG_DFL);
/*
* Create a memory context that we will do all our work in. We do this so
* that we can reset the context during error recovery and thereby avoid
* possible memory leaks. Formerly this code just ran in
* TopMemoryContext, but resetting that would be a really bad idea.
*/
parallelflush_context = AllocSetContextCreate(TopMemoryContext,
"ParallelFlush",
ALLOCSET_DEFAULT_SIZES);
MemoryContextSwitchTo(parallelflush_context);
/* Early initialization */
BaseInit();
/*
* Create a per-backend PGPROC struct in shared memory, except in the
* EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
* this before we can use LWLocks (and in the EXEC_BACKEND case we already
* had to do some stuff with LWLocks).
*/
#ifndef EXEC_BACKEND
InitProcess();
#endif
/*
* If an exception is encountered, processing resumes here.
*
* Unlike most auxiliary processes, we don't attempt to continue
* processing after an error; we just clean up and exit. The autovac
* launcher is responsible for spawning another worker later.
*
* Note that we use sigsetjmp(..., 1), so that the prevailing signal mask
* (to wit, BlockSig) will be restored when longjmp'ing to here. Thus,
* signals other than SIGQUIT will be blocked until we exit. It might
* seem that this policy makes the HOLD_INTERRUPTS() call redundant, but
* it is not since InterruptPending might be set already.
*/
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
{
/* since not using PG_TRY, must reset error stack by hand */
error_context_stack = NULL;
/* Prevents interrupts while cleaning up */
HOLD_INTERRUPTS();
/* Report the error to the server log */
EmitErrorReport();
/*
* We can now go away. Note that because we called InitProcess, a
* callback was registered to do ProcKill, which will clean up
* necessary state.
*/
proc_exit(0);
}
/* We can now handle ereport(ERROR) */
PG_exception_stack = &local_sigjmp_buf;
PG_SETMASK(&UnBlockSig);
curLatchPos = AssignLatchPos();
OwnFlushLatch(curLatchPos);
char strname[64];
char *prefix = "parallel flush workid: ";
int n = pg_snprintf(strname,sizeof(strname),prefix,strlen(prefix));
/*
* Loop forever
*/
for (;;)
{
/* Clear any already-pending wakeups */
ResetFlushLatch(curLatchPos);
if (ShutdownRequestPending)
proc_exit(0);
BufferTag *tag = NULL;
SetProcessingMode(NormalProcessing);
int pos = pg_snprintf(strname+n,sizeof(strname)-n,"%d tasking",curLatchPos);
strname[n+pos] = '\0';
set_ps_display(strname);
while((tag=QueuePushPage())!=NULL) {
ProcFlushBufferToDisk(tag);
}
pos = pg_snprintf(strname+n,sizeof(strname)-n,"%d idle",curLatchPos);
strname[n+pos] = '\0';
set_ps_display(strname);
(void) WaitLatch(GetCurrentLatch(curLatchPos),
WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
1000L /* convert to ms */ ,
WAIT_EVENT_PAGEFLUSH_MAIN);
}
}
void SignalStartFlushWork(void) {
SendPostmasterSignal(PMSIGNAL_PARALLEL_FLUSH_WORKER);
//sleep briefly to wait for the flush workers to start up
usleep(200000);
}
/*
* Main entry point for the page flush worker process.
*
* This code is heavily based on pgarch.c, q.v.
*/
int
StartPageFlushWorker(void)
{
pid_t worker_pid;
#ifdef EXEC_BACKEND
switch ((worker_pid = avworker_forkexec()))
#else
switch ((worker_pid = fork_process()))
#endif
{
case -1:
ereport(LOG,
(errmsg("could not fork autovacuum worker process: %m")));
return 0;
#ifndef EXEC_BACKEND
case 0:
/* in postmaster child ... */
InitPostmasterChild();
/* Close the postmaster's sockets */
ClosePostmasterPorts(false);
CreateAuxProcessResourceOwner();
//MyPMChildSlot = AssignPostmasterChildSlot();
IsParallelFlushWorker = true;
PageFlushWorkerMain(0, NULL);
break;
#endif
default:
return (int) worker_pid;
}
/* shouldn't get here */
return 0;
}

View File

@ -0,0 +1,742 @@
#include "access/pg_mirror.h"
#include "postgres.h"
#include "access/xlogrecord.h"
#include "access/heapam_xlog.h"
#include "access/nbtxlog.h"
#include "access/gistxlog.h"
#include "access/spgxlog.h"
#include "access/brin_xlog.h"
#include "assert.h"
#include "common/controldata_utils.h"
#include "miscadmin.h"
#define INSERT_FREESPACE_MIRROR(endptr) \
(((endptr) % XLOG_BLCKSZ == 0) ? 0 : (XLOG_BLCKSZ - (endptr) % XLOG_BLCKSZ))
static ControlFileData *ControlFile = NULL;
//default 16MB
static int WalSegSz = 16777216;
//multiple block records merged into one record
typedef struct XLogHe3ToPg {
uint64 CurrBytePos;
uint64 PrevBytePos;
}XLogHe3ToPg;
static XLogHe3ToPg g_walHe3ToPg;
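/*
* Rewrite a record's main data from the He3DB layout back to the upstream
* PostgreSQL ("old") struct layout for the record types whose formats differ
* (heap2 visible, heap update/hot-update, btree split, gist page split,
* several spgist records, brin insert/update); all other records are copied
* through unchanged.
*/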
static void ReConvertMainData(XLogRecord* sRecord, char*sMainData, uint32_t*sLen, char* dMainData, uint32_t* dLen) {
RmgrId rmid = sRecord->xl_rmid;
uint8 info = (sRecord->xl_info & ~XLR_INFO_MASK);
bool hasChange = false;
switch(rmid) {
case RM_HEAP2_ID:
{
if ((info & XLOG_HEAP_OPMASK) == XLOG_HEAP2_VISIBLE) {
xl_heap_visible *xlrec = (xl_heap_visible *)sMainData;
xl_old_heap_visible xlrecOld;
xlrecOld.cutoff_xid = xlrec->cutoff_xid;
xlrecOld.flags = xlrec->flags;
*dLen = sizeof(xl_old_heap_visible);
memcpy(dMainData,&xlrecOld,*dLen);
hasChange = true;
}
break;
}
case RM_HEAP_ID:
{
if (((info & XLOG_HEAP_OPMASK) == XLOG_HEAP_UPDATE) ||
((info & XLOG_HEAP_OPMASK) == XLOG_HEAP_HOT_UPDATE)) {
xl_heap_update *xlrec = (xl_heap_update *)sMainData;
xl_old_heap_update xlrecOld;
xlrecOld.old_xmax = xlrec->old_xmax;
xlrecOld.old_offnum = xlrec->old_offnum;
xlrecOld.old_infobits_set = xlrec->old_infobits_set;
xlrecOld.flags = xlrec->flags;
xlrecOld.new_xmax = xlrec->new_xmax;
xlrecOld.new_offnum = xlrec->new_offnum;
*dLen = sizeof(xl_old_heap_update);
memcpy(dMainData,&xlrecOld,*dLen);
hasChange = true;
}
break;
}
case RM_BTREE_ID:
{
if (info == XLOG_BTREE_SPLIT_L || info == XLOG_BTREE_SPLIT_R) {
xl_btree_split *xlrec = (xl_btree_split *)sMainData;
xl_old_btree_split xlrecOld;
xlrecOld.level = xlrec->level;
xlrecOld.firstrightoff = xlrec->firstrightoff;
xlrecOld.newitemoff = xlrec->newitemoff;
xlrecOld.postingoff = xlrec->postingoff;
*dLen = sizeof(xl_old_btree_split);
memcpy(dMainData,&xlrecOld,*dLen);
hasChange = true;
}
break;
}
case RM_GIST_ID:
{
if (info == XLOG_GIST_PAGE_SPLIT) {
gistxlogPageSplit *xlrec = (gistxlogPageSplit *)sMainData;
gistoldxlogPageSplit xlrecOld;
xlrecOld.origrlink = xlrec->origrlink;
xlrecOld.orignsn = xlrec->orignsn;
xlrecOld.origleaf = xlrec->origleaf;
xlrecOld.npage = xlrec->npage;
xlrecOld.markfollowright = xlrec->markfollowright;
*dLen = sizeof(gistoldxlogPageSplit);
memcpy(dMainData,&xlrecOld,*dLen);
hasChange = true;
}
break;
}
case RM_SPGIST_ID:
{
if (info == XLOG_SPGIST_ADD_LEAF) {
spgxlogAddLeaf *xlrec = (spgxlogAddLeaf *)sMainData;
spgoldxlogAddLeaf xlrecOld;
xlrecOld.newPage = xlrec->newPage;
xlrecOld.storesNulls = xlrec->storesNulls;
xlrecOld.offnumLeaf = xlrec->offnumLeaf;
xlrecOld.offnumHeadLeaf = xlrec->offnumHeadLeaf;
xlrecOld.offnumParent = xlrec->offnumParent;
xlrecOld.nodeI = xlrec->nodeI;
*dLen = sizeof(spgoldxlogAddLeaf);
memcpy(dMainData,&xlrecOld,*dLen);
hasChange = true;
} else if (info == XLOG_SPGIST_MOVE_LEAFS) {
spgxlogMoveLeafs *xlrec = (spgxlogMoveLeafs *)sMainData;
spgoldxlogMoveLeafs xlrecOld;
xlrecOld.nMoves = xlrec->nMoves;
xlrecOld.newPage = xlrec->newPage;
xlrecOld.replaceDead = xlrec->replaceDead;
xlrecOld.storesNulls = xlrec->storesNulls;
xlrecOld.offnumParent = xlrec->offnumParent;
xlrecOld.nodeI = xlrec->nodeI;
xlrecOld.stateSrc = xlrec->stateSrc;
*dLen = SizeOfOldSpgxlogMoveLeafs;
memcpy(dMainData,&xlrecOld,*dLen);
memcpy(dMainData+*dLen,xlrec->offsets,*sLen-SizeOfSpgxlogMoveLeafs);
*dLen += *sLen-SizeOfSpgxlogMoveLeafs;
hasChange = true;
} else if (info == XLOG_SPGIST_ADD_NODE) {
spgxlogAddNode *xlrec = (spgxlogAddNode *)sMainData;
spgoldxlogAddNode xlrecOld;
xlrecOld.offnum = xlrec->offnum;
xlrecOld.offnumNew = xlrec->offnumNew;
xlrecOld.newPage = xlrec->newPage;
xlrecOld.parentBlk = xlrec->parentBlk;
xlrecOld.offnumParent = xlrec->offnumParent;
xlrecOld.nodeI = xlrec->nodeI;
xlrecOld.stateSrc = xlrec->stateSrc;
*dLen = sizeof(spgoldxlogAddNode);
memcpy(dMainData,&xlrecOld,*dLen);
hasChange = true;
} else if (info == XLOG_SPGIST_PICKSPLIT) {
spgxlogPickSplit *xlrec = (spgxlogPickSplit *)sMainData;
spgoldxlogPickSplit xlrecOld;
xlrecOld.isRootSplit = xlrec->isRootSplit;
xlrecOld.nDelete = xlrec->nDelete;
xlrecOld.nInsert = xlrec->nInsert;
xlrecOld.initSrc = xlrec->initSrc;
xlrecOld.initDest = xlrec->initDest;
xlrecOld.offnumInner = xlrec->offnumInner;
xlrecOld.initInner = xlrec->initInner;
xlrecOld.storesNulls = xlrec->storesNulls;
xlrecOld.innerIsParent = xlrec->innerIsParent;
xlrecOld.offnumParent = xlrec->offnumParent;
xlrecOld.nodeI = xlrec->nodeI;
xlrecOld.stateSrc = xlrec->stateSrc;
*dLen = SizeOfOldSpgxlogPickSplit;
memcpy(dMainData,&xlrecOld,*dLen);
memcpy(dMainData+*dLen,xlrec->offsets,*sLen-SizeOfSpgxlogPickSplit);
*dLen += *sLen-SizeOfSpgxlogPickSplit;
hasChange = true;
}
break;
}
case RM_BRIN_ID:
{
if (info == XLOG_BRIN_INSERT) {
xl_brin_insert *xlrec = (xl_brin_insert *)sMainData;
xl_old_brin_insert xlrecOld;
xlrecOld.heapBlk = xlrec->heapBlk;
/* extra information needed to update the revmap */
xlrecOld.pagesPerRange = xlrec->pagesPerRange;
xlrecOld.offnum = xlrec->offnum;
*dLen = sizeof(xl_old_brin_insert);
memcpy(dMainData,&xlrecOld,*dLen);
hasChange = true;
} else if ( info == XLOG_BRIN_UPDATE) {
xl_brin_update *xlrec = (xl_brin_update *) sMainData;
xl_old_brin_update xlrecUpdate;
xl_brin_insert *xlrecInsert = &xlrec->insert;
xl_old_brin_insert xlrecOld;
xlrecOld.heapBlk = xlrecInsert->heapBlk;
/* extra information needed to update the revmap */
xlrecOld.pagesPerRange = xlrecInsert->pagesPerRange;
xlrecOld.offnum = xlrecInsert->offnum;
/* offset number of old tuple on old page */
xlrecUpdate.oldOffnum = xlrec->oldOffnum;
xlrecUpdate.insert = xlrecOld;
*dLen = sizeof(xl_old_brin_update);
memcpy(dMainData,&xlrecUpdate,*dLen);
hasChange = true;
}
break;
}
default:
{
break;
}
}
if (hasChange == false) {
*dLen = *sLen;
memcpy(dMainData,sMainData,*dLen);
}
}
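/*
* Merge n He3-format WAL records (one block reference each) into a single
* upstream-format record: block headers are emitted first, block images and
* block data follow, and the re-converted main data of the last record comes
* last. Returns the total length written to oldRecord.
*/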
static int XlogHe3ToPg(XLogRecord*newRecord[],int n, OldXLogRecord*oldRecord) {
oldRecord->xl_xid = newRecord[0]->xl_xid;
oldRecord->xl_info = newRecord[0]->xl_info;
oldRecord->xl_rmid = newRecord[0]->xl_rmid;
char d_main_data[8192];
int dPos = 0;
char* dst = (char*)oldRecord;
dPos += sizeof(OldXLogRecord);
uint32_t d_main_data_len = 0;
uint32 main_data_len = 0;
uint8_t blkNum = 0;
bool hasblk = false;
char*img_ptr[XLR_MAX_BLOCK_ID + 1] = {0};
char*data_ptr[XLR_MAX_BLOCK_ID + 1] = {0};
uint16_t bimg_len[XLR_MAX_BLOCK_ID + 1] = {0};
uint16_t data_len[XLR_MAX_BLOCK_ID + 1] = {0};
for(int i = 0;i<n;i++) {
int sPos = 0;
char* src = (char*)newRecord[i];
uint32 remaining = newRecord[i]->xl_tot_len - sizeof(XLogRecord);
uint32 datatotal = 0;
sPos += sizeof(XLogRecord);
while(remaining > datatotal) {
uint8_t block_id = *(src + sPos);
if (block_id == XLR_BLOCK_ID_DATA_SHORT) {
sPos += sizeof(block_id);
remaining -= sizeof(block_id);
if (i == n-1) {
memcpy(dst + dPos,&block_id,sizeof(block_id));
dPos += sizeof(block_id);
}
main_data_len = *((uint8_t*)(src + sPos));
//main_data_len type XLR_BLOCK_ID_DATA_SHORT
uint8 d_len;
if (i == n-1) {
ReConvertMainData(newRecord[i],src + sPos + sizeof(d_len)+bimg_len[blkNum]+data_len[blkNum],&main_data_len,d_main_data,&d_main_data_len);
d_len = d_main_data_len;
memcpy(dst + dPos,&d_len,sizeof(d_len));
dPos += sizeof(d_len);
}
sPos += sizeof(d_len);
remaining -= sizeof(d_len);
datatotal += main_data_len;
break;
} else if (block_id == XLR_BLOCK_ID_DATA_LONG) {
sPos += sizeof(block_id);
remaining -= sizeof(block_id);
if (i == n-1) {
memcpy((dst + dPos),&block_id,sizeof(block_id));
dPos += sizeof(block_id);
}
memcpy(&main_data_len,src + sPos,sizeof(uint32));
if (i == n-1) {
ReConvertMainData(newRecord[i],src + sPos + sizeof(main_data_len)+bimg_len[blkNum]+data_len[blkNum],&main_data_len,d_main_data,&d_main_data_len);
if (d_main_data_len > 255) {
memcpy(dst + dPos,&d_main_data_len,sizeof(d_main_data_len));
dPos += sizeof(d_main_data_len);
} else {
*(dst + dPos - 1) = XLR_BLOCK_ID_DATA_SHORT;
uint8_t d_len = d_main_data_len;
memcpy(dst + dPos,&d_len,sizeof(d_len));
dPos += sizeof(d_len);
}
}
sPos += sizeof(main_data_len);
remaining -= sizeof(main_data_len);
datatotal += main_data_len;
break;
} else if (block_id == XLR_BLOCK_ID_ORIGIN) {
sPos += sizeof(block_id);
remaining -= sizeof(block_id);
if (i == n-1) {
memcpy(dst + dPos,&block_id,sizeof(block_id));
dPos += sizeof(block_id);
memcpy(dst + dPos,src+sPos,sizeof(RepOriginId));
dPos += sizeof(RepOriginId);
}
sPos += sizeof(RepOriginId);
remaining -= sizeof(RepOriginId);
} else if (block_id == XLR_BLOCK_ID_TOPLEVEL_XID) {
sPos += sizeof(block_id);
remaining -= sizeof(block_id);
if (i == n - 1) {
memcpy(dst + dPos,&block_id,sizeof(block_id));
dPos += sizeof(block_id);
memcpy(dst + dPos,src+sPos,sizeof(TransactionId));
dPos += sizeof(TransactionId);
}
sPos += sizeof(TransactionId);
remaining -= sizeof(TransactionId);
} else if (block_id <= XLR_MAX_BLOCK_ID) {
memcpy(dst + dPos, src + sPos, SizeOfXLogRecordBlockHeader);
uint8_t fork_flags = *(src + sPos + sizeof(block_id));
*(dst + dPos) = blkNum;
hasblk = true;
data_len[blkNum] = *((uint16_t*)(src + sPos + sizeof(block_id) + sizeof(fork_flags)));
datatotal += data_len[blkNum];
sPos += SizeOfXLogRecordBlockHeader;
dPos += SizeOfXLogRecordBlockHeader;
remaining -= SizeOfXLogRecordBlockHeader;
if ((fork_flags & BKPBLOCK_HAS_IMAGE) != 0) {
bimg_len[blkNum] = *((uint16_t*)(src + sPos));
datatotal += bimg_len[blkNum];
uint16_t hole_offset = *((uint16_t*)(src + sPos + sizeof(bimg_len[blkNum])));
uint8_t bimg_info = *((uint8_t*)(src + sPos + sizeof(bimg_len[blkNum]) + sizeof(hole_offset)));
memcpy(dst + dPos, src + sPos, SizeOfXLogRecordBlockImageHeader);
sPos += SizeOfXLogRecordBlockImageHeader;
dPos += SizeOfXLogRecordBlockImageHeader;
remaining -= SizeOfXLogRecordBlockImageHeader;
if ((bimg_info & BKPIMAGE_IS_COMPRESSED) != 0) {
if ((bimg_info & BKPIMAGE_HAS_HOLE) != 0) {
memcpy(dst + dPos, src + sPos, SizeOfXLogRecordBlockCompressHeader);
sPos += SizeOfXLogRecordBlockCompressHeader;
dPos += SizeOfXLogRecordBlockCompressHeader;
remaining -= SizeOfXLogRecordBlockCompressHeader;
}
}
}
if (!(fork_flags & BKPBLOCK_SAME_REL)) {
memcpy(dst + dPos, src + sPos, sizeof(RelFileNode));
sPos += sizeof(RelFileNode);
dPos += sizeof(RelFileNode);
remaining -= sizeof(RelFileNode);
}
memcpy(dst + dPos, src + sPos, sizeof(BlockNumber));
sPos += sizeof(BlockNumber);
dPos += sizeof(BlockNumber);
remaining -= sizeof(BlockNumber);
} else {
printf("invalid block_id %u",block_id);
}
}
assert(remaining == datatotal);
if (bimg_len[blkNum] != 0 ) {
img_ptr[blkNum] = src + sPos;
sPos += bimg_len[blkNum];
}
if (data_len[blkNum] != 0) {
data_ptr[blkNum] = src + sPos;
sPos += data_len[blkNum];
}
if (hasblk == true) {
blkNum++;
}
sPos += main_data_len;
assert(sPos == newRecord[i]->xl_tot_len);
}
int idx = 0;
while(idx < blkNum) {
if (bimg_len[idx] != 0) {
memcpy(dst + dPos, img_ptr[idx], bimg_len[idx]);
dPos += bimg_len[idx];
}
if (data_len[idx] != 0){
memcpy(dst + dPos, data_ptr[idx], data_len[idx]);
dPos += data_len[idx];
}
idx++;
}
memcpy(dst + dPos, d_main_data, d_main_data_len);
dPos += d_main_data_len;
oldRecord->xl_tot_len = dPos;
return dPos;
}
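/*
 * Number of bytes in a WAL segment that are usable for record data under the
 * old (stock PostgreSQL) page layout: every page loses a short page header,
 * and the first page of each segment additionally pays the long-header
 * overhead. As a rough sanity check (assuming the stock 16 MB segments,
 * 8 kB pages and the usual 24/40-byte short/long header sizes), this works
 * out to 2048 * (8192 - 24) - (40 - 24) = 16728048 usable bytes per segment.
 */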
static int OldUsableBytesInSegment =
(DEFAULT_XLOG_SEG_SIZE / XLOG_BLCKSZ * (XLOG_BLCKSZ - SizeOfXLogShortPHD)) -
(SizeOfXLogLongPHD - SizeOfXLogShortPHD);
static XLogRecPtr
OldXLogBytePosToRecPtr(uint64 bytepos)
{
uint64 fullsegs;
uint64 fullpages;
uint64 bytesleft;
uint32 seg_offset;
XLogRecPtr result;
fullsegs = bytepos / OldUsableBytesInSegment;
bytesleft = bytepos % OldUsableBytesInSegment;
if (bytesleft < XLOG_BLCKSZ - SizeOfXLogLongPHD)
{
/* fits on first page of segment */
seg_offset = bytesleft + SizeOfXLogLongPHD;
}
else
{
/* account for the first page on segment with long header */
seg_offset = XLOG_BLCKSZ;
bytesleft -= XLOG_BLCKSZ - SizeOfXLogLongPHD;
fullpages = bytesleft / (XLOG_BLCKSZ - SizeOfXLogShortPHD);
bytesleft = bytesleft % (XLOG_BLCKSZ - SizeOfXLogShortPHD);
seg_offset += fullpages * XLOG_BLCKSZ + bytesleft + SizeOfXLogShortPHD;
}
XLogSegNoOffsetToRecPtr(fullsegs, seg_offset, DEFAULT_XLOG_SEG_SIZE, result);
return result;
}
static XLogRecPtr
OldXLogBytePosToEndRecPtr(uint64 bytepos)
{
uint64 fullsegs;
uint64 fullpages;
uint64 bytesleft;
uint32 seg_offset;
XLogRecPtr result;
fullsegs = bytepos / OldUsableBytesInSegment;
bytesleft = bytepos % OldUsableBytesInSegment;
if (bytesleft < XLOG_BLCKSZ - SizeOfXLogLongPHD)
{
/* fits on first page of segment */
if (bytesleft == 0)
seg_offset = 0;
else
seg_offset = bytesleft + SizeOfXLogLongPHD;
}
else
{
/* account for the first page on segment with long header */
seg_offset = XLOG_BLCKSZ;
bytesleft -= XLOG_BLCKSZ - SizeOfXLogLongPHD;
fullpages = bytesleft / (XLOG_BLCKSZ - SizeOfXLogShortPHD);
bytesleft = bytesleft % (XLOG_BLCKSZ - SizeOfXLogShortPHD);
if (bytesleft == 0)
seg_offset += fullpages * XLOG_BLCKSZ + bytesleft;
else
seg_offset += fullpages * XLOG_BLCKSZ + bytesleft + SizeOfXLogShortPHD;
}
XLogSegNoOffsetToRecPtr(fullsegs, seg_offset, DEFAULT_XLOG_SEG_SIZE, result);
return result;
}
static uint64
OldXLogRecPtrToBytePos(XLogRecPtr ptr)
{
uint64 fullsegs;
uint32 fullpages;
uint32 offset;
uint64 result;
XLByteToSeg(ptr, fullsegs, DEFAULT_XLOG_SEG_SIZE);
fullpages = (XLogSegmentOffset(ptr, DEFAULT_XLOG_SEG_SIZE)) / XLOG_BLCKSZ;
offset = ptr % XLOG_BLCKSZ;
if (fullpages == 0)
{
result = fullsegs * OldUsableBytesInSegment;
if (offset > 0)
{
Assert(offset >= SizeOfXLogLongPHD);
result += offset - SizeOfXLogLongPHD;
}
}
else
{
result = fullsegs * OldUsableBytesInSegment +
(XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */
(fullpages - 1) * (XLOG_BLCKSZ - SizeOfXLogShortPHD); /* full pages */
if (offset > 0)
{
Assert(offset >= SizeOfXLogShortPHD);
result += offset - SizeOfXLogShortPHD;
}
}
return result;
}
static bool
ReserveXLogWalSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr)
{
XLogHe3ToPg *Insert = &g_walHe3ToPg;
uint64 startbytepos;
uint64 endbytepos;
uint64 prevbytepos;
uint32 size = MAXALIGN(SizeOfOldXLogRecord);
XLogRecPtr ptr;
uint32 segleft;
startbytepos = Insert->CurrBytePos;
ptr = OldXLogBytePosToEndRecPtr(startbytepos);
if (XLogSegmentOffset(ptr, DEFAULT_XLOG_SEG_SIZE) == 0)
{
*EndPos = *StartPos = ptr;
return false;
}
endbytepos = startbytepos + size;
prevbytepos = Insert->PrevBytePos;
*StartPos = OldXLogBytePosToRecPtr(startbytepos);
*EndPos = OldXLogBytePosToEndRecPtr(endbytepos);
segleft = DEFAULT_XLOG_SEG_SIZE - XLogSegmentOffset(*EndPos, DEFAULT_XLOG_SEG_SIZE);
if (segleft != DEFAULT_XLOG_SEG_SIZE)
{
/* consume the rest of the segment */
*EndPos += segleft;
endbytepos = OldXLogRecPtrToBytePos(*EndPos);
}
Insert->CurrBytePos = endbytepos;
Insert->PrevBytePos = startbytepos;
*PrevPtr = OldXLogBytePosToRecPtr(prevbytepos);
Assert(XLogSegmentOffset(*EndPos, DEFAULT_XLOG_SEG_SIZE) == 0);
Assert(OldXLogRecPtrToBytePos(*EndPos) == endbytepos);
Assert(OldXLogRecPtrToBytePos(*StartPos) == startbytepos);
Assert(OldXLogRecPtrToBytePos(*PrevPtr) == prevbytepos);
return true;
}
static void
ReserveXLogWalInsertLocation(int size, XLogRecPtr *StartPos, XLogRecPtr *EndPos,
XLogRecPtr *PrevPtr)
{
XLogHe3ToPg *Insert = &g_walHe3ToPg;
uint64 startbytepos;
uint64 endbytepos;
uint64 prevbytepos;
size = MAXALIGN(size);
/* All (non xlog-switch) records should contain data. */
Assert(size > SizeOfOldXLogRecord);
/*
* The duration the spinlock needs to be held is minimized by minimizing
* the calculations that have to be done while holding the lock. The
* current tip of reserved WAL is kept in CurrBytePos, as a byte position
* that only counts "usable" bytes in WAL, that is, it excludes all WAL
* page headers. The mapping between "usable" byte positions and physical
* positions (XLogRecPtrs) can be done outside the locked region, and
* because the usable byte position doesn't include any headers, reserving
* X bytes from WAL is almost as simple as "CurrBytePos += X".
*/
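/*
 * Worked example (values assume the stock 8 kB WAL pages with 24-byte short
 * and 40-byte long page headers): usable byte position 0 maps to segment
 * offset SizeOfXLogLongPHD (40), and usable position XLOG_BLCKSZ -
 * SizeOfXLogLongPHD (8152) maps to offset XLOG_BLCKSZ + SizeOfXLogShortPHD
 * (8216), i.e. the first data byte of the second page; see
 * OldXLogBytePosToRecPtr() above.
 */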
startbytepos = Insert->CurrBytePos;
endbytepos = startbytepos + size;
prevbytepos = Insert->PrevBytePos;
Insert->CurrBytePos = endbytepos;
Insert->PrevBytePos = startbytepos;
*StartPos = OldXLogBytePosToRecPtr(startbytepos);
*EndPos = OldXLogBytePosToEndRecPtr(endbytepos);
*PrevPtr = OldXLogBytePosToRecPtr(prevbytepos);
/*
* Check that the conversions between "usable byte positions" and
* XLogRecPtrs work consistently in both directions.
*/
Assert(OldXLogRecPtrToBytePos(*StartPos) == startbytepos);
Assert(OldXLogRecPtrToBytePos(*EndPos) == endbytepos);
Assert(OldXLogRecPtrToBytePos(*PrevPtr) == prevbytepos);
}
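/*
 * Copy one already-converted old-format record into dBuf, emitting short/long
 * page headers and continuation (XLP_FIRST_IS_CONTRECORD) headers along the
 * way so the output is laid out like a regular pg_wal page stream. On return,
 * *dLen is the number of bytes written to dBuf, including the MAXALIGN
 * padding that brings the copy up to EndPos.
 */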
static void CopyXLogRecordToPgWAL(int write_len,OldXLogRecord* rechdr,XLogRecPtr StartPos, XLogRecPtr EndPos,
char*dBuf,int* dLen) {
char *currpos;
int freespace;
int written;
XLogRecPtr CurrPos;
XLogPageHeader pagehdr;
CurrPos = StartPos;
XLogPageHeader page;
XLogLongPageHeader longpage;
currpos = dBuf;
if (CurrPos % XLOG_BLCKSZ == SizeOfXLogShortPHD &&
XLogSegmentOffset(CurrPos, DEFAULT_XLOG_SEG_SIZE) > XLOG_BLCKSZ) {
page = (XLogPageHeader)currpos;
page->xlp_magic = XLOG_PAGE_MAGIC;
page->xlp_info = 0;
page->xlp_tli = ControlFile->checkPointCopy.ThisTimeLineID;
page->xlp_pageaddr = CurrPos - (CurrPos % XLOG_BLCKSZ);
currpos += SizeOfXLogShortPHD;
}
else if (CurrPos % XLOG_BLCKSZ == SizeOfXLogLongPHD &&
XLogSegmentOffset(CurrPos, DEFAULT_XLOG_SEG_SIZE) < XLOG_BLCKSZ) {
page = (XLogPageHeader)currpos;
page->xlp_magic = XLOG_PAGE_MAGIC;
page->xlp_info = XLP_LONG_HEADER;
page->xlp_tli = ControlFile->checkPointCopy.ThisTimeLineID;
page->xlp_pageaddr = CurrPos - (CurrPos % XLOG_BLCKSZ);
longpage = (XLogLongPageHeader) page;
longpage->xlp_sysid = ControlFile->system_identifier;
longpage->xlp_seg_size = WalSegSz;
longpage->xlp_xlog_blcksz = XLOG_BLCKSZ;
currpos += SizeOfXLogLongPHD;
}
freespace = INSERT_FREESPACE_MIRROR(CurrPos);
Assert(freespace >= sizeof(uint32));
/* Copy record data */
written = 0;
if (rechdr != NULL) {
char *rdata_data = (char *) rechdr;
int rdata_len = rechdr->xl_tot_len;
while (rdata_len > freespace)
{
Assert(CurrPos % XLOG_BLCKSZ >= SizeOfXLogShortPHD || freespace == 0);
memcpy(currpos, rdata_data, freespace);
rdata_data += freespace;
rdata_len -= freespace;
written += freespace;
CurrPos += freespace;
currpos += freespace;
pagehdr = (XLogPageHeader) currpos;
pagehdr->xlp_info = 0;
pagehdr->xlp_tli = ControlFile->checkPointCopy.ThisTimeLineID;
pagehdr->xlp_magic = XLOG_PAGE_MAGIC;
pagehdr->xlp_pageaddr = CurrPos - (CurrPos % XLOG_BLCKSZ);
pagehdr->xlp_rem_len = write_len - written;
pagehdr->xlp_info |= XLP_FIRST_IS_CONTRECORD;
if (XLogSegmentOffset(CurrPos, DEFAULT_XLOG_SEG_SIZE) == 0) {
CurrPos += SizeOfXLogLongPHD;
currpos += SizeOfXLogLongPHD;
pagehdr->xlp_info |= XLP_LONG_HEADER;
longpage = (XLogLongPageHeader) pagehdr;
longpage->xlp_sysid = ControlFile->system_identifier;
longpage->xlp_seg_size = WalSegSz;
longpage->xlp_xlog_blcksz = XLOG_BLCKSZ;
} else {
CurrPos += SizeOfXLogShortPHD;
currpos += SizeOfXLogShortPHD;
}
freespace = INSERT_FREESPACE_MIRROR(CurrPos);
}
Assert(CurrPos % XLOG_BLCKSZ >= SizeOfXLogShortPHD || rdata_len == 0);
memcpy(currpos, rdata_data, rdata_len);
currpos += rdata_len;
CurrPos += rdata_len;
freespace -= rdata_len;
written += rdata_len;
}
Assert(written == write_len);
int extra_space = MAXALIGN64(CurrPos) - CurrPos;
CurrPos = MAXALIGN64(CurrPos);
if (CurrPos != EndPos)
printf("ERROR space reserved for WAL record does not match what was written");
currpos += extra_space;
*dLen = (int)(currpos - dBuf);
}
void readControlFile(char*pathstr) {
bool crc_ok;
ControlFile = get_controlfile(pathstr,&crc_ok);
if (!crc_ok)
printf(_("WARNING: Calculated CRC checksum does not match value stored in file.\n"
"Either the file is corrupt, or it has a different layout than this program\n"
"is expecting. The results below are untrustworthy.\n\n"));
/* set wal segment size */
WalSegSz = ControlFile->xlog_seg_size;
}
void setControlFile(ControlFileData *cfile) {
ControlFile = cfile;
}
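/*
 * Convert a buffer of He3-format records (sBuf/sLen) into old-format WAL
 * pages in dBuf. The records belonging to one mini-transaction (mtr) are
 * first merged by XlogHe3ToPg(), then a position is reserved, the CRC is
 * recomputed, and the result is copied out page by page. Returns the number
 * of input bytes consumed, i.e. the length of the last complete mtr;
 * *startLsn/*endLsn are set to the old-format LSN range that was produced.
 */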
int ArrayXlogHe3ToPg(char*sBuf,int sLen, char*dBuf,int* dLen,uint64 *startLsn,uint64 *endLsn) {
XLogRecord*one = (XLogRecord*)sBuf;
//32kB
static char tBuf[32768];
int tLen = 0;
int MtrLen = 0;
int iLen = 0;
int oLen = 0;
*dLen = 0;
for(;iLen<sLen;) {
int n = 0;
XLogRecord*newRecord[XLR_MAX_BLOCK_ID + 1];
while(one->mtr == false) {
newRecord[n++] = one;
iLen += one->xl_tot_len;
one = (XLogRecord *)(((char*)one) + one->xl_tot_len);
if (iLen > sLen) {
break;
}
}
newRecord[n++] = one;
iLen += one->xl_tot_len;
one = (XLogRecord *)(((char*)one) + one->xl_tot_len);
if (iLen > sLen) {
break;
}
XlogHe3ToPg(newRecord, n, (OldXLogRecord *)(tBuf + tLen));
uint64 StartPos,EndPos;
XLogRecPtr reduceV = 0;
if (g_walHe3ToPg.PrevBytePos == 0) {
uint64 xl_prev = newRecord[0]->xl_end - newRecord[0]->xl_tot_len;
g_walHe3ToPg.PrevBytePos = g_walHe3ToPg.CurrBytePos = xl_prev;
(void) ReserveXLogWalSwitch(&StartPos, &EndPos, &xl_prev);
g_walHe3ToPg.PrevBytePos = g_walHe3ToPg.CurrBytePos;
reduceV = 1;
}
OldXLogRecord* rechdr = (OldXLogRecord*)(tBuf + tLen);
ReserveXLogWalInsertLocation(rechdr->xl_tot_len,&StartPos,&EndPos,&rechdr->xl_prev);
//for pg check
if (rechdr->xl_rmid == RM_XLOG_ID &&
(rechdr->xl_info == XLOG_CHECKPOINT_SHUTDOWN || rechdr->xl_info == XLOG_CHECKPOINT_ONLINE)) {
CheckPoint*cp = (CheckPoint*)(((char*)rechdr)+SizeOfOldXLogRecord + SizeOfXLogRecordDataHeaderShort);
cp->redo = StartPos;
rechdr->xl_prev = rechdr->xl_prev-reduceV;
}
pg_crc32c rdata_crc;
INIT_CRC32C(rdata_crc);
COMP_CRC32C(rdata_crc, ((char*)rechdr) + SizeOfOldXLogRecord, rechdr->xl_tot_len - SizeOfOldXLogRecord);
COMP_CRC32C(rdata_crc, rechdr, offsetof(OldXLogRecord, xl_crc));
FIN_CRC32C(rdata_crc);
rechdr->xl_crc = rdata_crc;
CopyXLogRecordToPgWAL(rechdr->xl_tot_len,rechdr,StartPos,EndPos,dBuf+*dLen,&oLen);
if (*startLsn == 0) {
*startLsn = StartPos;
}
*endLsn = EndPos;
*dLen += oLen;
tLen += rechdr->xl_tot_len;
MtrLen = iLen;
}
return MtrLen;
}

View File

@ -0,0 +1,86 @@
#include <stdbool.h>
#include "postgres.h"
#include "access/pthreadpool.h"
#include "access/xlog.h"
#include "access/xlogrecord.h"
#include <glib.h>
#include "utils/guc.h"
#include "utils/hfs.h"
GThreadPool *gpool = NULL;
static __thread GError *gerr = NULL;
static bool IsInitPool = false;
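/*
 * Thread-pool worker: batch-read WAL for the [startLsn, endLsn) range of one
 * wal_batch_t element into its data buffer, fix up startLsn to the actual
 * start of the first record that was read, and mark the element
 * COMPLETEDSTATUS so the consumer can pick it up.
 */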
static void getWalFunc(gpointer data, gpointer user_data) {
bool walStoreToLocal = false;
if (EnableHotStandby && *isPromoteIsTriggered == false && !push_standby)
walStoreToLocal = true;
wal_batch_t* elem = (wal_batch_t*)data;
if (elem != NULL) {
//elem->status = STARTSTATUS;
int r;
//clock_t start = clock();
r = batchRead((uint8_t *) elem->data, ThisTimeLineID2>ThisTimeLineID?ThisTimeLineID2:ThisTimeLineID, elem->startLsn, elem->endLsn, walStoreToLocal);
//clock_t end = clock();
//printf("====LSN %X/%X==pid %d==len %d===time %u\n",LSN_FORMAT_ARGS(elem->startLsn),pthread_self(),r,end-start);
elem->dataLen = r;
if (r > sizeof(XLogRecord)) {
XLogRecord* record = ((XLogRecord*)elem->data);
elem->startLsn = record->xl_end - record->xl_tot_len;
} else {
elem->startLsn = 0;
}
pg_atomic_exchange_u32(&elem->status,(uint32_t)COMPLETEDSTATUS);
}
}
static void produceWalFunc(gpointer data, gpointer user_data) {
wal_batch_t* elem = (wal_batch_t*)data;
elem->dataLen = elem->endLsn - elem->startLsn;
if (elem->dataLen != 0) {
pushXlogToTikv(elem->data,elem->dataLen);
}
pg_atomic_exchange_u32(&elem->status,(uint32_t)COMPLETEDSTATUS);
}
int initPthreadPool(void) {
if (IsInitPool == true) {
return 0;
}
if (!g_thread_supported())
{
elog(FATAL,"Not support g_thread!");
return -1;
}
//default: 4 producer threads when he3mirror is set, otherwise 8 reader threads
if(he3mirror){
gpool = g_thread_pool_new(produceWalFunc,NULL,4,FALSE,NULL);
} else{
gpool = g_thread_pool_new(getWalFunc,NULL,8,FALSE,NULL);
}
elog(LOG,"thread pool max threads is %d,num thread is %d",
g_thread_pool_get_max_threads(gpool),g_thread_pool_get_num_threads(gpool));
return 0;
}
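/*
 * Typical usage (a sketch; the real callers live in the WAL read-ahead path):
 * call initPthreadPool() once, then hand each wal_batch_t to a worker with
 * WalTaskPool(elem) and poll elem->status until it reaches COMPLETEDSTATUS
 * before consuming elem->data.
 */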
int WalTaskPool(wal_batch_t*data) {
g_thread_pool_push(gpool,(gpointer)data,&gerr);
if (gerr != NULL) {
elog(FATAL,"WalTaskPool err %s",gerr->message);
return -1;
}
return 0;
}
void WalTaskFree(void) {
g_thread_pool_free(gpool,FALSE,TRUE);
}
void WalTaskImmediateFree(void) {
g_thread_pool_free(gpool,TRUE,TRUE);
gpool = NULL;
}
bool IsFreePthreadPool(void) {
return gpool == NULL;
}

View File

@ -14,6 +14,13 @@ clock_t start_time;
XLogRecPtr PushPtr = 0;
XLogRecPtr ApplyLsn = 0;
XLogRecPtr PrePushPtr = 0;
XLogRecPtr CheckPointPtr = InvalidXLogRecPtr;
XLogRecPtr FileCheckPointPtr = InvalidXLogRecPtr;
//used to decide where the logindex can be cut
XLogRecPtr PrevPushPoint = InvalidXLogRecPtr;
XLogRecPtr LastPushPoint = InvalidXLogRecPtr;
CheckPoint GlobalCheckPoint;
uint8 GlobalState;
@ -22,83 +29,31 @@ uint8 GlobalState;
static PGconn *pushconn = NULL;
static PGconn *connToPushStandby = NULL;
pid_t startupPid = 0;
static redisContext *redisconn = NULL;
static bool ConnectRedis() {
redisconn = redisConnect("127.0.0.1", 6379);
if (redisconn == NULL) {
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("connect to redis failed")));
return false;
}
if (redisconn->err) {
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("connect to redis failed: %s",redisconn->errstr)));
redisconn = NULL;
return false;
}
char*redis_password = "VlJi7uBV";
redisReply *reply = (redisReply *)redisCommand(redisconn, "AUTH %s", redis_password);
if (reply->type == REDIS_REPLY_ERROR) {
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("connect to redis passwd failed: %s",redisconn->errstr)));
redisconn = NULL;
return false;
}
return true;
}
bool pushRedisList(const char*str) {
if (redisconn == NULL) {
if (ConnectRedis() == false) {
return false;
}
}
redisReply* r = (redisReply*)redisCommand(redisconn, str);
if (NULL == r) {
redisFree(redisconn);
redisconn = NULL;
return false;
}
if (!(r->type == REDIS_REPLY_STATUS && strcasecmp(r->str,"OK") == 0) && (r->type!=REDIS_REPLY_INTEGER)) {
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("connect to redis failed1: %s",r->str)));
freeReplyObject(r);
redisFree(redisconn);
redisconn = NULL;
return false;
}
freeReplyObject(r);
return true;
}
static bool ConnectPushStandbyDB() {
char *err;
const char *keys[] = {"dbname","user","password","host","port",NULL};
const char *vals[] = {"postgres","repl","123456","127.0.0.1","15431",NULL};
connToPushStandby = PQconnectdbParams(keys, vals, false);
if (PQstatus(connToPushStandby) == CONNECTION_BAD)
{
err = pchomp(PQerrorMessage(connToPushStandby));
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("push standby could not connect to the push standby server: %s", err)));
return false;
}
return true;
// static bool ConnectPushStandbyDB() {
// char *err;
// const char *keys[] = {"dbname","user","password","host","port",NULL};
// const char *vals[] = {"postgres","repl","123456","100.73.36.123","15431",NULL};
// connToPushStandby = PQconnectdbParams(keys, vals, false);
// if (PQstatus(connToPushStandby) == CONNECTION_BAD)
// {
// err = pchomp(PQerrorMessage(connToPushStandby));
// ereport(ERROR,
// (errcode(ERRCODE_CONNECTION_FAILURE),
// errmsg("push standby could not connect to the push standby server: %s", err)));
// return false;
// }
// return true;
}
// }
static bool ConnectPrimaryDB(void);
static bool ConnectPrimaryDB() {
static bool ConnectPrimaryDB(void) {
char *err;
char conninfo[maxconnlen];
const char *keys[] = {"dbname","user","password","host","port",NULL};
const char *vals[] = {"postgres","repl","123456","127.0.0.1","15432",NULL};
// const char *keys[] = {"dbname","user","password","host","port",NULL};
// const char *vals[] = {"postgres","repl","123456","100.73.36.123","15432",NULL};
strlcpy(conninfo, (char *) PrimaryConnInfo, maxconnlen);
/* Establish the connection to the primary for query Min Lsn*/
/*
@ -106,11 +61,12 @@ static bool ConnectPrimaryDB() {
* URI), and pass some extra options.
*/
/* Note we do not want libpq to re-expand the dbname parameter */
pushconn = PQconnectdbParams(keys, vals, true);
pushconn = PQconnectdb(conninfo);
// pushconn = PQconnectdbParams(keys, vals, true);
if (PQstatus(pushconn) == CONNECTION_BAD)
{
err = pchomp(PQerrorMessage(pushconn));
ereport(ERROR,
ereport(WARNING,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("push standby could not connect to the primary server: %s", err)));
return false;
@ -118,41 +74,77 @@ static bool ConnectPrimaryDB() {
return true;
}
XLogRecPtr QueryPushLsn()
{
StringInfoData cmd;
XLogRecPtr replylsn = InvalidXLogRecPtr;
char *replyptr;
initStringInfo(&cmd);
appendStringInfoString(&cmd,"select pg_last_wal_replay_lsn()");
replylsn = InvalidXLogRecPtr;
if (connToPushStandby == NULL) {
if (ConnectPushStandbyDB() == false) {
return InvalidXLogRecPtr;
}
}
PGresult *pgres = NULL;
pgres = PQexec(connToPushStandby, cmd.data);
if (PQresultStatus(pgres) == PGRES_TUPLES_OK && PQntuples(pgres) == 1) {
replyptr = PQgetvalue(pgres, 0, 0);
bool flag;
replylsn = pg_lsn_in_internal(replyptr,&flag);
bool ReConnectPrimaryDB(void) {
if (push_standby == true && pushconn!=NULL) {
PQfinish(pushconn);
pushconn = NULL;
if (ConnectPrimaryDB() == true) {
return true;
}
}
PQfinish(connToPushStandby);
connToPushStandby = NULL;
PQclear(pgres);
return replylsn;
return false;
}
XLogRecPtr QueryPushChkpointLsn()
// static bool ConnectPrimaryDB4ReplyLSN() {
// char *err;
// char conninfo[maxconnlen];
// const char *keys[] = {"dbname","user","password","host","port",NULL};
// const char *vals[] = {"postgres","postgres","","100.73.36.123","15432",NULL};
// strlcpy(conninfo, (char *) PrimaryConnInfo, maxconnlen);
// /* Establish the connection to the primary for query Min Lsn*/
// /*
// * We use the expand_dbname parameter to process the connection string (or
// * URI), and pass some extra options.
// */
// /* Note we do not want libpq to re-expand the dbname parameter */
// pushconn = PQconnectdbParams(keys, vals, true);
// if (PQstatus(pushconn) == CONNECTION_BAD)
// {
// err = pchomp(PQerrorMessage(pushconn));
// ereport(WARNING,
// (errcode(ERRCODE_CONNECTION_FAILURE),
// errmsg("push standby could not connect to the primary server: %s", err)));
// return false;
// }
// return true;
// }
// XLogRecPtr QueryPushLsn()
// {
// StringInfoData cmd;
// XLogRecPtr replylsn = InvalidXLogRecPtr;
// char *replyptr;
// initStringInfo(&cmd);
// appendStringInfoString(&cmd,"select pg_last_wal_replay_lsn()");
// replylsn = InvalidXLogRecPtr;
// if (connToPushStandby == NULL) {
// if (ConnectPushStandbyDB() == false) {
// return InvalidXLogRecPtr;
// }
// }
// PGresult *pgres = NULL;
// pgres = PQexec(connToPushStandby, cmd.data);
// if (PQresultStatus(pgres) == PGRES_TUPLES_OK && PQntuples(pgres) == 1) {
// replyptr = PQgetvalue(pgres, 0, 0);
// bool flag;
// replylsn = pg_lsn_in_internal(replyptr,&flag);
// }
// PQfinish(connToPushStandby);
// connToPushStandby = NULL;
// PQclear(pgres);
// return replylsn;
// }
XLogRecPtr QueryPushChkpointLsn(void)
{
ControlFileData *ControlFile;
int fd;
char ControlFilePath[MAXPGPATH];
pg_crc32c crc;
int r;
XLogRecPtr checkPoint;
ControlFile = palloc(sizeof(ControlFileData));
@ -179,8 +171,10 @@ XLogRecPtr QueryPushChkpointLsn()
XLOG_CONTROL_FILE, r, sizeof(ControlFileData))));
}
close(fd);
return ControlFile->checkPoint;
checkPoint = ControlFile->checkPoint;
pfree(ControlFile);
return checkPoint;
}
XLogRecPtr QueryMinLsn(XLogRecPtr lsn)
@ -188,9 +182,6 @@ XLogRecPtr QueryMinLsn(XLogRecPtr lsn)
StringInfoData cmd;
XLogRecPtr replylsn;
PGresult *pgres = NULL;
char *appname;
char *state;
char *syncstate;
char *replyptr;
replylsn = InvalidXLogRecPtr;
if (pushconn == NULL) {
@ -200,90 +191,89 @@ XLogRecPtr QueryMinLsn(XLogRecPtr lsn)
}
initStringInfo(&cmd);
appendStringInfoString(&cmd, "SELECT t.application_name, t.replay_lsn, t.state, t.sync_state FROM pg_catalog.pg_stat_replication t WHERE t.application_name <> \'");
appendStringInfoString(&cmd, "pushstandby");
appendStringInfoString(&cmd, "\' order by t.replay_lsn limit 1");
appendStringInfoString(&cmd, "SELECT t.application_name, t.replay_lsn, t.state, t.sync_state FROM pg_catalog.pg_stat_replication t WHERE t.application_name not like \'");
appendStringInfoString(&cmd, "push%");
appendStringInfoString(&cmd, "\' and t.application_name not like \'priv%\' order by t.replay_lsn limit 1");
pgres = PQexec(pushconn, cmd.data);
if (PQresultStatus(pgres) == PGRES_TUPLES_OK && PQntuples(pgres) == 1) {
appname = PQgetvalue(pgres, 0, 0);
replyptr = PQgetvalue(pgres, 0, 1);
bool flag;
replylsn = pg_lsn_in_internal(replyptr,&flag);
//replylsn = atol(replyptr);
state = PQgetvalue(pgres, 0, 2);
syncstate = PQgetvalue(pgres, 0, 3);
if (PQresultStatus(pgres) == PGRES_TUPLES_OK) {
if (PQntuples(pgres) == 1) {
replyptr = PQgetvalue(pgres, 0, 1);
bool flag;
replylsn = pg_lsn_in_internal(replyptr,&flag);
if (replylsn == InvalidXLogRecPtr) {
elog(ERROR,"query pg_stat_replication replylsn failed");
PQclear(pgres);
return 1;
}
}
//no standby attached, so pushstandby does not need to wait
}
else if (PQresultStatus(pgres) == PGRES_FATAL_ERROR)
{
//primary crashed: pushstandby must replay up to the primary's crash point
PQclear(pgres);
return InvalidXLogRecPtr;
}
else if (PQresultStatus(pgres) == PGRES_BAD_RESPONSE ||
PQresultStatus(pgres) == PGRES_NONFATAL_ERROR ||
PQresultStatus(pgres) == PGRES_FATAL_ERROR)
else
{
PQfinish(pushconn);
pushconn = NULL;
PQclear(pgres);
return InvalidXLogRecPtr;
}
return 1;
}
//elog(LOG,"appnamelsn: %x: replylsn %x",lsn,replylsn);
if (lsn !=InvalidXLogRecPtr && lsn < replylsn||replylsn == InvalidXLogRecPtr) {
if ((lsn !=InvalidXLogRecPtr && lsn < replylsn)||(replylsn == InvalidXLogRecPtr)) {
replylsn = lsn;
}
PQclear(pgres);
return replylsn;
}
Queue DirtyPq = {
NULL,
NULL
};
void QueuePush(QDataType x)
{
Queue* pq = &DirtyPq;
QNode* newnode = (QNode*)malloc(sizeof(QNode));
newnode->next = NULL;
memcpy(&newnode->data,&x,sizeof(x));
if (pq->tail == NULL)
{
pq->head = pq->tail = newnode;
}
else
{
pq->tail->next = newnode;
pq->tail = newnode;
}
}
//Dequeue the head element; the caller must make sure the queue is not empty (see QueueEmpty)
QDataType QueuePop()
{
Queue* pq = &DirtyPq;
QDataType data;
memcpy(&data,&pq->head->data,sizeof(QDataType));
if (pq->head->next == NULL)
{
free(pq->head);
pq->head = pq->tail = NULL;
}
else
{
QNode* next = pq->head->next;
free(pq->head);
pq->head = next;
}
return data;
}
bool QueueEmpty()
{
Queue* pq = &DirtyPq;
return pq->head == NULL;
}
XLogRecPtr QueueHeadEndLsn()
{
Queue* pq = &DirtyPq;
return pq->head->data.endlsn;
}
// XLogRecPtr QueryReplyLsn(XLogRecPtr lsn)
// {
// StringInfoData cmd;
// XLogRecPtr replylsn;
// PGresult *pgres = NULL;
// char *appname;
// char *state;
// char *syncstate;
// char *replyptr;
// replylsn = InvalidXLogRecPtr;
// if (pushconn == NULL) {
// if (ConnectPrimaryDB4ReplyLSN() == false) {
// return InvalidXLogRecPtr;
// }
// }
// initStringInfo(&cmd);
// appendStringInfoString(&cmd, "SELECT t.application_name, t.replay_lsn, t.state, t.sync_state FROM pg_catalog.pg_stat_replication t WHERE t.application_name <> \'");
// appendStringInfoString(&cmd, "pushstandby");
// appendStringInfoString(&cmd, "\' order by t.replay_lsn limit 1");
// pgres = PQexec(pushconn, cmd.data);
// if (PQresultStatus(pgres) == PGRES_TUPLES_OK && PQntuples(pgres) == 1) {
// appname = PQgetvalue(pgres, 0, 0);
// replyptr = PQgetvalue(pgres, 0, 1);
// bool flag;
// replylsn = pg_lsn_in_internal(replyptr,&flag);
// //replylsn = atol(replyptr);
// state = PQgetvalue(pgres, 0, 2);
// syncstate = PQgetvalue(pgres, 0, 3);
// }
// else if (PQresultStatus(pgres) == PGRES_BAD_RESPONSE ||
// PQresultStatus(pgres) == PGRES_NONFATAL_ERROR ||
// PQresultStatus(pgres) == PGRES_FATAL_ERROR)
// {
// PQfinish(pushconn);
// pushconn = NULL;
// PQclear(pgres);
// return InvalidXLogRecPtr;
// }
// //elog(LOG,"appnamelsn: %x: replylsn %x",lsn,replylsn);
// if (lsn !=InvalidXLogRecPtr && lsn < replylsn||replylsn == InvalidXLogRecPtr) {
// replylsn = lsn;
// }
// PQclear(pgres);
// return replylsn;
// }

View File

@ -0,0 +1,193 @@
#include "access/ringbuffer.h"
#include <string.h>
#include "access/xlogrecord.h"
/**
* @file
* Implementation of ring buffer functions.
*/
void ring_buffer_init(ring_buffer_t *buffer, wal_batch_t *buf, size_t buf_size) {
RING_BUFFER_ASSERT(RING_BUFFER_IS_POWER_OF_TWO(buf_size) == 1);
SpinLockInit(&buffer->mutex);
buffer->buffer = buf;
buffer->buffer_mask = buf_size - 1;
buffer->tail_index = 0;
buffer->head_index = 0;
}
wal_batch_t *ring_buffer_queue(ring_buffer_t *buffer, wal_batch_t data) {
wal_batch_t* curWal = NULL;
SpinLockAcquire(&buffer->mutex);
/* Is buffer full? */
if(ring_buffer_is_full(buffer)) {
SpinLockRelease(&buffer->mutex);
return NULL;
}
buffer->buffer[buffer->head_index].startLsn = data.startLsn;
buffer->buffer[buffer->head_index].endLsn = data.endLsn;
buffer->buffer[buffer->head_index].checkPointLsn = data.checkPointLsn;
pg_atomic_exchange_u32(&buffer->buffer[buffer->head_index].status,(uint32_t)UNKOWNSTATUS);
curWal = &buffer->buffer[buffer->head_index];
buffer->head_index = ((buffer->head_index + 1) & RING_BUFFER_MASK(buffer));
SpinLockRelease(&buffer->mutex);
return curWal;
}
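/*
 * Note on the producer/consumer protocol: ring_buffer_queue() only reserves a
 * slot and records the LSN range; the returned wal_batch_t is then filled
 * asynchronously (see getWalFunc()) and flagged COMPLETEDSTATUS once its data
 * is ready, so readers must check the status before trusting data/dataLen.
 */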
uint8_t ring_buffer_dequeue(ring_buffer_t *buffer, wal_batch_t *data) {
SpinLockAcquire(&buffer->mutex);
if(ring_buffer_is_empty(buffer)) {
/* No items */
SpinLockRelease(&buffer->mutex);
return 0;
}
if (data != NULL) {
*data = buffer->buffer[buffer->tail_index];
}
buffer->tail_index = ((buffer->tail_index + 1) & RING_BUFFER_MASK(buffer));
SpinLockRelease(&buffer->mutex);
return 1;
}
uint8_t ring_buffer_dequeue_arr(ring_buffer_t *buffer, uint32 size) {
SpinLockAcquire(&buffer->mutex);
if(ring_buffer_is_empty(buffer)) {
/* No items */
SpinLockRelease(&buffer->mutex);
return 0;
}
ring_buffer_size_t pos = buffer->tail_index;
for(uint32 i = 0;i<size;i++) {
pg_atomic_exchange_u32(&buffer->buffer[pos].status,(uint32_t)UNKOWNSTATUS);
pos = ((pos+1) & RING_BUFFER_MASK(buffer));
}
buffer->tail_index = ((buffer->tail_index + size) & RING_BUFFER_MASK(buffer));
SpinLockRelease(&buffer->mutex);
return 1;
}
uint8_t ring_buffer_peek(ring_buffer_t *buffer, wal_batch_t **data, ring_buffer_size_t index) {
SpinLockAcquire(&buffer->mutex);
if(index >= ring_buffer_num_items(buffer)) {
/* No items at index */
SpinLockRelease(&buffer->mutex);
return 0;
}
/* Add index to pointer */
ring_buffer_size_t data_index = ((buffer->tail_index + index) & RING_BUFFER_MASK(buffer));
*data = &buffer->buffer[data_index];
SpinLockRelease(&buffer->mutex);
return 1;
}
uint8_t ring_buffer_will_full(ring_buffer_t *buffer) {
ring_buffer_size_t num = ((buffer->head_index - buffer->tail_index) & RING_BUFFER_MASK(buffer));
return num > 0.9 * RING_BUFFER_MASK(buffer);
}
ring_buffer_t* gRingBufferManger;
Size WalReadBufferShmemSize(void) {
Size size;
size = 0;
size = add_size(size,sizeof(ring_buffer_t));
//spaceNum wal_batch_t descriptors to manage the wal buffer
size = add_size(size, spaceNum * sizeof(wal_batch_t));
//256MB cache for wal parallel read
size = add_size(size, spaceNum * 4 * XLOG_BLCKSZ);
return size;
}
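/*
 * With spaceNum = 8192 and the stock 8 kB XLOG_BLCKSZ this reserves
 * 8192 * sizeof(wal_batch_t) descriptor bytes plus 8192 * 4 * 8192 bytes
 * (256 MB) of WAL cache, in addition to the ring_buffer_t header itself.
 */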
// one element receives at most 32 kB of WAL (4 * XLOG_BLCKSZ with the stock 8 kB block size)
const int spaceNum = 8192;
void InitRingBufferSpace(void) {
//default 256MB for cache
char* gFreeSpace = NULL;
bool found;
gFreeSpace = (char *)
ShmemInitStruct("walreadbuffer",
sizeof(ring_buffer_t) + spaceNum * sizeof(wal_batch_t) + spaceNum * 4 * XLOG_BLCKSZ,
&found);
if (gFreeSpace == NULL) {
elog(FATAL,"gFreeSpace malloc failed");
}
gRingBufferManger = (ring_buffer_t*)gFreeSpace;
gFreeSpace += sizeof(ring_buffer_t);
wal_batch_t* gManageFreeList;
gManageFreeList = (wal_batch_t*)gFreeSpace ;
gFreeSpace += spaceNum * sizeof(wal_batch_t);
int i = 0;
for(;i<spaceNum; i++) {
gManageFreeList[i].data = &gFreeSpace[i*4*XLOG_BLCKSZ];
}
ring_buffer_init(gRingBufferManger,gManageFreeList,spaceNum);
}
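/*
 * Look up the WAL record that starts at lsn in the read-ahead ring buffer.
 * The occupied slots are kept in startLsn order, so a binary search between
 * tail_index and tail_index + maxIdx finds either an exact batch start or the
 * batch that covers lsn; the record is then appended to *buffer (growing it
 * with malloc/memcpy/free if needed) and its length returned, or -1 if the
 * lsn is not cached.
 */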
int walRecordQuery(char**buffer,int* curpos,int* maxspace,uint64 lsn) {
ring_buffer_size_t maxIdx = gRingBufferManger->maxIdx;
if (maxIdx == 0) {
return -1;
}
ring_buffer_size_t tailIdx = gRingBufferManger->tail_index;
int low = tailIdx,high = ((tailIdx+maxIdx) & RING_BUFFER_MASK(gRingBufferManger)), mid = 0;
if (low > high) {
if (gRingBufferManger->buffer[gRingBufferManger->buffer_mask].startLsn + gRingBufferManger->buffer[gRingBufferManger->buffer_mask].dataLen > lsn) {
high = gRingBufferManger->buffer_mask+1;
} else {
low = 0;
}
}
if (gRingBufferManger->buffer[high-1].startLsn == 0) {
high -= 2;
} else {
high -=1;
}
bool find = false;
while(low <= high) {
mid = (low + high) / 2;
if (gRingBufferManger->buffer[mid].startLsn > lsn) {
high = mid - 1;
} else if (gRingBufferManger->buffer[mid].startLsn < lsn) {
low = mid + 1;
} else {
find = true;
break;
}
}
XLogRecord* record = NULL;
int xllen = -1;
bool extandFlag = false;
if (find == true) {
record = (XLogRecord*)gRingBufferManger->buffer[mid].data;
xllen = record->xl_tot_len;
} else {
record = (XLogRecord*)gRingBufferManger->buffer[high].data;
if (gRingBufferManger->buffer[high].startLsn + gRingBufferManger->buffer[high].dataLen <= lsn) {
return -1;
} else {
record = (XLogRecord*)(gRingBufferManger->buffer[high].data + (lsn-gRingBufferManger->buffer[high].startLsn));
xllen = record->xl_tot_len;
}
}
if (xllen != -1) {
while (*curpos + xllen > *maxspace) {
*maxspace += *maxspace;
extandFlag = true;
}
if (extandFlag == true) {
char* ptr = malloc(*maxspace);
memcpy(ptr,*buffer,*curpos);
free(*buffer);
*buffer = ptr;
}
memcpy(*buffer+*curpos,record,xllen);
*curpos += xllen;
}
return xllen;
}
extern inline uint8_t ring_buffer_is_empty(ring_buffer_t *buffer);
extern inline uint8_t ring_buffer_is_full(ring_buffer_t *buffer);
extern inline ring_buffer_size_t ring_buffer_num_items(ring_buffer_t *buffer);

View File

@ -1387,9 +1387,8 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
TimeLineID save_currtli = ThisTimeLineID;
xlogreader = XLogReaderAllocate(wal_segment_size, NULL,
XL_ROUTINE(.page_read = &read_local_xlog_page,
.segment_open = &wal_segment_open,
.segment_close = &wal_segment_close),
XL_ROUTINE(.batch_read = &read_local_xlog_batch,
),
NULL);
if (!xlogreader)
ereport(ERROR,
@ -1398,8 +1397,8 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
errdetail("Failed while allocating a WAL reading processor.")));
XLogBeginRead(xlogreader, lsn);
record = XLogReadRecord(xlogreader, &errormsg);
// record = XLogReadRecord(xlogreader, &errormsg);
record = He3DBXLogReadRecord(xlogreader, &errormsg);
/*
* Restore immediately the timeline where it was previously, as
* read_local_xlog_page() could have changed it if the record was read

View File

@ -43,6 +43,7 @@
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "postmaster/bgwriter.h"
#include "replication/logical.h"
#include "replication/logicallauncher.h"
#include "replication/origin.h"
@ -1318,6 +1319,14 @@ RecordTransactionCommit(void)
/* Tell bufmgr and smgr to prepare for commit */
BufmgrCommit();
/*
* He3DB: request an immediate checkpoint when there are pending-delete relations. Otherwise pushstandby may shut
* down after the commit has been redone but before the next checkpoint, and redo would fail when the primary restarts.
*/
if (nrels > 0 && IsBootstrapProcessingMode() != true && InitdbSingle != true)
RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT
| CHECKPOINT_FLUSH_ALL);
/*
* Mark ourselves as within our "commit critical section". This
* forces any concurrent checkpoint to wait until we've updated
@ -1682,6 +1691,15 @@ RecordTransactionAbort(bool isSubXact)
nrels = smgrGetPendingDeletes(false, &rels);
nchildren = xactGetCommittedChildren(&children);
/*
* He3DB: request an immediate checkpoint when there are pending-delete relations. Otherwise pushstandby may shut
* down after the abort has been redone but before the next checkpoint, and redo would fail when the primary restarts.
*/
if (nrels > 0 && IsBootstrapProcessingMode() != true && InitdbSingle != true)
RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT
| CHECKPOINT_FLUSH_ALL);
/* XXX do we really need a critical section here? */
START_CRIT_SECTION();
@ -5808,7 +5826,7 @@ static void
xact_redo_commit(xl_xact_parsed_commit *parsed,
TransactionId xid,
XLogRecPtr lsn,
RepOriginId origin_id)
RepOriginId origin_id, XLogRecPtr startlsn)
{
TransactionId max_xid;
TimestampTz commit_time;
@ -5913,6 +5931,20 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
*/
XLogFlush(lsn);
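/*
 * He3DB (sketch of intent): before dropping the relation files, a push
 * standby (or a non-hot-standby instance) flushes its own dirty-page map to
 * tikv, while an ordinary standby presumably waits until the global
 * push-to-disk pointer has passed this commit record.
 */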
if (push_standby || !EnableHotStandby)
{
pushTikv(0, hashMapSize(), true);
} else {
XLogRecPtr consistPtr;
consistPtr = GetXLogPushToDisk();
while (consistPtr < startlsn)
{
pg_usleep(100000L);
elog(LOG, "standby consist lsn %ld, commit lsn %ld", consistPtr, startlsn);
consistPtr = GetXLogPushToDisk();
}
}
/* Make sure files supposed to be dropped are dropped */
DropRelationFiles(parsed->xnodes, parsed->nrels, true);
}
@ -5952,7 +5984,7 @@ xact_redo_commit(xl_xact_parsed_commit *parsed,
*/
static void
xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid,
XLogRecPtr lsn, RepOriginId origin_id)
XLogRecPtr lsn, RepOriginId origin_id, XLogRecPtr startlsn)
{
TransactionId max_xid;
@ -6017,6 +6049,20 @@ xact_redo_abort(xl_xact_parsed_abort *parsed, TransactionId xid,
*/
XLogFlush(lsn);
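/*
 * He3DB (sketch of intent): same ordering rule as in xact_redo_commit() --
 * make sure the WAL for this abort has been pushed to shared storage before
 * the relation files are dropped.
 */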
if (push_standby || !EnableHotStandby)
{
pushTikv(0, hashMapSize(), true);
} else {
XLogRecPtr consistPtr;
consistPtr = GetXLogPushToDisk();
while (consistPtr < startlsn)
{
pg_usleep(100000L);
elog(LOG, "standby consist lsn %ld, abort lsn %ld", consistPtr, startlsn);
consistPtr = GetXLogPushToDisk();
}
}
DropRelationFiles(parsed->xnodes, parsed->nrels, true);
}
}
@ -6036,7 +6082,7 @@ xact_redo(XLogReaderState *record)
ParseCommitRecord(XLogRecGetInfo(record), xlrec, &parsed);
xact_redo_commit(&parsed, XLogRecGetXid(record),
record->EndRecPtr, XLogRecGetOrigin(record));
record->EndRecPtr, XLogRecGetOrigin(record), record->ReadRecPtr);
}
else if (info == XLOG_XACT_COMMIT_PREPARED)
{
@ -6045,7 +6091,7 @@ xact_redo(XLogReaderState *record)
ParseCommitRecord(XLogRecGetInfo(record), xlrec, &parsed);
xact_redo_commit(&parsed, parsed.twophase_xid,
record->EndRecPtr, XLogRecGetOrigin(record));
record->EndRecPtr, XLogRecGetOrigin(record), record->ReadRecPtr);
/* Delete TwoPhaseState gxact entry and/or 2PC file. */
LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
@ -6059,7 +6105,7 @@ xact_redo(XLogReaderState *record)
ParseAbortRecord(XLogRecGetInfo(record), xlrec, &parsed);
xact_redo_abort(&parsed, XLogRecGetXid(record),
record->EndRecPtr, XLogRecGetOrigin(record));
record->EndRecPtr, XLogRecGetOrigin(record), record->ReadRecPtr);
}
else if (info == XLOG_XACT_ABORT_PREPARED)
{
@ -6068,7 +6114,7 @@ xact_redo(XLogReaderState *record)
ParseAbortRecord(XLogRecGetInfo(record), xlrec, &parsed);
xact_redo_abort(&parsed, parsed.twophase_xid,
record->EndRecPtr, XLogRecGetOrigin(record));
record->EndRecPtr, XLogRecGetOrigin(record), record->ReadRecPtr);
/* Delete TwoPhaseState gxact entry and/or 2PC file. */
LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);

File diff suppressed because it is too large

View File

@ -31,7 +31,15 @@
#include "replication/origin.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"
#include "storage/spin.h"
#include "utils/memutils.h"
#include "access/heapam_xlog.h"
#include "access/nbtxlog.h"
#include "access/nbtree.h"
#include "access/gistxlog.h"
#include "access/gist_private.h"
#include "access/spgxlog.h"
#include "access/brin_xlog.h"
/* Buffer size required to store a compressed version of backup block image */
#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
@ -68,10 +76,12 @@ static int max_registered_block_id = 0; /* highest block_id + 1 currently
int group_total_len;
int grouo_rec_count;
int grouo_rec_cur_count;
XLogRecord *grouphead[XLR_MAX_BLOCK_ID + 1];
int grouplens[XLR_MAX_BLOCK_ID + 1];
XLogRecData groupRecData[XLR_MAX_BLOCK_ID + 1];
XLogRecPtr groupEndLsn[XLR_MAX_BLOCK_ID + 1];
/*
* A chain of XLogRecDatas to hold the "main data" of a WAL record, registered
@ -525,7 +535,8 @@ XLogInsert(RmgrId rmid, uint8 info)
EndPos = SizeOfXLogLongPHD; /* start of 1st chkpt record */
return EndPos;
}
do
{
XLogRecPtr RedoRecPtr;
@ -543,15 +554,442 @@ XLogInsert(RmgrId rmid, uint8 info)
rdt = XLogRecordAssemble(rmid, info, RedoRecPtr, doPageWrites,
&fpw_lsn, &num_fpi);
EndPos = XLogInsertRecord(rdt, fpw_lsn, curinsert_flags, num_fpi);
EndPos = He3DBXLogInsertRecord(rdt, fpw_lsn, curinsert_flags, num_fpi);
} while (EndPos == InvalidXLogRecPtr);
XLogResetInsertion();
return EndPos;
}
static XLogRecData g_bkp_rdatas[XLR_MAX_BLOCK_ID + 1][2];
static XLogRecData g_main_data;
static void extendMainData(XLogReaderState *state) {
int extendSize = 64;
if (state->main_data_len + extendSize > state->main_data_bufsz)
{
if (state->main_data)
pfree(state->main_data);
/*
* main_data_bufsz must be MAXALIGN'ed. In many xlog record
* types, we omit trailing struct padding on-disk to save a few
* bytes; but compilers may generate accesses to the xlog struct
* that assume that padding bytes are present. If the palloc
* request is not large enough to include such padding bytes then
* we'll get valgrind complaints due to otherwise-harmless fetches
* of the padding bytes.
*
* In addition, force the initial request to be reasonably large
* so that we don't waste time with lots of trips through this
* stanza. BLCKSZ / 2 seems like a good compromise choice.
*/
state->main_data_bufsz = MAXALIGN(Max(state->main_data_len + extendSize,
BLCKSZ / 2));
state->main_data = palloc(state->main_data_bufsz);
}
}
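/*
 * Rewrite the decoded record's main data from the stock PostgreSQL layout to
 * the He3 layout for the record types whose main-data structs differ between
 * the two (heap visible/update, btree split, gist page split, spgist
 * add-leaf/move-leafs/add-node/picksplit, brin insert/update). Block numbers
 * and relfilenodes that stock PostgreSQL keeps only in the block references
 * are copied into the He3 main-data structs here, presumably so that each
 * per-block He3 record stays self-contained.
 */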
static void convertMainData(XLogReaderState *state, OldXLogRecord *record) {
RmgrId rmid = record->xl_rmid;
uint8 info = (record->xl_info & ~XLR_INFO_MASK);
switch(rmid) {
case RM_HEAP2_ID:
{
if ((info & XLOG_HEAP_OPMASK) == XLOG_HEAP2_VISIBLE) {
xl_old_heap_visible *xlrec = (xl_old_heap_visible *) XLogRecGetData(state);
xl_heap_visible xlrecNew;
xlrecNew.rnode = state->blocks[1].rnode;
xlrecNew.blkno = state->blocks[1].blkno;
xlrecNew.cutoff_xid = xlrec->cutoff_xid;
xlrecNew.flags = xlrec->flags;
extendMainData(state);
state->main_data_len = sizeof(xl_heap_visible);
memcpy(state->main_data,&xlrecNew,state->main_data_len);
}
break;
}
case RM_HEAP_ID:
{
if (((info & XLOG_HEAP_OPMASK) == XLOG_HEAP_UPDATE) ||
((info & XLOG_HEAP_OPMASK) == XLOG_HEAP_HOT_UPDATE)) {
xl_old_heap_update *xlrec = (xl_old_heap_update *) XLogRecGetData(state);
xl_heap_update xlrecNew;
xlrecNew.old_xmax = xlrec->old_xmax;
xlrecNew.old_offnum = xlrec->old_offnum;
xlrecNew.old_infobits_set = xlrec->old_infobits_set;
xlrecNew.flags = xlrec->flags;
xlrecNew.new_xmax = xlrec->new_xmax;
xlrecNew.new_offnum = xlrec->new_offnum;
xlrecNew.newblk = state->blocks[0].blkno;
if(state->max_block_id == 0){
xlrecNew.oldblk = state->blocks[0].blkno;
} else{
xlrecNew.oldblk = state->blocks[1].blkno;
}
xlrecNew.rnode = state->blocks[0].rnode;
extendMainData(state);
state->main_data_len = sizeof(xl_heap_update);
memcpy(state->main_data,&xlrecNew,state->main_data_len);
}
break;
}
case RM_BTREE_ID:
{
if (info == XLOG_BTREE_SPLIT_L || info == XLOG_BTREE_SPLIT_R) {
xl_old_btree_split *xlrec = (xl_old_btree_split *) XLogRecGetData(state);
xl_btree_split xlrecNew;
xlrecNew.level = xlrec->level;
xlrecNew.firstrightoff = xlrec->firstrightoff;
xlrecNew.newitemoff = xlrec->newitemoff;
xlrecNew.postingoff = xlrec->postingoff;
xlrecNew.origpagenumber = state->blocks[0].blkno;
xlrecNew.rightpagenumber = state->blocks[1].blkno;
if (!XLogRecGetBlockTag(state, 2, NULL, NULL, &xlrecNew.spagenumber))
xlrecNew.spagenumber = P_NONE;
extendMainData(state);
state->main_data_len = sizeof(xl_btree_split);
memcpy(state->main_data,&xlrecNew,state->main_data_len);
}
break;
}
case RM_GIST_ID:
{
if (info == XLOG_GIST_PAGE_SPLIT) {
gistoldxlogPageSplit *xlrec = (gistoldxlogPageSplit *) XLogRecGetData(state);
gistxlogPageSplit xlrecNew;
xlrecNew.markfollowright = xlrec->markfollowright;
xlrecNew.npage = xlrec->npage;
xlrecNew.origleaf = xlrec->origleaf;
xlrecNew.orignsn = xlrec->orignsn;
xlrecNew.origrlink = xlrec->origrlink;
xlrecNew.isroot = false;
if (xlrec->npage > 0) {
if (state->blocks[1].blkno == GIST_ROOT_BLKNO) {
xlrecNew.isroot = true;
}
}
extendMainData(state);
state->main_data_len = sizeof(gistxlogPageSplit);
memcpy(state->main_data,&xlrecNew,state->main_data_len);
}
break;
}
case RM_SPGIST_ID:
{
if (info == XLOG_SPGIST_ADD_LEAF) {
spgoldxlogAddLeaf *xlrec = (spgoldxlogAddLeaf *) XLogRecGetData(state);
spgxlogAddLeaf xlrecNew;
xlrecNew.newPage = xlrec->newPage; /* init dest page? */
xlrecNew.storesNulls = xlrec->storesNulls; /* page is in the nulls tree? */
xlrecNew.offnumLeaf = xlrec->offnumLeaf; /* offset where leaf tuple gets placed */
xlrecNew.offnumHeadLeaf = xlrec->offnumHeadLeaf; /* offset of head tuple in chain, if any */
xlrecNew.offnumParent = xlrec->offnumParent; /* where the parent downlink is, if any */
xlrecNew.nodeI = xlrec->nodeI;
xlrecNew.blknoLeaf = state->blocks[0].blkno;
extendMainData(state);
state->main_data_len = sizeof(spgxlogAddLeaf);
memcpy(state->main_data,&xlrecNew,state->main_data_len);
} else if (info == XLOG_SPGIST_MOVE_LEAFS) {
spgoldxlogMoveLeafs *xlrec = (spgoldxlogMoveLeafs *) XLogRecGetData(state);
spgxlogMoveLeafs xlrecNew;
xlrecNew.nMoves = xlrec->nMoves; /* number of tuples moved from source page */
xlrecNew.newPage = xlrec->newPage; /* init dest page? */
xlrecNew.replaceDead = xlrec->replaceDead; /* are we replacing a DEAD source tuple? */
xlrecNew.storesNulls = xlrec->storesNulls; /* pages are in the nulls tree? */
/* where the parent downlink is */
xlrecNew.offnumParent = xlrec->offnumParent;
xlrecNew.nodeI = xlrec->nodeI;
xlrecNew.stateSrc = xlrec->stateSrc;
/* for he3pg */
xlrecNew.blknoDst = state->blocks[1].blkno;
/*----------
* data follows:
* array of deleted tuple numbers, length nMoves
* array of inserted tuple numbers, length nMoves + 1 or 1
* list of leaf tuples, length nMoves + 1 or 1 (unaligned!)
*
* Note: if replaceDead is true then there is only one inserted tuple
* number and only one leaf tuple in the data, because we are not copying
* the dead tuple from the source
*----------
*/
char* tmp = palloc(state->main_data_len-SizeOfOldSpgxlogMoveLeafs);
memcpy(tmp,state->main_data+SizeOfOldSpgxlogMoveLeafs,state->main_data_len-SizeOfOldSpgxlogMoveLeafs);
extendMainData(state);
memcpy(state->main_data,&xlrecNew,SizeOfSpgxlogMoveLeafs);
memcpy(state->main_data + SizeOfSpgxlogMoveLeafs, tmp, state->main_data_len-SizeOfOldSpgxlogMoveLeafs);
state->main_data_len += SizeOfSpgxlogMoveLeafs-SizeOfOldSpgxlogMoveLeafs;
pfree(tmp);
} else if (info == XLOG_SPGIST_ADD_NODE) {
spgoldxlogAddNode *xlrec = (spgoldxlogAddNode *) XLogRecGetData(state);
spgxlogAddNode xlrecNew;
xlrecNew.offnum = xlrec->offnum;
/*
* Offset of the new tuple, on the new page (on backup block 1). Invalid,
* if we overwrote the old tuple in the original page).
*/
xlrecNew.offnumNew = xlrec->offnumNew;
xlrecNew.newPage = xlrec->newPage; /* init new page? */
/*----
* Where is the parent downlink? parentBlk indicates which page it's on,
* and offnumParent is the offset within the page. The possible values for
* parentBlk are:
*
* 0: parent == original page
* 1: parent == new page
* 2: parent == different page (blk ref 2)
* -1: parent not updated
*----
*/
xlrecNew.parentBlk = xlrec->parentBlk;
xlrecNew.offnumParent = xlrec->offnumParent; /* offset within the parent page */
xlrecNew.nodeI = xlrec->nodeI;
xlrecNew.blkno1 = state->blocks[0].blkno;
xlrecNew.stateSrc = xlrec->stateSrc;
extendMainData(state);
state->main_data_len = sizeof(spgxlogAddNode);
memcpy(state->main_data,&xlrecNew,state->main_data_len);
} else if (info == XLOG_SPGIST_PICKSPLIT) {
spgoldxlogPickSplit *xlrec = (spgoldxlogPickSplit *) XLogRecGetData(state);
spgxlogPickSplit xlrecNew;
xlrecNew.isRootSplit = xlrec->isRootSplit;
xlrecNew.nDelete = xlrec->nDelete; /* n to delete from Src */
xlrecNew.nInsert = xlrec->nInsert; /* n to insert on Src and/or Dest */
xlrecNew.initSrc = xlrec->initSrc; /* re-init the Src page? */
xlrecNew.initDest = xlrec->initDest; /* re-init the Dest page? */
/* for he3pg */
xlrecNew.blknoInner = state->blocks[2].blkno;
/* where to put new inner tuple */
xlrecNew.offnumInner = xlrec->offnumInner;
xlrecNew.initInner = xlrec->initInner; /* re-init the Inner page? */
xlrecNew.storesNulls = xlrec->storesNulls; /* pages are in the nulls tree? */
/* where the parent downlink is, if any */
xlrecNew.innerIsParent = xlrec->innerIsParent; /* is parent the same as inner page? */
xlrecNew.offnumParent = xlrec->offnumParent;
xlrecNew.nodeI = xlrec->nodeI;
xlrecNew.stateSrc = xlrec->stateSrc;
/*----------
* data follows:
* array of deleted tuple numbers, length nDelete
* array of inserted tuple numbers, length nInsert
* array of page selector bytes for inserted tuples, length nInsert
* new inner tuple (unaligned!)
* list of leaf tuples, length nInsert (unaligned!)
*----------
*/
char* tmp = palloc(state->main_data_len-SizeOfOldSpgxlogPickSplit);
memcpy(tmp,state->main_data+SizeOfOldSpgxlogPickSplit,state->main_data_len-SizeOfOldSpgxlogPickSplit);
extendMainData(state);
memcpy(state->main_data,&xlrecNew,SizeOfSpgxlogPickSplit);
memcpy(state->main_data + SizeOfSpgxlogPickSplit, tmp, state->main_data_len-SizeOfOldSpgxlogPickSplit);
state->main_data_len += SizeOfSpgxlogPickSplit-SizeOfOldSpgxlogPickSplit;
pfree(tmp);
}
break;
}
case RM_BRIN_ID:
{
if (info == XLOG_BRIN_INSERT) {
xl_old_brin_insert *xlrec = (xl_old_brin_insert *) XLogRecGetData(state);
xl_brin_insert xlrecNew;
xlrecNew.heapBlk = xlrec->heapBlk;
/* extra information needed to update the revmap */
xlrecNew.pagesPerRange = xlrec->pagesPerRange;
xlrecNew.block0 = state->blocks[0].blkno;
/* offset number in the main page to insert the tuple to. */
xlrecNew.offnum = xlrec->offnum;
extendMainData(state);
state->main_data_len = sizeof(xl_brin_insert);
memcpy(state->main_data,&xlrecNew,state->main_data_len);
} else if ( info == XLOG_BRIN_UPDATE) {
xl_old_brin_update *xlrec = (xl_old_brin_update *) XLogRecGetData(state);
xl_brin_update xlrecUpdate;
xl_old_brin_insert *xlrecInsert = &xlrec->insert;
xl_brin_insert xlrecNew;
xlrecNew.heapBlk = xlrecInsert->heapBlk;
/* extra information needed to update the revmap */
xlrecNew.pagesPerRange = xlrecInsert->pagesPerRange;
xlrecNew.block0 = state->blocks[0].blkno;
/* offset number in the main page to insert the tuple to. */
xlrecNew.offnum = xlrecInsert->offnum;
xlrecUpdate.oldOffnum = xlrec->oldOffnum;
xlrecUpdate.insert = xlrecNew;
extendMainData(state);
state->main_data_len = sizeof(xl_brin_update);
memcpy(state->main_data,&xlrecUpdate,state->main_data_len);
}
break;
}
default:
break;
}
}
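/*
 * Re-assemble a decoded old-format record into He3 form: one XLogRecData
 * chain and one per-block XLogRecord header per registered block, with the
 * (possibly convertMainData()-rewritten) main data appended to each chain via
 * a shared g_main_data node. The results are left in groupRecData/grouphead/
 * grouplens, group_total_len accumulates the per-block record lengths, and
 * the last header is marked mtr = true to close the mini-transaction group.
 */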
XLogRecData *DecodeXLogRecordAssemble(XLogReaderState *state, OldXLogRecord *record,
XLogRecPtr RedoRecPtr, bool doPageWrites,
XLogRecPtr *fpw_lsn, int *num_fpi)
{
/*
* Make an rdata chain containing all the data portions of all block
* references. This includes the data for full-page images. Also append
* the headers for the block references in the scratch buffer.
*/
RmgrId rmid = record->xl_rmid;
uint8 info = record->xl_info;
*fpw_lsn = InvalidXLogRecPtr;
int block_id;
XLogRecord *rechdr = NULL;
group_total_len = 0;
grouo_rec_count = 0;
grouo_rec_cur_count = 0;
int maxidx = (state->max_block_id < 0 ? 1:state->max_block_id+1);
bool isDone = false;
for (block_id = 0; block_id < maxidx; block_id++)
{
XLogRecData* rdt;
uint32 total_len;
total_len = 0;
pg_crc32c rdata_crc = 0;
XLogRecData *rdt_datas_last;
char *scratch;
// char linkkey[36];
groupRecData[grouo_rec_count].next = NULL;
rdt_datas_last = &groupRecData[grouo_rec_count];
scratch = hdr_scratch + grouo_rec_count * SINGLE_SCRATCH_SIZE;
groupRecData[grouo_rec_count].data = scratch;
/*group_total_len+=HEADER_SCRATCH_SIZE;*/
grouphead[grouo_rec_count]=(XLogRecord *)scratch;
/* The record begins with the fixed-size header */
rechdr = (XLogRecord *)scratch;
scratch += SizeOfXLogRecord;
if (state->max_block_id >= 0) {
DecodedBkpBlock *blkbuf = &state->blocks[block_id];
if (!blkbuf->in_use)
continue;
XLogRecData* bkp_rdatas = g_bkp_rdatas[block_id];
XLogRecordBlockHeader bkpb;
XLogRecordBlockImageHeader bimg;
XLogRecordBlockCompressHeader cbimg = {0};
bkpb.id = 0;
bkpb.fork_flags = blkbuf->flags;
bkpb.data_length = blkbuf->data_len;
//total_len += bkpb.data_length;
/* Ok, copy the header to the scratch buffer */
memcpy(scratch, &bkpb, SizeOfXLogRecordBlockHeader);
scratch += SizeOfXLogRecordBlockHeader;
if (blkbuf->has_image) {
bimg.bimg_info = blkbuf->bimg_info;
bimg.hole_offset = blkbuf->hole_offset;
bimg.length = blkbuf->bimg_len;
memcpy(scratch, &bimg, SizeOfXLogRecordBlockImageHeader);
scratch += SizeOfXLogRecordBlockImageHeader;
rdt_datas_last->next = &bkp_rdatas[0];
rdt_datas_last = rdt_datas_last->next;
bkp_rdatas[0].data = blkbuf->bkp_image;
bkp_rdatas[0].len = blkbuf->bimg_len;
if (bimg.bimg_info & BKPIMAGE_IS_COMPRESSED) {
cbimg.hole_length = blkbuf->hole_length;
if (bimg.bimg_info & BKPIMAGE_HAS_HOLE) {
memcpy(scratch, &cbimg,
SizeOfXLogRecordBlockCompressHeader);
scratch += SizeOfXLogRecordBlockCompressHeader;
}
}
total_len += bimg.length;
*num_fpi += 1;
}
if (blkbuf->has_data) {
rdt_datas_last->next = &bkp_rdatas[1];
rdt_datas_last = rdt_datas_last->next;
bkp_rdatas[1].data = blkbuf->data;
bkp_rdatas[1].len = blkbuf->data_len;
total_len += blkbuf->data_len;
}
memcpy(scratch, &blkbuf->rnode, sizeof(RelFileNode));
scratch += sizeof(RelFileNode);
memcpy(scratch, &blkbuf->blkno, sizeof(BlockNumber));
scratch += sizeof(BlockNumber);
}
if (state->record_origin != InvalidRepOriginId) {
*(scratch++) = (char)XLR_BLOCK_ID_ORIGIN;
memcpy(scratch, &state->record_origin, sizeof(RepOriginId));
scratch += sizeof(RepOriginId);
}
if (state->toplevel_xid != InvalidTransactionId) {
*(scratch++) = (char)XLR_BLOCK_ID_TOPLEVEL_XID;
memcpy(scratch, &state->toplevel_xid, sizeof(TransactionId));
scratch += sizeof(TransactionId);
}
if (state->main_data_len > 0) {
rdt_datas_last->next = &g_main_data;
rdt_datas_last = &g_main_data;
if (isDone == false) {
convertMainData(state,record);
g_main_data.data = state->main_data;
g_main_data.len = state->main_data_len;
isDone = true;
}
if (state->main_data_len > 255) {
*(scratch++) = (char)XLR_BLOCK_ID_DATA_LONG;
memcpy(scratch, &state->main_data_len, sizeof(uint32));
scratch += sizeof(uint32);
} else {
*(scratch++) = (char)XLR_BLOCK_ID_DATA_SHORT;
*(scratch++) = (uint8)state->main_data_len;
}
total_len += state->main_data_len;
}
rdt_datas_last->next = NULL;
groupRecData[grouo_rec_count].len = scratch - groupRecData[grouo_rec_count].data;
total_len += groupRecData[grouo_rec_count].len;
grouplens[grouo_rec_count] = total_len;
/*
* Fill in the fields in the record header. Prev-link is filled in later,
* once we know where in the WAL the record will be inserted. The CRC does
* not include the record header yet.
*/
rechdr->xl_xid = record->xl_xid;
rechdr->xl_tot_len = total_len;
rechdr->xl_info = info;
rechdr->xl_rmid = rmid;
rechdr->xl_prev = InvalidXLogRecPtr;
rechdr->xl_crc = rdata_crc;
rechdr->blocknum = block_id;
rechdr->mtr = false;
group_total_len += total_len;
grouo_rec_count++;
}
rechdr->mtr = true;
return &groupRecData[0];
}
/*
* Assemble a WAL record from the registered data and buffers into an
* XLogRecData chain, ready for insertion with XLogInsertRecord().
@ -598,8 +1036,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
pg_crc32c rdata_crc;
XLogRecData *rdt_datas_last;
char *scratch;
// char linkkey[36];
groupRecData[grouo_rec_count].next = NULL;
rdt_datas_last = &groupRecData[grouo_rec_count];
scratch = hdr_scratch + grouo_rec_count * SINGLE_SCRATCH_SIZE;
groupRecData[grouo_rec_count].data = scratch;
/*group_total_len+=HEADER_SCRATCH_SIZE;*/
@ -609,6 +1049,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
scratch += SizeOfXLogRecord;
if (max_registered_block_id != 0) {
registered_buffer *regbuf = &registered_buffers[block_id];
bool needs_backup;
bool needs_data;
XLogRecordBlockHeader bkpb;
@ -621,7 +1062,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
if (!regbuf->in_use)
continue;
/*
/*
* Note: this function can be called multiple times for the same record.
* All the modifications we do to the rdata chains below must handle that.
*/
@ -823,7 +1264,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
/* followed by the record's origin, if any */
if ((curinsert_flags & XLOG_INCLUDE_ORIGIN) &&
replorigin_session_origin != InvalidRepOriginId)
replorigin_session_origin != InvalidRepOriginId) //1
{
*(scratch++) = (char)XLR_BLOCK_ID_ORIGIN;
memcpy(scratch, &replorigin_session_origin, sizeof(replorigin_session_origin));
@ -831,7 +1272,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
}
/* followed by toplevel XID, if not already included in previous record */
if (IsSubTransactionAssignmentPending())
if (IsSubTransactionAssignmentPending()) //2
{
TransactionId xid = GetTopTransactionIdIfAny();
@ -846,13 +1287,13 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
/* followed by main data, if any */
if (mainrdata_len > 0)
{
if (mainrdata_len > 255)
if (mainrdata_len > 255) //3
{
*(scratch++) = (char)XLR_BLOCK_ID_DATA_LONG;
*(scratch++) = (char)XLR_BLOCK_ID_DATA_LONG;
memcpy(scratch, &mainrdata_len, sizeof(uint32));
scratch += sizeof(uint32);
}
else
else //4
{
*(scratch++) = (char)XLR_BLOCK_ID_DATA_SHORT;
*(scratch++) = (uint8)mainrdata_len;
@ -865,7 +1306,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
rdt_datas_last->next = NULL;
groupRecData[grouo_rec_count].len = scratch - groupRecData[grouo_rec_count].data;
total_len += groupRecData[grouo_rec_count].len;
grouplens[grouo_rec_count]=MAXALIGN(total_len);
//grouplens[grouo_rec_count]=MAXALIGN(total_len);
grouplens[grouo_rec_count]=total_len;
/*
@ -907,7 +1349,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
rechdr->blocknum=block_id;
rechdr->mtr = false;
group_total_len += MAXALIGN(total_len);
group_total_len += total_len;
grouo_rec_count++;
}
rechdr->mtr = true;

File diff suppressed because it is too large

View File

@ -369,6 +369,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
/* Caller specified a bogus block_id */
elog(PANIC, "failed to locate backup block with ID %d", block_id);
}
/*
#ifndef PG_NOREPLAY
if (IsBootstrapProcessingMode() != true && InitdbSingle != true) {
//push standby collect dirty page
@ -384,6 +385,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
}
}
#endif
*/
/*
* Make sure that if the block is marked with WILL_INIT, the caller is
@ -427,8 +429,8 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
* force the on-disk state of init forks to always be in sync with the
* state in shared buffers.
*/
if (forknum == INIT_FORKNUM)
FlushOneBuffer(*buf);
// if (forknum == INIT_FORKNUM)
// FlushOneBuffer(*buf);
return BLK_RESTORED;
}
@ -630,13 +632,15 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
else
{
/* hm, page doesn't exist in file */
if (mode == RBM_NORMAL)
{
log_invalid_page(rnode, forknum, blkno, false);
return InvalidBuffer;
}
if (mode == RBM_NORMAL_NO_LOG)
return InvalidBuffer;
if(!he3mirror && he3share){
if (mode == RBM_NORMAL && EnableHotStandby != false && *isPromoteIsTriggered == false)
{
log_invalid_page(rnode, forknum, blkno, false);
return InvalidBuffer;
}
if (mode == RBM_NORMAL_NO_LOG)
return InvalidBuffer;
}
/* OK to extend the file */
/* we do this in recovery only - no rel-extension lock needed */
Assert(InRecovery);
@ -664,7 +668,7 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
}
}
if (mode == RBM_NORMAL)
if (he3share && !he3mirror && mode == RBM_NORMAL)
{
/* check that page has been initialized */
Page page = (Page) BufferGetPage(buffer);
@ -890,135 +894,135 @@ XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum,
* caller must also update ThisTimeLineID with the result of
* GetXLogReplayRecPtr and must check RecoveryInProgress().
*/
void
XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
{
const XLogRecPtr lastReadPage = (state->seg.ws_segno *
state->segcxt.ws_segsize + state->segoff);
// void
// XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
// {
// const XLogRecPtr lastReadPage = (state->seg.ws_segno *
// state->segcxt.ws_segsize + state->segoff);
Assert(wantPage != InvalidXLogRecPtr && wantPage % XLOG_BLCKSZ == 0);
Assert(wantLength <= XLOG_BLCKSZ);
Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
// Assert(wantPage != InvalidXLogRecPtr && wantPage % XLOG_BLCKSZ == 0);
// Assert(wantLength <= XLOG_BLCKSZ);
// Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
/*
* If the desired page is currently read in and valid, we have nothing to
* do.
*
* The caller should've ensured that it didn't previously advance readOff
* past the valid limit of this timeline, so it doesn't matter if the
* current TLI has since become historical.
*/
if (lastReadPage == wantPage &&
state->readLen != 0 &&
lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
return;
// /*
// * If the desired page is currently read in and valid, we have nothing to
// * do.
// *
// * The caller should've ensured that it didn't previously advance readOff
// * past the valid limit of this timeline, so it doesn't matter if the
// * current TLI has since become historical.
// */
// if (lastReadPage == wantPage &&
// state->readLen != 0 &&
// lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
// return;
/*
* If we're reading from the current timeline, it hasn't become historical
* and the page we're reading is after the last page read, we can again
* just carry on. (Seeking backwards requires a check to make sure the
* older page isn't on a prior timeline).
*
* ThisTimeLineID might've become historical since we last looked, but the
* caller is required not to read past the flush limit it saw at the time
* it looked up the timeline. There's nothing we can do about it if
* StartupXLOG() renames it to .partial concurrently.
*/
if (state->currTLI == ThisTimeLineID && wantPage >= lastReadPage)
{
Assert(state->currTLIValidUntil == InvalidXLogRecPtr);
return;
}
// /*
// * If we're reading from the current timeline, it hasn't become historical
// * and the page we're reading is after the last page read, we can again
// * just carry on. (Seeking backwards requires a check to make sure the
// * older page isn't on a prior timeline).
// *
// * ThisTimeLineID might've become historical since we last looked, but the
// * caller is required not to read past the flush limit it saw at the time
// * it looked up the timeline. There's nothing we can do about it if
// * StartupXLOG() renames it to .partial concurrently.
// */
// if (state->currTLI == ThisTimeLineID && wantPage >= lastReadPage)
// {
// Assert(state->currTLIValidUntil == InvalidXLogRecPtr);
// return;
// }
/*
* If we're just reading pages from a previously validated historical
* timeline and the timeline we're reading from is valid until the end of
* the current segment we can just keep reading.
*/
if (state->currTLIValidUntil != InvalidXLogRecPtr &&
state->currTLI != ThisTimeLineID &&
state->currTLI != 0 &&
((wantPage + wantLength) / state->segcxt.ws_segsize) <
(state->currTLIValidUntil / state->segcxt.ws_segsize))
return;
// /*
// * If we're just reading pages from a previously validated historical
// * timeline and the timeline we're reading from is valid until the end of
// * the current segment we can just keep reading.
// */
// if (state->currTLIValidUntil != InvalidXLogRecPtr &&
// state->currTLI != ThisTimeLineID &&
// state->currTLI != 0 &&
// ((wantPage + wantLength) / state->segcxt.ws_segsize) <
// (state->currTLIValidUntil / state->segcxt.ws_segsize))
// return;
/*
* If we reach this point we're either looking up a page for random
* access, the current timeline just became historical, or we're reading
* from a new segment containing a timeline switch. In all cases we need
* to determine the newest timeline on the segment.
*
* If it's the current timeline we can just keep reading from here unless
* we detect a timeline switch that makes the current timeline historical.
* If it's a historical timeline we can read all the segment on the newest
* timeline because it contains all the old timelines' data too. So only
* one switch check is required.
*/
{
/*
* We need to re-read the timeline history in case it's been changed
* by a promotion or replay from a cascaded replica.
*/
List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
XLogRecPtr endOfSegment;
// /*
// * If we reach this point we're either looking up a page for random
// * access, the current timeline just became historical, or we're reading
// * from a new segment containing a timeline switch. In all cases we need
// * to determine the newest timeline on the segment.
// *
// * If it's the current timeline we can just keep reading from here unless
// * we detect a timeline switch that makes the current timeline historical.
// * If it's a historical timeline we can read all the segment on the newest
// * timeline because it contains all the old timelines' data too. So only
// * one switch check is required.
// */
// {
// /*
// * We need to re-read the timeline history in case it's been changed
// * by a promotion or replay from a cascaded replica.
// */
// List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
// XLogRecPtr endOfSegment;
endOfSegment = ((wantPage / state->segcxt.ws_segsize) + 1) *
state->segcxt.ws_segsize - 1;
Assert(wantPage / state->segcxt.ws_segsize ==
endOfSegment / state->segcxt.ws_segsize);
// endOfSegment = ((wantPage / state->segcxt.ws_segsize) + 1) *
// state->segcxt.ws_segsize - 1;
// Assert(wantPage / state->segcxt.ws_segsize ==
// endOfSegment / state->segcxt.ws_segsize);
/*
* Find the timeline of the last LSN on the segment containing
* wantPage.
*/
state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
&state->nextTLI);
// /*
// * Find the timeline of the last LSN on the segment containing
// * wantPage.
// */
// state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
// state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
// &state->nextTLI);
Assert(state->currTLIValidUntil == InvalidXLogRecPtr ||
wantPage + wantLength < state->currTLIValidUntil);
// Assert(state->currTLIValidUntil == InvalidXLogRecPtr ||
// wantPage + wantLength < state->currTLIValidUntil);
list_free_deep(timelineHistory);
// list_free_deep(timelineHistory);
elog(DEBUG3, "switched to timeline %u valid until %X/%X",
state->currTLI,
LSN_FORMAT_ARGS(state->currTLIValidUntil));
}
}
// elog(DEBUG3, "switched to timeline %u valid until %X/%X",
// state->currTLI,
// LSN_FORMAT_ARGS(state->currTLIValidUntil));
// }
// }
/* XLogReaderRoutine->segment_open callback for local pg_wal files */
void
wal_segment_open(XLogReaderState *state, XLogSegNo nextSegNo,
TimeLineID *tli_p)
{
TimeLineID tli = *tli_p;
char path[MAXPGPATH];
void
wal_segment_open(XLogReaderState *state, XLogSegNo nextSegNo,
TimeLineID *tli_p)
{
TimeLineID tli = *tli_p;
char path[MAXPGPATH];
XLogFilePath(path, tli, nextSegNo, state->segcxt.ws_segsize);
state->seg.ws_file = BasicOpenFile(path, O_RDONLY | PG_BINARY);
if (state->seg.ws_file >= 0)
return;
XLogFilePath(path, tli, nextSegNo, state->segcxt.ws_segsize);
state->seg.ws_file = BasicOpenFile(path, O_RDONLY | PG_BINARY);
if (state->seg.ws_file >= 0)
return;
if (errno == ENOENT)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("requested WAL segment %s has already been removed",
path)));
else
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open file \"%s\": %m",
path)));
}
if (errno == ENOENT)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("requested WAL segment %s has already been removed",
path)));
else
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open file \"%s\": %m",
path)));
}
/* stock XLogReaderRoutine->segment_close callback */
void
wal_segment_close(XLogReaderState *state)
{
close(state->seg.ws_file);
/* need to check errno? */
state->seg.ws_file = -1;
}
void
wal_segment_close(XLogReaderState *state)
{
close(state->seg.ws_file);
/* need to check errno? */
state->seg.ws_file = -1;
}
/*
* XLogReaderRoutine->page_read callback for reading local xlog files
@ -1031,9 +1035,132 @@ wal_segment_close(XLogReaderState *state)
* exists for normal backends, so we have to do a check/sleep/repeat style of
* loop for now.
*/
// int
// read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
// int reqLen, XLogRecPtr targetRecPtr, char *cur_page)
// {
// XLogRecPtr read_upto,
// loc;
// TimeLineID tli;
// int count;
// WALReadError errinfo;
// loc = targetPagePtr + reqLen;
// /* Loop waiting for xlog to be available if necessary */
// while (1)
// {
// /*
// * Determine the limit of xlog we can currently read to, and what the
// * most recent timeline is.
// *
// * RecoveryInProgress() will update ThisTimeLineID when it first
// * notices recovery finishes, so we only have to maintain it for the
// * local process until recovery ends.
// */
// if (!RecoveryInProgress())
// read_upto = GetFlushRecPtr();
// else
// read_upto = GetXLogReplayRecPtr(&ThisTimeLineID);
// tli = ThisTimeLineID;
// /*
// * Check which timeline to get the record from.
// *
// * We have to do it each time through the loop because if we're in
// * recovery as a cascading standby, the current timeline might've
// * become historical. We can't rely on RecoveryInProgress() because in
// * a standby configuration like
// *
// * A => B => C
// *
// * if we're a logical decoding session on C, and B gets promoted, our
// * timeline will change while we remain in recovery.
// *
// * We can't just keep reading from the old timeline as the last WAL
// * archive in the timeline will get renamed to .partial by
// * StartupXLOG().
// *
// * If that happens after our caller updated ThisTimeLineID but before
// * we actually read the xlog page, we might still try to read from the
// * old (now renamed) segment and fail. There's not much we can do
// * about this, but it can only happen when we're a leaf of a cascading
// * standby whose primary gets promoted while we're decoding, so a
// * one-off ERROR isn't too bad.
// */
// XLogReadDetermineTimeline(state, targetPagePtr, reqLen);
// if (state->currTLI == ThisTimeLineID)
// {
// if (loc <= read_upto)
// break;
// CHECK_FOR_INTERRUPTS();
// pg_usleep(1000L);
// }
// else
// {
// /*
// * We're on a historical timeline, so limit reading to the switch
// * point where we moved to the next timeline.
// *
// * We don't need to GetFlushRecPtr or GetXLogReplayRecPtr. We know
// * about the new timeline, so we must've received past the end of
// * it.
// */
// read_upto = state->currTLIValidUntil;
// /*
// * Setting tli to our wanted record's TLI is slightly wrong; the
// * page might begin on an older timeline if it contains a timeline
// * switch, since its xlog segment will have been copied from the
// * prior timeline. This is pretty harmless though, as nothing
// * cares so long as the timeline doesn't go backwards. We should
// * read the page header instead; FIXME someday.
// */
// tli = state->currTLI;
// /* No need to wait on a historical timeline */
// break;
// }
// }
// if (targetPagePtr + XLOG_BLCKSZ <= read_upto)
// {
// /*
// * more than one block available; read only that block, have caller
// * come back if they need more.
// */
// count = XLOG_BLCKSZ;
// }
// else if (targetPagePtr + reqLen > read_upto)
// {
// /* not enough data there */
// return -1;
// }
// else
// {
// /* enough bytes available to satisfy the request */
// count = read_upto - targetPagePtr;
// }
// /*
// * Even though we just determined how much of the page can be validly read
// * as 'count', read the whole page anyway. It's guaranteed to be
// * zero-padded up to the page boundary if it's incomplete.
// */
// if (!WALRead(state, cur_page, targetPagePtr, XLOG_BLCKSZ, tli,
// &errinfo))
// WALReadRaiseError(&errinfo);
// /* number of valid bytes in the buffer */
// return count;
// }
int
read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
int reqLen, XLogRecPtr targetRecPtr, char *cur_page)
read_local_xlog_batch(XLogReaderState *state,
XLogRecPtr targetRecPtr, int reqLen, char *cur_page)
{
XLogRecPtr read_upto,
loc;
@ -1041,7 +1168,7 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
int count;
WALReadError errinfo;
loc = targetPagePtr + reqLen;
loc = targetRecPtr + reqLen;
/* Loop waiting for xlog to be available if necessary */
while (1)
@ -1084,7 +1211,7 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
* standby whose primary gets promoted while we're decoding, so a
* one-off ERROR isn't too bad.
*/
XLogReadDetermineTimeline(state, targetPagePtr, reqLen);
// XLogReadDetermineTimeline(state, targetRecPtr, reqLen);
if (state->currTLI == ThisTimeLineID)
{
@ -1122,7 +1249,7 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
}
}
if (targetPagePtr + XLOG_BLCKSZ <= read_upto)
if (targetRecPtr + XLOG_BLCKSZ <= read_upto)
{
/*
* more than one block available; read only that block, have caller
@ -1130,7 +1257,7 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
*/
count = XLOG_BLCKSZ;
}
else if (targetPagePtr + reqLen > read_upto)
else if (targetRecPtr + reqLen > read_upto)
{
/* not enough data there */
return -1;
@ -1138,7 +1265,7 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
else
{
/* enough bytes available to satisfy the request */
count = read_upto - targetPagePtr;
count = read_upto - targetRecPtr;
}
/*
@ -1146,14 +1273,15 @@ read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
* as 'count', read the whole page anyway. It's guaranteed to be
* zero-padded up to the page boundary if it's incomplete.
*/
if (!WALRead(state, cur_page, targetPagePtr, XLOG_BLCKSZ, tli,
&errinfo))
WALReadRaiseError(&errinfo);
// if (!He3DBWALRead(state, cur_page, targetRecPtr, XLOG_BLCKSZ, tli,
// &errinfo))
// WALReadRaiseError(&errinfo);
/* number of valid bytes in the buffer */
return count;
}
/*
* Backend-specific convenience code to handle read errors encountered by
* WALRead().

View File

@ -37,6 +37,7 @@
#include "postmaster/bgwriter.h"
#include "postmaster/startup.h"
#include "postmaster/walwriter.h"
#include "postmaster/secondbuffer.h"
#include "replication/walreceiver.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
@ -50,6 +51,7 @@
#include "utils/ps_status.h"
#include "utils/rel.h"
#include "utils/relmapper.h"
#include "access/pagehashqueue.h"
uint32 bootstrap_data_checksum_version = 0; /* No checksum */
@ -333,6 +335,12 @@ AuxiliaryProcessMain(int argc, char *argv[])
case WalReceiverProcess:
MyBackendType = B_WAL_RECEIVER;
break;
case CleanLogIndexProcess:
MyBackendType = B_CLEAN_LOGINDEX;
break;
case SecondBufferProcess:
MyBackendType = B_SECONDBUFFER;
break;
default:
MyBackendType = B_INVALID;
}
@ -467,7 +475,14 @@ AuxiliaryProcessMain(int argc, char *argv[])
case WalReceiverProcess:
WalReceiverMain();
proc_exit(1);
case CleanLogIndexProcess:
CleanLogIndexMain(0,NULL);
proc_exit(1);
case SecondBufferProcess:
SecondBufferMain();
proc_exit(1);
default:
elog(PANIC, "unrecognized process type: %d", (int) MyAuxProcType);
proc_exit(1);
@ -521,7 +536,6 @@ BootstrapModeMain(void)
attrtypes[i] = NULL;
Nulls[i] = false;
}
ufs_init_client();
/*
* Process bootstrap input.
*/

View File

@ -44,8 +44,7 @@ OBJS = \
pg_subscription.o \
pg_type.o \
storage.o \
toasting.o \
pg_hot_data.o
toasting.o
include $(top_srcdir)/src/backend/common.mk
@ -70,7 +69,7 @@ CATALOG_HEADERS := \
pg_default_acl.h pg_init_privs.h pg_seclabel.h pg_shseclabel.h \
pg_collation.h pg_partitioned_table.h pg_range.h pg_transform.h \
pg_sequence.h pg_publication.h pg_publication_rel.h pg_subscription.h \
pg_subscription_rel.h pg_stat_share_storage.h pg_hot_data.h
pg_subscription_rel.h pg_stat_share_storage.h
GENERATED_HEADERS := $(CATALOG_HEADERS:%.h=%_d.h) schemapg.h system_fk_info.h

View File

@ -40,7 +40,6 @@
#include "catalog/pg_stat_share_storage.h"
#include "catalog/pg_tablespace.h"
#include "catalog/pg_type.h"
#include "catalog/pg_hot_data.h"
#include "miscadmin.h"
#include "storage/fd.h"
#include "utils/fmgroids.h"
@ -248,7 +247,6 @@ IsSharedRelation(Oid relationId)
if (relationId == AuthIdRelationId ||
relationId == AuthMemRelationId ||
relationId == DatabaseRelationId ||
relationId == HotDataRelationId ||
relationId == SharedDescriptionRelationId ||
relationId == SharedDependRelationId ||
relationId == SharedSecLabelRelationId ||
@ -265,7 +263,6 @@ IsSharedRelation(Oid relationId)
relationId == AuthMemMemRoleIndexId ||
relationId == DatabaseNameIndexId ||
relationId == DatabaseOidIndexId ||
relationId == HotDataDatnameRelnameIndexId ||
relationId == SharedDescriptionObjIndexId ||
relationId == SharedDependDependerIndexId ||
relationId == SharedDependReferenceIndexId ||

View File

@ -1,276 +0,0 @@
/*-------------------------------------------------------------------------
*
* pg_hot_data.c
* for hot data precache
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "catalog/pg_hot_data.h"
#include "libpq-fe.h"
#include "lib/stringinfo.h"
#include "utils/timestamp.h"
#include "access/xlog.h"
#include "postmaster/postmaster.h"
#include <stdlib.h>
void PrecacheHotData()
{
char instanceName[NAMEDATALEN]; //default:master
char primaryHost[16]; //default:127.0.0.1
char primaryUser[NAMEDATALEN]; //default:postgres
char primaryPw[NAMEDATALEN]; //default:123456
char primaryPort[8]; //default:PostPortNumber
char localPort[8]; //default:master
StringInfoData cmd, primaryConnStr, localConnStr;
initStringInfo(&cmd);
initStringInfo(&primaryConnStr);
initStringInfo(&localConnStr);
memset(instanceName, 0, NAMEDATALEN);
memset(primaryHost, 0, 16);
memset(primaryUser, 0, NAMEDATALEN);
memset(primaryPw, 0, NAMEDATALEN);
memset(primaryPort, 0, 8);
memset(localPort, 0, 8);
//parse
if (strlen(PrimaryConnInfo) > 0)
{
char *temStr;
char *temChr;
int temStrLen;
//instanceName
temStr = strstr(PrimaryConnInfo, "application_name=");
temStrLen = strlen("application_name=");
if (temStr != NULL)
{
temChr = strchr(temStr, ' ');
if (temChr != NULL)
{
memcpy(instanceName, temStr + temStrLen, temChr - temStr - temStrLen);
}
else
{
strcpy(instanceName, temStr + temStrLen);
}
}
else
{
strcpy(instanceName, "master");
}
//primaryHost
temStr = strstr(PrimaryConnInfo, "host=");
temStrLen = strlen("host=");
if (temStr != NULL)
{
temChr = strchr(temStr, ' ');
if (temChr != NULL)
{
memcpy(primaryHost, temStr + temStrLen, temChr - temStr - temStrLen);
}
else
{
strcpy(primaryHost, temStr + temStrLen);
}
}
else
{
strcpy(primaryHost, "127.0.0.1");
}
//primaryUser
temStr = strstr(PrimaryConnInfo, "user=");
temStrLen = strlen("user=");
if (temStr != NULL)
{
temChr = strchr(temStr, ' ');
if (temChr != NULL)
{
memcpy(primaryUser, temStr + temStrLen, temChr - temStr - temStrLen);
}
else
{
strcpy(primaryUser, temStr + temStrLen);
}
}
else
{
strcpy(primaryUser, "postgres");
}
//primaryPw
temStr = strstr(PrimaryConnInfo, "password=");
temStrLen = strlen("password=");
if (temStr != NULL)
{
temChr = strchr(temStr, ' ');
if (temChr != NULL)
{
memcpy(primaryPw, temStr + temStrLen, temChr - temStr - temStrLen);
}
else
{
strcpy(primaryPw, temStr + temStrLen);
}
}
else
{
strcpy(primaryPw, "123456");
}
//primaryPort
temStr = strstr(PrimaryConnInfo, "port=");
temStrLen = strlen("port=");
if (temStr != NULL)
{
temChr = strchr(temStr, ' ');
if (temChr != NULL)
{
memcpy(primaryPort, temStr + temStrLen, temChr - temStr - temStrLen);
}
else
{
strcpy(primaryPort, temStr + temStrLen);
}
}
else
{
sprintf(primaryPort, "%d", PostPortNumber);
}
}
else
{
strcpy(instanceName, "master");
strcpy(primaryHost, "127.0.0.1");
strcpy(primaryUser, "postgres");
strcpy(primaryPw, "123456");
sprintf(primaryPort, "%d", PostPortNumber);
}
//assemble primaryConnStr
appendStringInfoString(&primaryConnStr, "host=");
appendStringInfoString(&primaryConnStr, primaryHost);
appendStringInfoString(&primaryConnStr, " user=");
appendStringInfoString(&primaryConnStr, primaryUser);
appendStringInfoString(&primaryConnStr, " password=");
appendStringInfoString(&primaryConnStr, primaryPw);
appendStringInfoString(&primaryConnStr, " port=");
appendStringInfoString(&primaryConnStr, primaryPort);
appendStringInfoString(&primaryConnStr, " dbname=postgres");
//conn local
sprintf(localPort, "%d", PostPortNumber);
appendStringInfoString(&localConnStr, "host=127.0.0.1 port=");
appendStringInfoString(&localConnStr, localPort);
appendStringInfoString(&localConnStr, " user=postgres dbname=postgres");
PGconn *localConn = PQconnectdb(localConnStr.data);
if (PQstatus(localConn) != CONNECTION_OK)
{
PQfinish(localConn);
//log
return;
}
appendStringInfoString(&cmd, "SELECT datname, relname, crules FROM pg_hot_data WHERE crulessettime>cachetime AND clientname='");
appendStringInfoString(&cmd, instanceName);
appendStringInfoString(&cmd, "'");
//Query the corresponding precache policy
PGresult *ruleRes = PQexec(localConn, cmd.data);
if (PQresultStatus(ruleRes) != PGRES_TUPLES_OK)
{
PQclear(ruleRes);
PQfinish(localConn);
//log
return;
}
int rows = PQntuples(ruleRes);
for(int i=0; i<rows; i++)
{
char *datname;
char *relname;
char *crules;
datname = PQgetvalue(ruleRes, i, 0);
relname = PQgetvalue(ruleRes, i, 1);
crules = PQgetvalue(ruleRes, i, 2);
//precache hot data(table level)
if (strcmp(crules, "t") == 0)
{
//precache
resetStringInfo(&localConnStr);
appendStringInfoString(&localConnStr, "host=127.0.0.1 port=");
appendStringInfoString(&localConnStr, localPort);
appendStringInfoString(&localConnStr, " user=postgres dbname=");
appendStringInfoString(&localConnStr, datname);
PGconn *precacheConn = PQconnectdb(localConnStr.data);
if (PQstatus(precacheConn) != CONNECTION_OK)
{
PQfinish(precacheConn);
//log
continue;
}
resetStringInfo(&cmd);
appendStringInfoString(&cmd, "precache select * from ");
appendStringInfoString(&cmd, relname);
PGresult *precacheRes = PQexec(precacheConn, cmd.data);
if (PQresultStatus(precacheRes) != PGRES_TUPLES_OK)
{
PQclear(precacheRes);
PQfinish(precacheConn);
//log
continue;
}
PQclear(precacheRes);
PQfinish(precacheConn);
//update primary pg_hot_data
const char* currentTime = NULL;
currentTime = timestamptz_to_str(GetCurrentTimestamp());
resetStringInfo(&cmd);
appendStringInfoString(&cmd, "UPDATE pg_hot_data SET cachetime='");
appendStringInfoString(&cmd, currentTime);
appendStringInfoString(&cmd, "' WHERE datname='");
appendStringInfoString(&cmd, datname);
appendStringInfoString(&cmd, "' AND relname='");
appendStringInfoString(&cmd, relname);
appendStringInfoString(&cmd, "' AND crules='");
appendStringInfoString(&cmd, crules);
appendStringInfoString(&cmd, "' AND clientname='");
appendStringInfoString(&cmd, instanceName);
appendStringInfoString(&cmd, "'");
PGconn *primaryConn = PQconnectdb(primaryConnStr.data);
if (PQstatus(primaryConn) != CONNECTION_OK)
{
PQfinish(primaryConn);
//log
continue;
}
PGresult *updateRes=PQexec(primaryConn, cmd.data);
if (PQresultStatus(updateRes) != PGRES_TUPLES_OK)
{
PQclear(updateRes);
PQfinish(primaryConn);
//log
continue;
}
PQclear(updateRes);
PQfinish(primaryConn);
}
}
PQclear(ruleRes);
PQfinish(localConn);
}
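
The PrecacheHotData function removed above pulls each key=value field out of PrimaryConnInfo with a near-identical strstr/strchr block. A minimal sketch of the generic helper this pattern amounts to, assuming only standard C string functions (the name conninfo_get is hypothetical and not part of the source tree):

```c
#include <string.h>

/*
 * Copy the value of "key=" (e.g. "host=") from a space-separated conninfo
 * string into buf, falling back to defval when the key is absent.  The
 * removed code open-codes this for application_name, host, user, password
 * and port.
 */
static void
conninfo_get(const char *conninfo, const char *key, char *buf,
             size_t buflen, const char *defval)
{
    const char *start = strstr(conninfo, key);
    const char *end;
    size_t      len;

    if (start == NULL)
    {
        strncpy(buf, defval, buflen - 1);
        buf[buflen - 1] = '\0';
        return;
    }
    start += strlen(key);
    end = strchr(start, ' ');
    len = end ? (size_t) (end - start) : strlen(start);
    if (len > buflen - 1)
        len = buflen - 1;
    memcpy(buf, start, len);
    buf[len] = '\0';
}
```

For example, conninfo_get(PrimaryConnInfo, "host=", primaryHost, sizeof(primaryHost), "127.0.0.1") would cover the host branch above.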

View File

@ -33,6 +33,10 @@
#include "utils/hsearch.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/guc.h"
#include "utils/inval.h"
#include "postmaster/bgwriter.h"
#include "postmaster/secondbuffer.h"
/* GUC variables */
int wal_skip_threshold = 2048; /* in kilobytes */
@ -276,12 +280,16 @@ RelationPreserveStorage(RelFileNode rnode, bool atCommit)
void
RelationTruncate(Relation rel, BlockNumber nblocks)
{
int i = 0;
bool fsm;
bool vm;
bool need_fsm_vacuum = false;
ForkNumber forks[MAX_FORKNUM];
BlockNumber blocks[MAX_FORKNUM];
int nforks = 0;
bool disable_cancel_query = false;
LdPageKey ldKey;
/* Open it at the smgr level if not already done */
RelationOpenSmgr(rel);
@ -343,13 +351,20 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
xl_smgr_truncate xlrec;
/*
* He3DB: Disable query cancellation while writing the truncate XLOG and truncating.
* If the standby receives the truncate log but the master fails to truncate the file,
* the standby will crash when the master writes to blocks that were already truncated on the standby node.
*/
HOLD_INTERRUPTS();
disable_cancel_query = true;
xlrec.blkno = nblocks;
xlrec.rnode = rel->rd_node;
xlrec.flags = SMGR_TRUNCATE_ALL;
XLogBeginInsert();
XLogRegisterData((char *) &xlrec, sizeof(xlrec));
lsn = XLogInsert(RM_SMGR_ID,
XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE);
@ -362,11 +377,49 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
*/
if (fsm || vm)
XLogFlush(lsn);
if (IsBootstrapProcessingMode() != true && InitdbSingle != true) {
RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT
| CHECKPOINT_FLUSH_ALL);
}
}
/* Do the real work to truncate relation forks */
if (IsBootstrapProcessingMode()!=true && InitdbSingle!=true) {
/*
* Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
* just drop them without bothering to write the contents.
*/
DropRelFileNodeBuffers(rel->rd_smgr, &forks, nforks, &blocks);
/*
* Send a shared-inval message to force other backends to close any smgr
* references they may have for this rel. This is useful because they
* might have open file pointers to segments that got removed, and/or
* smgr_targblock variables pointing past the new rel end. (The inval
* message will come back to our backend, too, causing a
* probably-unnecessary local smgr flush. But we don't expect that this
* is a performance-critical path.) As in the unlink code, we want to be
* sure the message is sent before we start changing things on-disk.
*/
CacheInvalidateSmgr(rel->rd_smgr->smgr_rnode);
smgrtruncatelsn(rel->rd_smgr, forks, nforks, blocks,lsn);
for ( i = 0; i < nforks; i ++)
{
ldKey.sk.dbid = rel->rd_smgr->smgr_rnode.node.dbNode;
ldKey.sk.relid = rel->rd_smgr->smgr_rnode.node.relNode;
ldKey.sk.forkno = forks[i];
ldKey.sk.blkno = blocks[i];
SendInvalPage(&ldKey);
}
ldKey.sk.dbid = 0;
ldKey.sk.relid = 0;
ldKey.sk.forkno = 32;
ldKey.sk.blkno = 0;
SendInvalPage(&ldKey);
} else {
smgrtruncate(rel->rd_smgr, forks, nforks, blocks);
}
@ -379,6 +432,13 @@ RelationTruncate(Relation rel, BlockNumber nblocks)
*/
if (need_fsm_vacuum)
FreeSpaceMapVacuumRange(rel, nblocks, InvalidBlockNumber);
/* He3DB: Resume to enable cancel query */
if (disable_cancel_query)
{
RESUME_INTERRUPTS();
CHECK_FOR_INTERRUPTS();
}
}
/*
@ -448,12 +508,7 @@ RelationCopyStorage(SMgrRelation src, SMgrRelation dst,
CHECK_FOR_INTERRUPTS();
// smgrread(src, forkNum, blkno, buf.data, GetXLogWriteRecPtr());
smgrread(src, forkNum, blkno, &dataPage, InvalidXLogRecPtr);
for(int i = 0; i < BLCKSZ; i ++) {
buf.data[i] = dataPage[i];
}
free(dataPage);
smgrread(src, forkNum, blkno, buf.data);
if (!PageIsVerifiedExtended(page, blkno,
PIV_LOG_WARNING | PIV_REPORT_STAT))
@ -929,13 +984,14 @@ smgr_redo(XLogReaderState *record)
reln = smgropen(xlrec->rnode, InvalidBackendId);
/* He3DB: propeller instance and He3DB slave instance not create rel file*/
if (!EnableHotStandby)
if (*isPromoteIsTriggered || !EnableHotStandby || he3mirror || !he3share)
{
smgrcreate(reln, xlrec->forkNum, true);
}
}
else if (info == XLOG_SMGR_TRUNCATE)
{
int i = 0;
xl_smgr_truncate *xlrec = (xl_smgr_truncate *) XLogRecGetData(record);
SMgrRelation reln;
Relation rel;
@ -944,6 +1000,9 @@ smgr_redo(XLogReaderState *record)
int nforks = 0;
bool need_fsm_vacuum = false;
WalLdPageKey walkey;
LdPageKey ldKey;
reln = smgropen(xlrec->rnode, InvalidBackendId);
/*
@ -953,7 +1012,7 @@ smgr_redo(XLogReaderState *record)
* log as best we can until the drop is seen.
*/
/* He3DB: propeller instance and He3DB slave instance not create rel file*/
if (!EnableHotStandby)
if (*isPromoteIsTriggered || !EnableHotStandby || he3mirror || !he3share)
{
smgrcreate(reln, MAIN_FORKNUM, true);
}
@ -1011,9 +1070,60 @@ smgr_redo(XLogReaderState *record)
}
}
/*
* Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
* just drop them without bothering to write the contents.
*/
DropRelFileNodeBuffers(reln, &forks, nforks, &blocks);
/*
* Send a shared-inval message to force other backends to close any smgr
* references they may have for this rel. This is useful because they
* might have open file pointers to segments that got removed, and/or
* smgr_targblock variables pointing past the new rel end. (The inval
* message will come back to our backend, too, causing a
* probably-unnecessary local smgr flush. But we don't expect that this
* is a performance-critical path.) As in the unlink code, we want to be
* sure the message is sent before we start changing things on-disk.
*/
CacheInvalidateSmgr(reln->smgr_rnode);
/* Do the real work to truncate relation forks */
if (nforks > 0)
smgrtruncate(reln, forks, nforks, blocks);
if (nforks > 0 && (!EnableHotStandby || *isPromoteIsTriggered || !he3share))
smgrtruncatelsn(reln, forks, nforks, blocks, record->ReadRecPtr);
if (EnableHotStandby && !push_standby)
{
for (i = 0; i < nforks; i ++)
{
ldKey.sk.dbid = reln->smgr_rnode.node.dbNode;
ldKey.sk.relid = reln->smgr_rnode.node.relNode;
ldKey.sk.forkno = forks[i];
ldKey.sk.blkno = blocks[i];
SendInvalPage(&ldKey);
walkey.sk.dbid = reln->smgr_rnode.node.dbNode;
walkey.sk.relid = reln->smgr_rnode.node.relNode;
walkey.sk.forkno = forks[i];
walkey.sk.blkno = blocks[i];
walkey.pageLsn = SwapLsnFromLittleToBig(record->ReadRecPtr);
walkey.partition = 1;
SendInvalWal(&walkey);
}
ldKey.sk.dbid = 0;
ldKey.sk.relid = 0;
ldKey.sk.forkno = 32;
ldKey.sk.blkno = 0;
SendInvalPage(&ldKey);
walkey.sk.dbid = 0;
walkey.sk.relid = 0;
walkey.sk.forkno = 32;
walkey.sk.blkno = 0;
walkey.pageLsn = 0;
walkey.partition = 0;
SendInvalWal(&walkey);
}
/*
* Update upper-level FSM pages to account for the truncation. This is

View File

@ -866,6 +866,24 @@ CREATE VIEW pg_stat_replication AS
JOIN pg_stat_get_wal_senders() AS W ON (S.pid = W.pid)
LEFT JOIN pg_authid AS U ON (S.usesysid = U.oid);
CREATE VIEW pg_stat_he3walwrite AS
SELECT
s.write_lsn,
s.flush_lsn,
s.writekv_totaltimes,
s.writekv_parallels
FROM pg_stat_get_he3walwrite() AS s
;
CREATE VIEW pg_stat_he3_logindex AS
SELECT
s.memtable_total,
s.memtable_used,
s.memtable_start_index,
s.memtable_active_index,
s.page_total
FROM pg_stat_get_he3_logindex() AS s;
CREATE VIEW pg_stat_slru AS
SELECT
s.name,

View File

@ -2236,7 +2236,7 @@ dbase_redo(XLogReaderState *record)
if (stat(dst_path, &st) == 0 && S_ISDIR(st.st_mode))
{
/* He3DB: propeller instance and He3DB slave instance not create db file*/
if (!EnableHotStandby)
if (!EnableHotStandby || *isPromoteIsTriggered)
{
if (!rmtree(dst_path, true))
/* If this failed, copydir() below is going to error. */
@ -2258,8 +2258,28 @@ dbase_redo(XLogReaderState *record)
* We don't need to copy subdirectories
*/
/* He3DB: propeller instance and He3DB slave instance not create db file*/
if (!EnableHotStandby)
if (!EnableHotStandby || *isPromoteIsTriggered || he3mirror || !he3share)
{
// int count = 0;
// for (;;)
// {
// XLogRecPtr pushlsn;
// XLogRecPtr lastlsn = record->currRecPtr;
// pushlsn = QueryPushChkpointLsn();
// if (pushlsn == InvalidXLogRecPtr)
// ereport(ERROR,
// (errcode(ERRCODE_INTERNAL_ERROR),
// errmsg("push standby's latest apply lsn shouldn't be 0")));
// if (lastlsn <= pushlsn)
// break;
// if (count > 100)
// ereport(ERROR,
// (errcode(ERRCODE_INTERNAL_ERROR),
// errmsg("push standby's latest apply lsn(%X/%X) is still behind primary(%X/%X) after try 100 times.",
// LSN_FORMAT_ARGS(pushlsn), LSN_FORMAT_ARGS(lastlsn))));
// pg_usleep(1000000L);
// count++;
// }
copydir(src_path, dst_path, false);
}
}

View File

@ -9,7 +9,7 @@
#include "utils/timestamp.h"
#include "fmgr.h"
#include "utils/fmgrprotos.h"
#include "catalog/indexing.h"
void UpdateStatShareStorage(int64 vcl) {
Relation pg_stat_share_storage_rel = NULL;

View File

@ -1881,9 +1881,6 @@ pg_sequence_last_value(PG_FUNCTION_ARGS)
void
seq_redo(XLogReaderState *record)
{
if (data_buffer_for_replay(record) == false) {
return;
}
XLogRecPtr lsn = record->EndRecPtr;
uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
Buffer buffer;

View File

@ -1557,7 +1557,7 @@ ExecutePlan(EState *estate,
if (TupIsNull(slot))
break;
if (!isPreCache)
if (!isPreCacheTable && !isPreCacheIndex)
{
/*
* If we have a junk filter, then project a new tuple with the junk

View File

@ -51,6 +51,7 @@
#include "utils/rel.h"
#include "utils/snapmgr.h"
#include "utils/spccache.h"
#include "storage/bufmgr.h"
static TupleTableSlot *BitmapHeapNext(BitmapHeapScanState *node);
static inline void BitmapDoneInitializingSharedState(ParallelBitmapHeapState *pstate);
@ -81,6 +82,28 @@ BitmapHeapNext(BitmapHeapScanState *node)
ParallelBitmapHeapState *pstate = node->pstate;
dsa_area *dsa = node->ss.ps.state->es_query_dsa;
/* set preCacheNodeOid */
if (isPreCacheIndex && preCacheNodeOid == 0)
{
preCacheNodeOid = ((BitmapIndexScanState *)((PlanState *)(node))->lefttree)->biss_ScanDesc->indexRelation->rd_node.relNode;
if (isPreCacheAction)
{
preCacheNodesPtr[(*preCacheNodesCountPtr)++] = preCacheNodeOid;
}
else
{
for(int i = 0; i < *preCacheNodesCountPtr; i++)
{
if (preCacheNodesPtr[i] == preCacheNodeOid)
{
preCacheNodesPtr[i] = preCacheNodesPtr[*preCacheNodesCountPtr - 1];
(*preCacheNodesCountPtr)--;
break;
}
}
}
}
/*
* extract necessary information from index scan node
*/

View File

@ -66,6 +66,28 @@ IndexOnlyNext(IndexOnlyScanState *node)
TupleTableSlot *slot;
ItemPointer tid;
/* set preCacheNodeOid */
if (isPreCacheIndex && preCacheNodeOid == 0)
{
preCacheNodeOid = node->ioss_RelationDesc->rd_node.relNode;
if (isPreCacheAction)
{
preCacheNodesPtr[(*preCacheNodesCountPtr)++] = preCacheNodeOid;
}
else
{
for(int i = 0; i < *preCacheNodesCountPtr; i++)
{
if (preCacheNodesPtr[i] == preCacheNodeOid)
{
preCacheNodesPtr[i] = preCacheNodesPtr[*preCacheNodesCountPtr - 1];
(*preCacheNodesCountPtr)--;
break;
}
}
}
}
/*
* extract necessary information from index scan node
*/

View File

@ -43,6 +43,7 @@
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "storage/bufmgr.h"
/*
* When an ordering operator is used, tuples fetched from the index that
@ -86,6 +87,28 @@ IndexNext(IndexScanState *node)
IndexScanDesc scandesc;
TupleTableSlot *slot;
/* set preCacheNodeOid */
if (isPreCacheIndex && preCacheNodeOid == 0)
{
preCacheNodeOid = node->iss_RelationDesc->rd_node.relNode;
if (isPreCacheAction)
{
preCacheNodesPtr[(*preCacheNodesCountPtr)++] = preCacheNodeOid;
}
else
{
for(int i = 0; i < *preCacheNodesCountPtr; i++)
{
if (preCacheNodesPtr[i] == preCacheNodeOid)
{
preCacheNodesPtr[i] = preCacheNodesPtr[*preCacheNodesCountPtr - 1];
(*preCacheNodesCountPtr)--;
break;
}
}
}
}
/*
* extract necessary information from index scan node
*/

View File

@ -32,6 +32,7 @@
#include "executor/execdebug.h"
#include "executor/nodeSeqscan.h"
#include "utils/rel.h"
#include "storage/bufmgr.h"
static TupleTableSlot *SeqNext(SeqScanState *node);
@ -54,6 +55,28 @@ SeqNext(SeqScanState *node)
ScanDirection direction;
TupleTableSlot *slot;
/* set preCacheTableNode */
if (isPreCacheTable && preCacheNodeOid == 0)
{
preCacheNodeOid = node->ss.ss_currentRelation->rd_node.relNode;
if (isPreCacheAction)
{
preCacheNodesPtr[(*preCacheNodesCountPtr)++] = preCacheNodeOid;
}
else
{
for(int i = 0; i < *preCacheNodesCountPtr; i++)
{
if (preCacheNodesPtr[i] == preCacheNodeOid)
{
preCacheNodesPtr[i] = preCacheNodesPtr[*preCacheNodesCountPtr - 1];
(*preCacheNodesCountPtr)--;
break;
}
}
}
}
/*
* get information from the estate and scan state
*/
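
The same register/unregister logic for preCacheNodeOid recurs in the bitmap heap, index-only, index, and sequential scan nodes above. A minimal sketch of a helper capturing the pattern, assuming the shared preCacheNodesPtr / preCacheNodesCountPtr arrays that InitBufferPool sets up later in this diff (the helper name precache_register_node is hypothetical):

```c
#include "postgres.h"    /* Oid, uint16, bool */

extern Oid    *preCacheNodesPtr;       /* shared array of precached relfilenodes */
extern uint16 *preCacheNodesCountPtr;  /* shared element count */

/*
 * Remember (add == true) or forget (add == false) a relfilenode in the
 * shared precache-node array; swap-with-last removal keeps the array dense.
 */
static void
precache_register_node(Oid relNode, bool add)
{
    if (add)
    {
        preCacheNodesPtr[(*preCacheNodesCountPtr)++] = relNode;
        return;
    }

    for (int i = 0; i < *preCacheNodesCountPtr; i++)
    {
        if (preCacheNodesPtr[i] == relNode)
        {
            preCacheNodesPtr[i] = preCacheNodesPtr[*preCacheNodesCountPtr - 1];
            (*preCacheNodesCountPtr)--;
            break;
        }
    }
}
```

Each scan node could then set preCacheNodeOid once and call precache_register_node(preCacheNodeOid, isPreCacheAction).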

View File

@ -21,6 +21,7 @@ OBJS = \
interrupt.o \
pgarch.o \
pgstat.o \
secondbuffer.o \
postmaster.o \
startup.o \
syslogger.o \

View File

@ -241,11 +241,11 @@ BackgroundWriterMain(void)
/*
* Do one cycle of dirty-buffer writing.
*/
if (push_standby == true) {
can_hibernate = BgBufferSync(&wb_context);
} else {
//if (push_standby == true) {
// can_hibernate = BgBufferSync(&wb_context);
//} else {
can_hibernate = true;
}
//}
/*
* Send off activity statistics to the stats collector

View File

@ -493,7 +493,7 @@ CheckpointerMain(void)
}
/* Check for archive_timeout and switch xlog files if necessary. */
CheckArchiveTimeout();
//CheckArchiveTimeout();
/*
* Send off activity statistics to the stats collector. (The reason
@ -719,7 +719,7 @@ CheckpointWriteDelay(int flags, double progress)
AbsorbSyncRequests();
absorb_counter = WRITES_PER_ABSORB;
CheckArchiveTimeout();
//CheckArchiveTimeout();
/*
* Report interim activity statistics to the stats collector.
@ -1346,3 +1346,9 @@ FirstCallSinceLastCheckpoint(void)
return FirstCall;
}
pid_t
He3DBQueryCkpPid(void)
{
return CheckpointerShmem->checkpointer_pid;
}

View File

@ -65,7 +65,6 @@
#include "postgres.h"
#include "utils/ufs.h"
#include <unistd.h>
#include <signal.h>
#include <time.h>
@ -115,6 +114,7 @@
#include "postmaster/interrupt.h"
#include "postmaster/pgarch.h"
#include "postmaster/postmaster.h"
#include "postmaster/secondbuffer.h"
#include "postmaster/syslogger.h"
#include "replication/logicallauncher.h"
#include "replication/walsender.h"
@ -133,7 +133,7 @@
#include "utils/timeout.h"
#include "utils/timestamp.h"
#include "utils/varlena.h"
#include "access/pagehashqueue.h"
#ifdef EXEC_BACKEND
#include "storage/spin.h"
#endif
@ -148,7 +148,8 @@
#define BACKEND_TYPE_AUTOVAC 0x0002 /* autovacuum worker process */
#define BACKEND_TYPE_WALSND 0x0004 /* walsender process */
#define BACKEND_TYPE_BGWORKER 0x0008 /* bgworker process */
#define BACKEND_TYPE_ALL 0x000F /* OR of all the above */
#define BACKEND_TYPE_FLUSHPAGE 0x0010 /* parallel flush pid*/
#define BACKEND_TYPE_ALL 0x001F /* OR of all the above */
/*
* List of active backends (or child processes anyway; we don't actually
@ -200,6 +201,8 @@ BackgroundWorker *MyBgworkerEntry = NULL;
/* The socket number we are listening for connections on */
int PostPortNumber;
int flag = 1;
/* The directory names for Unix socket(s) */
char *Unix_socket_directories;
@ -255,6 +258,8 @@ static pid_t StartupPID = 0,
AutoVacPID = 0,
PgArchPID = 0,
PgStatPID = 0,
SecondBufferPID = 0,
CleanLogIndexPID = 0,
SysLoggerPID = 0;
/* Startup process's status */
@ -376,6 +381,10 @@ static volatile bool avlauncher_needs_signal = false;
/* received START_WALRECEIVER signal */
static volatile sig_atomic_t WalReceiverRequested = false;
/* received START_PARALLEL PUSH signal */
static volatile sig_atomic_t PageParallelPush = false;
/* set when there's a worker that needs to be started up */
static volatile bool StartWorkerNeeded = true;
static volatile bool HaveCrashedWorker = false;
@ -438,6 +447,7 @@ static pid_t StartChildProcess(AuxProcType type);
static void StartAutovacuumWorker(void);
static void MaybeStartWalReceiver(void);
static void InitPostmasterDeathWatchHandle(void);
static void StartALLPageFlushWorker(void);
/*
* Archiver is allowed to start up at the current postmaster state?
@ -558,6 +568,8 @@ static void ShmemBackendArrayRemove(Backend *bn);
#define StartCheckpointer() StartChildProcess(CheckpointerProcess)
#define StartWalWriter() StartChildProcess(WalWriterProcess)
#define StartWalReceiver() StartChildProcess(WalReceiverProcess)
#define StartSecondBuffer() StartChildProcess(SecondBufferProcess)
#define StartCleanLogIndex() StartChildProcess(CleanLogIndexProcess)
/* Macros to check exit status of a child process */
#define EXIT_STATUS_0(st) ((st) == 0)
@ -589,7 +601,6 @@ PostmasterMain(int argc, char *argv[])
char *output_config_variable = NULL;
InitProcessGlobals();
PostmasterPid = MyProcPid;
IsPostmasterEnvironment = true;
@ -1772,6 +1783,10 @@ ServerLoop(void)
CheckpointerPID = StartCheckpointer();
if (BgWriterPID == 0)
BgWriterPID = StartBackgroundWriter();
if (CleanLogIndexPID == 0)
CleanLogIndexPID = StartCleanLogIndex();
if (SecondBufferPID == 0)
SecondBufferPID = StartSecondBuffer();
}
/*
@ -1782,6 +1797,9 @@ ServerLoop(void)
if (WalWriterPID == 0 && pmState == PM_RUN)
WalWriterPID = StartWalWriter();
// if(SecondBufferPID == 0 && pmState == PM_RUN)
// SecondBufferPID = StartSecondBuffer();
/*
* If we have lost the autovacuum launcher, try to start a new one. We
* don't want autovacuum to run in binary upgrade mode because
@ -2733,6 +2751,11 @@ SIGHUP_handler(SIGNAL_ARGS)
signal_child(SysLoggerPID, SIGHUP);
if (PgStatPID != 0)
signal_child(PgStatPID, SIGHUP);
if (SecondBufferPID != 0)
signal_child(SecondBufferPID, SIGHUP);
if (CleanLogIndexPID != 0 )
signal_child(CleanLogIndexPID, SIGHUP);
/* Reload authentication config files too */
if (!load_hba())
@ -3052,6 +3075,10 @@ reaper(SIGNAL_ARGS)
BgWriterPID = StartBackgroundWriter();
if (WalWriterPID == 0)
WalWriterPID = StartWalWriter();
if (SecondBufferPID == 0)
SecondBufferPID = StartSecondBuffer(); // purpose?
if (CleanLogIndexPID == 0)
CleanLogIndexPID = StartCleanLogIndex();
/*
* Likewise, start other special children as needed. In a restart
@ -3094,6 +3121,7 @@ reaper(SIGNAL_ARGS)
continue;
}
/*
* Was it the checkpointer?
*/
@ -3128,7 +3156,7 @@ reaper(SIGNAL_ARGS)
*/
SignalChildren(SIGUSR2);
pmState = PM_SHUTDOWN_2;
// pmState = PM_SHUTDOWN_2;
/*
* We can also shut down the stats collector now; there's
@ -3164,6 +3192,25 @@ reaper(SIGNAL_ARGS)
continue;
}
if (pid == SecondBufferPID)
{
SecondBufferPID = 0;
if (EXIT_STATUS_0(exitstatus) && pmState == PM_SHUTDOWN)
{
Assert(Shutdown > NoShutdown);
pmState = PM_SHUTDOWN_2;
}
else
{
/*
* Any unexpected exit of the checkpointer (including FATAL
* exit) is treated as a crash.
*/
HandleChildCrash(pid, exitstatus,
_("second buffer process"));
}
}
/*
* Was it the wal receiver? If exit status is zero (normal) or one
* (FATAL exit), we assume everything is all right just like normal
@ -3691,7 +3738,29 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
signal_child(PgStatPID, SIGQUIT);
allow_immediate_pgstat_restart();
}
/* Take care of the clean logindex too */
if (pid == CleanLogIndexPID)
CleanLogIndexPID = 0;
else if (CleanLogIndexPID != 0 && take_action)
{
ereport(DEBUG2,
(errmsg_internal("sending %s to process %d",
(SendStop ? "SIGSTOP" : "SIGQUIT"),
(int) CleanLogIndexPID)));
signal_child(CleanLogIndexPID, (SendStop ? SIGSTOP : SIGQUIT));
}
/* Take care of the walwriter too*/
if (pid == SecondBufferPID)
SecondBufferPID = 0;
else if (SecondBufferPID != 0 && take_action)
{
ereport(DEBUG2,
(errmsg_internal("sending %s to process %d",
(SendStop ? "SIGSTOP" : "SIGQUIT"),
(int) SecondBufferPID)));
signal_child(SecondBufferPID, (SendStop ? SIGSTOP : SIGQUIT));
}
/* We do NOT restart the syslogger */
if (Shutdown != ImmediateShutdown)
@ -3839,8 +3908,14 @@ PostmasterStateMachine(void)
signal_child(StartupPID, SIGTERM);
if (WalReceiverPID != 0)
signal_child(WalReceiverPID, SIGTERM);
if (CleanLogIndexPID != 0)
signal_child(CleanLogIndexPID, SIGTERM);
/*and the secondbuffer too*/
if (SecondBufferPID != 0)
signal_child(SecondBufferPID,SIGTERM);
/* checkpointer, archiver, stats, and syslogger may continue for now */
/* Now transition to PM_WAIT_BACKENDS state to wait for them to die */
pmState = PM_WAIT_BACKENDS;
}
@ -4176,6 +4251,10 @@ TerminateChildren(int signal)
signal_child(PgArchPID, signal);
if (PgStatPID != 0)
signal_child(PgStatPID, signal);
if (CleanLogIndexPID !=0)
signal_child(CleanLogIndexPID, signal);
if (SecondBufferPID != 0)
signal_child(SecondBufferPID, signal);
}
/*
@ -4273,7 +4352,6 @@ BackendStartup(Port *port)
report_fork_failure_to_client(port, save_errno);
return STATUS_ERROR;
}
/* in parent, successful fork */
ereport(DEBUG2,
(errmsg_internal("forked new backend, pid=%d socket=%d",
@ -4530,7 +4608,7 @@ BackendRun(Port *port)
if (port->privateConn == true) {
privateConn = true;
}
client_application_name = port->application_name;
/*
* Make sure we aren't in PostmasterContext anymore. (We can't delete it
* just yet, though, because InitPostgres will need the HBA data.)
@ -5139,6 +5217,11 @@ SubPostmasterMain(int argc, char *argv[])
static void
ExitPostmaster(int status)
{
ClosePageDBEnv();
CloseWalDBEnv();
#ifdef HAVE_PTHREAD_IS_THREADED_NP
/*
@ -5203,7 +5286,6 @@ sigusr1_handler(SIGNAL_ARGS)
CheckpointerPID = StartCheckpointer();
Assert(BgWriterPID == 0);
BgWriterPID = StartBackgroundWriter();
/*
* Start the archiver if we're responsible for (re-)archiving received
* files.
@ -5299,7 +5381,24 @@ sigusr1_handler(SIGNAL_ARGS)
/* The autovacuum launcher wants us to start a worker process. */
StartAutovacuumWorker();
}
/* start Flush Page */
if (!PageParallelPush && CheckPostmasterSignal(PMSIGNAL_PARALLEL_FLUSH_WORKER)) {
PageParallelPush = true;
StartALLPageFlushWorker();
}
if (CheckPostmasterSignal(PMSIGNAL_CLEAN_LOGINDEX_WORKER)) {
if ( CleanLogIndexPID == 0) {
CleanLogIndexPID = StartCleanLogIndex();
}
}
// if (CheckPostmasterSignal(PMSIGNAL_SECONDBUFFER_WORKER)) {
// if (SecondBufferPID == 0) {
// SecondBufferPID = StartSecondBuffer();
// }
// }
if (CheckPostmasterSignal(PMSIGNAL_START_WALRECEIVER))
{
/* Startup Process wants us to start the walreceiver process. */
@ -5464,7 +5563,15 @@ StartChildProcess(AuxProcType type)
av[ac++] = "--forkboot";
av[ac++] = NULL; /* filled in by postmaster_forkexec */
#endif
if (pageEnv == NULL)
{
InitPageDBEnv();
}
if (walEnv == NULL)
{
InitWalDBEnv();
}
snprintf(typebuf, sizeof(typebuf), "-x%d", type);
av[ac++] = typebuf;
@ -5487,7 +5594,6 @@ StartChildProcess(AuxProcType type)
MemoryContextSwitchTo(TopMemoryContext);
MemoryContextDelete(PostmasterContext);
PostmasterContext = NULL;
AuxiliaryProcessMain(ac, av); /* does not return */
}
#endif /* EXEC_BACKEND */
@ -5545,6 +5651,67 @@ StartChildProcess(AuxProcType type)
return pid;
}
static void StartALLPageFlushWorker(void) {
for(int i = 0;i<PARALLEL_NUM;i++) {
Backend *bn;

/*
* Compute the cancel key that will be assigned to this session. We
* probably don't need cancel keys for these flush workers, but we'd
* better have something random in the field to prevent unfriendly
* people from sending cancels to them.
*/
if (!RandomCancelKey(&MyCancelKey))
{
ereport(LOG,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("could not generate random cancel key")));
return;
}
bn = (Backend *) malloc(sizeof(Backend));
if (bn)
{
bn->cancel_key = MyCancelKey;
/* parallel workers are not dead_end and need a child slot */
bn->dead_end = false;
bn->child_slot = MyPMChildSlot = AssignPostmasterChildSlot();
bn->bgworker_notify = false;
bn->pid = StartPageFlushWorker();
if (bn->pid > 0)
{
bn->bkend_type = BACKEND_TYPE_FLUSHPAGE;
dlist_push_head(&BackendList, &bn->elem);
#ifdef EXEC_BACKEND
ShmemBackendArrayAdd(bn);
#endif
/* all OK */
continue;
}
/*
* fork failed, fall through to report -- actual error message was
* logged by StartPageFlushWorker
*/
(void) ReleasePostmasterChildSlot(bn->child_slot);
free(bn);
}
else
ereport(LOG,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
}
}
/*
* StartAutovacuumWorker
* Start an autovac worker process.
@ -5712,7 +5879,7 @@ int
MaxLivePostmasterChildren(void)
{
return 2 * (MaxConnections + autovacuum_max_workers + 1 +
max_wal_senders + max_worker_processes);
max_wal_senders + max_parallel_flush_process + max_worker_processes);
}
/*

File diff suppressed because it is too large

View File

@ -32,7 +32,8 @@
#include "storage/standby.h"
#include "utils/guc.h"
#include "utils/timeout.h"
#include "access/pagehashqueue.h"
#include "utils/resowner_private.h"
#ifndef USE_POSTMASTER_DEATH_SIGNAL
/*
@ -51,6 +52,7 @@
static volatile sig_atomic_t got_SIGHUP = false;
static volatile sig_atomic_t shutdown_requested = false;
static volatile sig_atomic_t promote_signaled = false;
static volatile sig_atomic_t proc_exit_success = false;
/*
* Flag set when executing a restore command, to tell SIGTERM signal handler
@ -103,8 +105,10 @@ StartupProcShutdownHandler(SIGNAL_ARGS)
if (in_restore_command)
proc_exit(1);
else
else {
shutdown_requested = true;
startup_shutdown_requested = true;
}
WakeupRecovery();
errno = save_errno;
@ -164,8 +168,10 @@ HandleStartupProcInterrupts(void)
/*
* Check if we were requested to exit without finishing recovery.
*/
if (shutdown_requested)
if (shutdown_requested) {
proc_exit_success = true;
proc_exit(1);
}
/*
* Emergency bailout if postmaster has died. This is to avoid the
@ -185,6 +191,9 @@ HandleStartupProcInterrupts(void)
ProcessProcSignalBarrier();
}
bool ProcHasReleaseFlag(void) {
return proc_exit_success;
}
/* --------------------------------
* signal handler routines
@ -237,7 +246,18 @@ StartupProcessMain(void)
* Unblock signals (they were blocked when the postmaster forked us)
*/
PG_SETMASK(&UnBlockSig);
//start flushWork
#ifndef PG_NOREPLAY
if (IsBootstrapProcessingMode() != true && InitdbSingle!=true) {
//if (push_standby == true) {
SignalStartFlushWork();
//}
pg_usleep(1000);
SignalStartCleanLogIndexWork();
ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
}
#endif
/*
* Do what we came for.
*/

View File

@ -272,7 +272,7 @@ WalWriterMain(void)
(void) WaitLatch(MyLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
cur_timeout,
1000,
WAIT_EVENT_WAL_WRITER_MAIN);
}
}

View File

@ -233,9 +233,7 @@ pg_logical_slot_get_changes_guts(FunctionCallInfo fcinfo, bool confirm, bool bin
ctx = CreateDecodingContext(InvalidXLogRecPtr,
options,
false,
XL_ROUTINE(.page_read = read_local_xlog_page,
.segment_open = wal_segment_open,
.segment_close = wal_segment_close),
XL_ROUTINE(.page_read = read_local_xlog_batch),
LogicalOutputPrepareWrite,
LogicalOutputWrite, NULL);

View File

@ -153,9 +153,7 @@ create_logical_replication_slot(char *name, char *plugin,
ctx = CreateInitDecodingContext(plugin, NIL,
false, /* just catalogs is OK */
restart_lsn,
XL_ROUTINE(.page_read = read_local_xlog_page,
.segment_open = wal_segment_open,
.segment_close = wal_segment_close),
XL_ROUTINE(.page_read = read_local_xlog_batch),
NULL, NULL, NULL);
/*
@ -512,9 +510,7 @@ pg_logical_replication_slot_advance(XLogRecPtr moveto)
ctx = CreateDecodingContext(InvalidXLogRecPtr,
NIL,
true, /* fast_forward */
XL_ROUTINE(.page_read = read_local_xlog_page,
.segment_open = wal_segment_open,
.segment_close = wal_segment_close),
XL_ROUTINE(.page_read = read_local_xlog_batch),
NULL, NULL, NULL);
/*

View File

@ -75,6 +75,7 @@
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/guc.h"
#include "utils/hfs.h"
#include "utils/pg_lsn.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"
@ -94,14 +95,15 @@ bool hot_standby_feedback;
static WalReceiverConn *wrconn = NULL;
WalReceiverFunctionsType *WalReceiverFunctions = NULL;
#define NAPTIME_PER_CYCLE 100 /* max sleep time between cycles (100ms) */
//#define NAPTIME_PER_CYCLE 100 /* max sleep time between cycles (100ms) */
#define NAPTIME_PER_CYCLE 10 /* max sleep time between cycles (10ms) */
/*
* These variables are used similarly to openLogFile/SegNo,
* but for walreceiver to write the XLOG. recvFileTLI is the TimeLineID
* corresponding the filename of recvFile.
*/
static int recvFile = -1;
static int64_t recvFile = -1;
static TimeLineID recvFileTLI = 0;
static XLogSegNo recvSegNo = 0;
@ -408,7 +410,14 @@ WalReceiverMain(void)
first_stream = false;
/* Initialize LogstreamResult and buffers for processing messages */
LogstreamResult.Write = LogstreamResult.Flush = GetXLogReplayRecPtr(NULL);
if (he3mirror){
/* When He3DB restarts, ReplayRecPtr may be too large, leaving LogstreamResult.Write < LogstreamResult.Flush;
* then WAL cannot be flushed normally and replay cannot proceed. RedoRecPtr is a suitable value.
*/
LogstreamResult.Write = LogstreamResult.Flush = GetFileReplayLsn();
} else{
LogstreamResult.Write = LogstreamResult.Flush = GetXLogReplayRecPtr(NULL);
}
initStringInfo(&reply_message);
initStringInfo(&incoming_message);
@ -823,7 +832,11 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
case 'w': /* WAL records */
{
/* copy message to StringInfo */
hdrlen = sizeof(int64) + sizeof(int64) + sizeof(int64);
if (he3mirror) {
hdrlen = sizeof(int64) + sizeof(int64) + sizeof(int64);
} else {
hdrlen = sizeof(int64) + sizeof(int64) + sizeof(int64) + sizeof(int64);
}
if (len < hdrlen)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
@ -833,12 +846,21 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
/* read the fields */
dataStart = pq_getmsgint64(&incoming_message);
walEnd = pq_getmsgint64(&incoming_message);
if (he3mirror){
len -= hdrlen;
} else{
len = pq_getmsgint64(&incoming_message);
}
sendTime = pq_getmsgint64(&incoming_message);
ProcessWalSndrMessage(walEnd, sendTime);
buf += hdrlen;
len -= hdrlen;
XLogWalRcvWrite(buf, len, dataStart);
if (he3mirror) {
XLogWalRcvWrite(buf, len, dataStart);
} else {
LogstreamResult.Write = dataStart+len;
/* Update shared-memory status */
pg_atomic_write_u64(&WalRcv->writtenUpto, LogstreamResult.Write);
}
break;
}
case 'k': /* Keepalive */
@ -871,7 +893,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
}
}
XLogRecPtr GetFlushXlogPtr() {
XLogRecPtr GetFlushXlogPtr(void) {
SpinLockAcquire(&WalRcv->mutex);
XLogRecPtr rcvlsn = WalRcv->flushedUpto;
SpinLockRelease(&WalRcv->mutex);
@ -917,6 +939,8 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
errno = 0;
byteswritten = pg_pwrite(recvFile, buf, segbytes, (off_t) startoff);
// byteswritten = writefs(recvFile, buf, segbytes, (off_t) startoff);
if (byteswritten <= 0)
{
char xlogfname[MAXFNAMELEN];
@ -967,12 +991,12 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
static void
XLogWalRcvFlush(bool dying)
{
WalRcvData *walrcv = WalRcv;
if (LogstreamResult.Flush < LogstreamResult.Write)
{
WalRcvData *walrcv = WalRcv;
{
#ifdef PG_NOREPLAY
issue_xlog_fsync(recvFile, recvSegNo);
#endif
LogstreamResult.Flush = LogstreamResult.Write;
/* Update shared-memory status */
@ -1108,6 +1132,11 @@ XLogWalRcvSendReply(bool force, bool requestReply)
writePtr = LogstreamResult.Write;
flushPtr = LogstreamResult.Flush;
applyPtr = GetXLogReplayRecPtr(NULL);
#ifndef PG_NOREPLAY
if (!he3mirror && push_standby == true) {
applyPtr = GetXLogPushToDisk();
}
#endif
resetStringInfo(&reply_message);
pq_sendbyte(&reply_message, 'r');
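
The 'w' branch above implies two different headers for WAL-data messages, depending on he3mirror. A sketch of the layouts, inferred only from the pq_getmsgint64() calls in this hunk (struct names are hypothetical; fields travel as big-endian 64-bit integers):

```c
#include <stdint.h>

/* he3mirror path: classic layout, payload length derived as len - hdrlen */
typedef struct He3MirrorWalMsgHdr
{
    int64_t dataStart;   /* start LSN of the WAL payload */
    int64_t walEnd;      /* sender's current WAL end */
    int64_t sendTime;    /* send timestamp */
} He3MirrorWalMsgHdr;

/* shared-storage path: payload length is carried explicitly */
typedef struct He3ShareWalMsgHdr
{
    int64_t dataStart;   /* start LSN of the WAL payload */
    int64_t walEnd;      /* sender's current WAL end */
    int64_t payloadLen;  /* read into "len" by the receiver */
    int64_t sendTime;    /* send timestamp */
} He3ShareWalMsgHdr;
```

In the shared-storage case the receiver skips XLogWalRcvWrite() and only advances LogstreamResult.Write to dataStart + len, publishing it through WalRcv->writtenUpto.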

File diff suppressed because it is too large

View File

@ -141,6 +141,22 @@ InitBufferPool(void)
/* Init other shared buffer-management stuff */
StrategyInitialize(!foundDescs);
isPromoteIsTriggered = (bool *)
ShmemInitStruct("isPromoteIsTriggered",
sizeof(bool), &foundBufCkpt);
memset(isPromoteIsTriggered, 0, sizeof(bool));
/* Init preCacheNodes arrays */
preCacheNodesPtr = (Oid *)
ShmemInitStruct("preCacheNodesPtr",
NPreCacheNodes * sizeof(Oid), &foundBufCkpt);
memset(preCacheNodesPtr, 0, NPreCacheNodes * sizeof(Oid));
preCacheNodesCountPtr = (uint16 *)
ShmemInitStruct("preCacheNodesCountPtr",
sizeof(uint16), &foundBufCkpt);
memset(preCacheNodesCountPtr, 0, sizeof(uint16));
/* Initialize per-backend file flush context */
WritebackContextInit(&BackendWritebackContext,
&backend_flush_after);
@ -167,6 +183,7 @@ BufferShmemSize(void)
/* size of stuff controlled by freelist.c */
size = add_size(size, StrategyShmemSize());
size = add_size(size, sizeof(bool));
/* size of I/O condition variables */
size = add_size(size, mul_size(NBuffers,
@ -177,5 +194,8 @@ BufferShmemSize(void)
/* size of checkpoint sort array in bufmgr.c */
size = add_size(size, mul_size(NBuffers, sizeof(CkptSortItem)));
/* size of preCacheNodes */
size = add_size(size, mul_size(NPreCacheNodes, sizeof(Oid)) + sizeof(uint16));
return size;
}

View File

@ -20,7 +20,7 @@
* is using it.
*
* ReleaseBuffer() -- unpin a buffer
*
*f
* MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
* The disk write is delayed until buffer replacement or checkpoint.
*
@ -49,6 +49,7 @@
#include "storage/proc.h"
#include "storage/smgr.h"
#include "storage/standby.h"
#include "storage/md.h"
#include "utils/memdebug.h"
#include "utils/ps_status.h"
#include "utils/rel.h"
@ -57,8 +58,11 @@
#include "access/xlog_internal.h"
#include "access/pushpage.h"
#include "utils/memutils.h"
#include "utils/hfs.h"
#include "storage/he3db_logindex.h"
#include "access/ringbuffer.h"
bool *isPromoteIsTriggered;
/* Note: these two macros only work on shared buffers, not local ones! */
#define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
#define BufferGetLSN(bufHdr) (PageGetLSN(BufHdrGetBlock(bufHdr)))
@ -492,7 +496,7 @@ static BufferDesc *BufferAlloc(SMgrRelation smgr,
ForkNumber forkNum,
BlockNumber blockNum,
BufferAccessStrategy strategy,
bool *foundPtr);
bool *foundPtr, bool *exist);
static BufferDesc *He3DBBufferAlloc_replay(SMgrRelation smgr,
char relpersistence,
ForkNumber forkNum,
@ -515,7 +519,9 @@ static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
static void he3db_apply_page(BufferDesc *bufHdr, char *pageXlogBuf, int nbytes);
static int he3db_apply_one_record(XLogReaderState *state, Buffer buffer, char *pageXlogBuf);
bool PinBufferForPush(void *buf, BufferAccessStrategy strategy) {
return PinBuffer(buf,strategy);
}
/*
* Implementation of PrefetchBuffer() for shared buffers.
*/
@ -788,6 +794,18 @@ ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
* miss.
*/
pgstat_count_buffer_read(reln);
/* precache or unprecache index */
if (isPreCacheIndex && !isPreCacheIndexDone && preCacheNodeOid == reln->rd_node.relNode)
{
BlockNumber precacheblocks;
precacheblocks = smgrnblocks(reln->rd_smgr, forkNum);
for(BlockNumber i=0; i < precacheblocks; i++)
{
ReleaseBuffer(ReadBuffer_common(reln->rd_smgr, reln->rd_rel->relpersistence, forkNum, i, mode, strategy, &hit));
}
isPreCacheIndexDone = true;
}
buf = ReadBuffer_common(reln->rd_smgr, reln->rd_rel->relpersistence,
forkNum, blockNum, mode, strategy, &hit);
if (hit)
@ -836,8 +854,7 @@ He3DBReadBufferWithoutRelcache_replay(RelFileNode rnode, ForkNumber forkNum,
return He3DBReadBuffer_replay(smgr, RELPERSISTENCE_PERMANENT, forkNum, blockNum,
mode, strategy, hit);
}
/*
* ReadBuffer_common -- common logic for all ReadBuffer variants
*
@ -850,17 +867,12 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
{
BufferDesc *bufHdr;
Block bufBlock;
bool exist = false;
bool found;
bool isExtend;
bool isLocalBuf = SmgrIsTemp(smgr);
/* he3db: local tem buffer for pageXlog */
char *pageXlogBuf;
/* he3db: Bytes he3dbsmgrread actually read */
int nbytes;
*hit = false;
pageXlogBuf = NULL;
/* Make sure we will have room to remember the buffer pin */
ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
@ -893,11 +905,11 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
relpath(smgr->smgr_rnode, forkNum),
P_NEW)));
}
if (isLocalBuf)
{
bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, &found);
if (found)
if (found)
pgBufferUsage.local_blks_hit++;
else if (isExtend)
pgBufferUsage.local_blks_written++;
@ -912,7 +924,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* not currently in memory.
*/
bufHdr = BufferAlloc(smgr, relpersistence, forkNum, blockNum,
strategy, &found);
strategy, &found, &exist);
if (found)
pgBufferUsage.shared_blks_hit++;
else if (isExtend)
@ -920,6 +932,16 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
mode == RBM_ZERO_ON_ERROR)
pgBufferUsage.shared_blks_read++;
// for precache: keep this buffer from being evicted by the clock algorithm
if (needPreCacheEscape && preCacheNodeOid == bufHdr->tag.rnode.relNode)
{
bufHdr->isPreCacheEscape=true;
}
// for unprecache: allow this buffer to be evicted by the clock algorithm
if (needUnpreCacheEscape && preCacheNodeOid == bufHdr->tag.rnode.relNode)
{
bufHdr->isPreCacheEscape=false;
}
}
/* At this point we do NOT hold any locks. */
@ -1012,7 +1034,7 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
} while (!StartBufferIO(bufHdr, true));
}
}
/*
* if we have gotten to this point, we have allocated a buffer for the
* page but its contents are not yet valid. IO_IN_PROGRESS is set for it,
@ -1028,13 +1050,44 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
Assert(!(pg_atomic_read_u32(&bufHdr->state) & BM_VALID)); /* spinlock not needed */
bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
XLogRecPtr replayLsn = InvalidXLogRecPtr;
TimeLineID tli;
int lsnLen = 0;
bool outdata = true;
Bufrd tWalRecord;
tWalRecord.count = 0;
tWalRecord.buf = NULL;
LsnNode* head = NULL;
char* pageXlogPtr = NULL;
int nbytes = 0;
walRecord_t walRecord;
walRecord.cap = 0;
walRecord.buf = NULL;
walRecord.count = 0;
if (isExtend)
{
/* new buffers are zero-filled */
MemSet((char *) bufBlock, 0, BLCKSZ);
/* don't set checksum for all-zero page */
smgrextend(smgr, forkNum, blockNum, (char *) bufBlock, false);
/* don't set checksum for all-zero page */
/* for new page precache */
if (*preCacheNodesCountPtr > 0)
{
uint16 preCacheNodei = 0;
while (preCacheNodei < *preCacheNodesCountPtr)
{
if (preCacheNodesPtr[preCacheNodei] == bufHdr->tag.rnode.relNode)
{
bufHdr->isPreCacheEscape=true;
break;
}
preCacheNodei++;
}
}
/*
* NB: we're *not* doing a ScheduleBufferTagForWriteback here;
@ -1055,52 +1108,76 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
{
instr_time io_start,
io_time;
/*
* he3db: alloc local tem buffer for pageXlog
* first 8K is page data, after 8k is xlog data
* He3FS abandon
pageXlogBuf = (char *) palloc_extended(PAGEXLOG_BLCKSZ, MCXT_ALLOC_NO_OOM);
if (!pageXlogBuf)
{
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("invalid pageXlog local buffer alloc")));
}
*/
if (track_io_timing)
INSTR_TIME_SET_CURRENT(io_start);
/* he3db: read page and xlog Associated with it */
if (EnableHotStandby == true)
{
/* propeller and slave instance */
//nbytes = he3dbsmgrread(smgr, forkNum, blockNum, bufBlock, InvalidXLogRecPtr);
XLogRecPtr replayLsn = GetXLogReplayRecPtr(NULL);
if (IsBootstrapProcessingMode() || InitdbSingle) {
replayLsn = GetXLogWriteRecPtr();
}
nbytes = he3dbsmgrread(smgr, forkNum, blockNum, &pageXlogBuf,replayLsn);
memcpy((char *) bufBlock, pageXlogBuf, BLCKSZ);
/* propeller instance no page xlog replay */
if (push_standby)
{
free(pageXlogBuf);
pageXlogBuf = NULL;
}
}
else
{
/* primary instance */
XLogRecPtr replayLsn = GetXLogWriteRecPtr();
nbytes = he3dbsmgrread(smgr, forkNum, blockNum, &pageXlogBuf, replayLsn);
memcpy((char *) bufBlock, pageXlogBuf, BLCKSZ);
if (nbytes <= BLCKSZ)
{
free(pageXlogBuf);
pageXlogBuf = NULL;
if ((EnableHotStandby == true && *isPromoteIsTriggered == false) || InRecovery) {
if (IsBootstrapProcessingMode() == true || InitdbSingle == true) {
smgrread(smgr, forkNum, blockNum, (char *) bufBlock);
} else {
replayLsn = GetXLogReplayRecPtr(&tli);
if (exist == true) {
BufferTag pageTag;
pageTag.rnode = smgr->smgr_rnode.node;
pageTag.forkNum = forkNum;
pageTag.blockNum = blockNum;
// XLogRecPtr pageLsn = BufferGetLSN(bufHdr);
XLogRecPtr pageLsn = Max(GetXLogPushToDisk(), BufferGetLSN(bufHdr));
head = GetLogIndexByPage(&pageTag,pageLsn,replayLsn);
if ((EnableHotStandby == true && *isPromoteIsTriggered == false) && push_standby == false) {
if (head->next != NULL) {
tWalRecord = ReadWalsByPage(pageTag.rnode.dbNode,pageTag.rnode.relNode,forkNum,blockNum,tli,head);
}
} else {
LsnNode* next = head->next;
if (next != NULL) {
walRecord.cap = 8192;
walRecord.buf = malloc(walRecord.cap);
}
while(next!=NULL) {
int count = walRecordQuery(&walRecord.buf,&walRecord.count,&walRecord.cap,next->lsn);
if (count == -1) {
elog(FATAL,"======walRecordQuery query wal Faild %X/%X===2===",LSN_FORMAT_ARGS(next->lsn));
}
next = next->next;
}
}
} else {
nbytes = he3db_mdread(smgr, forkNum, blockNum, &pageXlogPtr,true, replayLsn);
if (nbytes < BLCKSZ) {
elog(FATAL,"smgrextend=>he3dbsmgrread rel %d flk %d blk %d nbytes %d",smgr->smgr_rnode.node.relNode,forkNum, blockNum,nbytes);
} else {
memcpy(bufBlock,pageXlogPtr,BLCKSZ);
if (push_standby == true || EnableHotStandby == false || *isPromoteIsTriggered) {
BufferTag pageTag;
pageTag.rnode = smgr->smgr_rnode.node;
pageTag.forkNum = forkNum;
pageTag.blockNum = blockNum;
// XLogRecPtr pageLsn = BufferGetLSN(bufHdr);
XLogRecPtr pageLsn = Max(GetXLogPushToDisk(), BufferGetLSN(bufHdr));
head = GetLogIndexByPage(&pageTag,pageLsn,replayLsn);
if (head->next!=NULL) {
LsnNode* next = head->next;
if (next != NULL) {
walRecord.cap = 8192;
walRecord.buf = malloc(walRecord.cap);
}
while(next!=NULL) {
int count = walRecordQuery(&walRecord.buf,&walRecord.count,&walRecord.cap,next->lsn);
if (count == -1) {
elog(FATAL,"======walRecordQuery query wal Faild %X/%X===3===",LSN_FORMAT_ARGS(next->lsn));
}
next = next->next;
}
}
}
}
}
bufHdr->pageIsVaild = true;
}
} else {
smgrread(smgr, forkNum, blockNum, (char *) bufBlock);
}
if (track_io_timing)
@ -1123,12 +1200,6 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
blockNum,
relpath(smgr->smgr_rnode, forkNum))));
MemSet((char *) bufBlock, 0, BLCKSZ);
/* He3DB: He3FS */
if(pageXlogBuf != NULL)
{
free(pageXlogBuf);
pageXlogBuf = NULL;
}
}
else
ereport(ERROR,
@ -1150,11 +1221,12 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* that we cannot use LockBuffer() or LockBufferForCleanup() here, because
* they assert that the buffer is already valid.)
*/
if ((mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK) &&
!isLocalBuf)
{
LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_EXCLUSIVE);
}
}
if (isLocalBuf)
{
@ -1166,24 +1238,40 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
}
else
{
//todo: read related wals in standby instance.
/*
* He3DB: page-replay.
*
* apply logs to this old page when read from disk.
*
*/
if (pageXlogBuf)
if (pageXlogPtr != NULL || tWalRecord.count != 0 || walRecord.count != 0)
{
/* UnpinBuffer for xlog replay */
//UnpinBuffer(bufHdr, true);
he3db_apply_page(bufHdr, pageXlogBuf + BLCKSZ, nbytes - BLCKSZ);
free(pageXlogBuf);
/* re pin */
//PinBuffer(bufHdr, strategy);
XLogRecPtr pageLsn = BufferGetLSN(bufHdr);
char *xlogStart = NULL;
if (pageXlogPtr != NULL) {
xlogStart = pageXlogPtr + BLCKSZ;
nbytes = nbytes - BLCKSZ;
} else if (tWalRecord.count != 0) {
xlogStart = tWalRecord.buf;
nbytes = tWalRecord.count;
}
if (walRecord.count != 0) {
xlogStart = walRecord.buf;
nbytes = walRecord.count;
}
he3db_apply_page(bufHdr, xlogStart, nbytes);
if (pageXlogPtr != NULL) {
free(pageXlogPtr);
pageXlogPtr = NULL;
} else if (tWalRecord.count != 0) {
free_dataRead(tWalRecord.buf,tWalRecord.count,tWalRecord.cap);
FreeLsnNode(head);
}
if (walRecord.count != 0) {
free(walRecord.buf);
FreeLsnNode(head);
}
}
/* He3DB end */
/* Set BM_VALID, terminate IO, and wake up any waiters */
@ -1336,7 +1424,7 @@ static BufferDesc *
BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
BlockNumber blockNum,
BufferAccessStrategy strategy,
bool *foundPtr)
bool *foundPtr, bool *exist)
{
BufferTag newTag; /* identity of requested block */
uint32 newHash; /* hash value for newTag */
@ -1391,6 +1479,11 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* If we get here, previous attempts to read the buffer must
* have failed ... but we shall bravely try again.
*/
if (buf->pageIsVaild == false) {
*exist = false;
} else {
*exist = true;
}
*foundPtr = false;
}
}
@ -1487,12 +1580,12 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
smgr->smgr_rnode.node.relNode);
/* he3db: FlushBuffer to He3DBFlushBuffer*/
if (push_standby == true) {
// if (push_standby == true) {
// master/slave/push standby need to flush dirty page to release space
FlushBuffer(buf, NULL);
} else {
He3DBFlushBuffer(buf, NULL);
}
// } else {
// He3DBFlushBuffer(buf, NULL);
// }
LWLockRelease(BufferDescriptorGetContentLock(buf));
@ -1610,6 +1703,11 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* If we get here, previous attempts to read the buffer
* must have failed ... but we shall bravely try again.
*/
if (buf->pageIsVaild == false) {
*exist = false;
} else {
*exist = true;
}
*foundPtr = false;
}
}
@ -1662,7 +1760,7 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
buf_state |= BM_TAG_VALID | BM_PERMANENT | BUF_USAGECOUNT_ONE;
else
buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
buf->pageIsVaild = false;
UnlockBufHdr(buf, buf_state);
if (oldPartitionLock != NULL)
@ -1680,8 +1778,14 @@ BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
* to read it before we did, so there's nothing left for BufferAlloc() to
* do.
*/
if (StartBufferIO(buf, true))
if (StartBufferIO(buf, true)) {
if (buf->pageIsVaild == false) {
*exist = false;
} else {
*exist = true;
}
*foundPtr = false;
}
else
*foundPtr = true;
@ -1897,8 +2001,9 @@ MarkBufferDirty(Buffer buffer)
bufHdr = GetBufferDescriptor(buffer - 1);
Assert(BufferIsPinned(buffer));
Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
LW_EXCLUSIVE));
// this assert would fire for mode == RBM_NORMAL_VALID, so it is commented out
// Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
// LW_EXCLUSIVE));
old_buf_state = pg_atomic_read_u32(&bufHdr->state);
for (;;)
@ -2077,40 +2182,6 @@ PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
ref->refcount++;
Assert(ref->refcount > 0);
// for precache: buf not be eliminated by clock algorithm
if (needPreCacheEscape)
{
uint32 buf_state;
uint32 old_buf_state;
old_buf_state = pg_atomic_read_u32(&buf->state);
for (;;)
{
if (old_buf_state & BM_LOCKED)
old_buf_state = WaitBufHdrUnlocked(buf);
buf_state = old_buf_state;
/* increase refcount */
buf_state += BUF_REFCOUNT_ONE;
if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
buf_state))
{
result = (buf_state & BM_VALID) != 0;
/*
* Assume that we acquired a buffer pin for the purposes of
* Valgrind buffer client checks (even in !result case) to
* keep things simple. Buffers that are unsafe to access are
* not generally guaranteed to be marked undefined or
* non-accessible in any case.
*/
VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
break;
}
}
}
ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
return result;
@ -2164,11 +2235,6 @@ PinBuffer_Locked(BufferDesc *buf)
buf_state = pg_atomic_read_u32(&buf->state);
Assert(buf_state & BM_LOCKED);
buf_state += BUF_REFCOUNT_ONE;
// for precache: buf not be eliminated by clock algorithm
if (needPreCacheEscape)
{
buf_state += BUF_REFCOUNT_ONE;
}
UnlockBufHdr(buf, buf_state);
b = BufferDescriptorGetBuffer(buf);
@ -2912,11 +2978,11 @@ SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
PinBuffer_Locked(bufHdr);
LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
/* he3db: FlushBuffer to He3DBFlushBuffer*/
if (push_standby == true) {
//if (push_standby == true) {
FlushBuffer(bufHdr, NULL);
} else {
/*} else {
He3DBFlushBuffer(bufHdr, NULL);
}
}*/
LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
@ -3257,6 +3323,14 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
* buffer, other processes might be updating hint bits in it, so we must
* copy the page to private storage if we do checksumming.
*/
// PageKey pageKey;
// pageKey.relfileNode.dbNode = buf->tag.rnode.dbNode;;
// pageKey.relfileNode.relNode = buf->tag.rnode.relNode;
// pageKey.relfileNode.spcNode = buf->tag.rnode.spcNode;
// pageKey.blkNo = buf->tag.blockNum;
// pageKey.forkNo = buf->tag.forkNum;
// pageKey.pageLsn = recptr;
bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
if (track_io_timing)
@ -3265,11 +3339,15 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
/*
* bufToWrite is either the shared buffer or a copy, as appropriate.
*/
smgrwrite(reln,
he3dbsmgrwrite(reln,
buf->tag.forkNum,
buf->tag.blockNum,
bufToWrite,
false);
false, recptr);
// place the page onto the local disk
// EvictOnePageOutOfMemory(pageKey, bufToWrite);
if (track_io_timing)
{
@ -4047,12 +4125,13 @@ FlushRelationBuffers(Relation rel)
error_context_stack = &errcallback;
PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
XLogRecPtr lsn = BufferGetLSN(bufHdr);
smgrwrite(rel->rd_smgr,
he3dbsmgrwrite(rel->rd_smgr,
bufHdr->tag.forkNum,
bufHdr->tag.blockNum,
localpage,
false);
false, lsn);
buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
@ -5008,7 +5087,18 @@ TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
buf_state &= ~(BM_DIRTY | BM_CHECKPOINT_NEEDED);
buf_state |= set_flag_bits;
if (!(IsBootstrapProcessingMode() == true || InitdbSingle == true) && (InRecovery || (EnableHotStandby && *isPromoteIsTriggered == false)) && set_flag_bits == BM_VALID)
{
XLogRecPtr pageLsn = BufferGetLSN(buf);
XLogRecPtr replayLsn = GetXLogReplayRecPtr(NULL);
bool hasdata = CheckBufTagExistByLsnRange(&buf->tag, pageLsn, replayLsn);
if (hasdata)
buf_state &= ~BM_VALID;
else
buf_state |= set_flag_bits;
} else {
buf_state |= set_flag_bits;
}
UnlockBufHdr(buf, buf_state);
if (!bulk_io_is_in_progress)
{
@ -5435,8 +5525,11 @@ TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
* He3DB: page-replay.
*/
static void
he3db_apply_page(BufferDesc *bufHdr, char *pageXlogBuf, int nbytes)
he3db_apply_page(BufferDesc *bufHdr, char *pageXlogBuf, int nbyte)
{
if (nbyte == 0) {
return;
}
XLogReaderState *state;
Buffer buffer;
@ -5456,7 +5549,7 @@ he3db_apply_page(BufferDesc *bufHdr, char *pageXlogBuf, int nbytes)
state->tag = &tag;
state->buffer = buffer;
memcpy(state->tag,&buf_desc->tag,sizeof(buf_desc->tag));
while (nbytes > 0)
while (nbyte > 0)
{
int recordLen;
recordLen = he3db_apply_one_record(state, buffer, pageXlogBuf);
@ -5465,7 +5558,7 @@ he3db_apply_page(BufferDesc *bufHdr, char *pageXlogBuf, int nbytes)
break;
}
pageXlogBuf += recordLen;
nbytes -= recordLen;
nbyte -= recordLen;
}
/* set page lsn to read point lsn */

View File

@ -324,7 +324,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state)
*/
local_buf_state = LockBufHdr(buf);
if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0)
if (buf->isPreCacheEscape == false && BUF_STATE_GET_REFCOUNT(local_buf_state) == 0)
{
if (BUF_STATE_GET_USAGECOUNT(local_buf_state) != 0)
{

View File

@ -223,13 +223,15 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
oreln = smgropen(bufHdr->tag.rnode, MyBackendId);
PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
XLogRecPtr lsn = PageGetLSN(localpage);
/* And write... */
smgrwrite(oreln,
he3dbsmgrwrite(oreln,
bufHdr->tag.forkNum,
bufHdr->tag.blockNum,
localpage,
false);
false,
lsn);
/* Mark not-dirty now in case we error out below */
buf_state &= ~BM_DIRTY;

View File

@ -11,7 +11,7 @@
subdir = src/backend/storage/file
top_builddir = ../../../..
include $(top_builddir)/src/Makefile.global
override CPPFLAGS := -lfs $(CPPFLAGS)
override CPPFLAGS := -lrust_log $(CPPFLAGS)
OBJS = \
buffile.o \

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@ -30,6 +30,7 @@
#include "storage/fsm_internals.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "postmaster/secondbuffer.h"
/*
@ -641,6 +642,18 @@ fsm_extend(Relation rel, BlockNumber fsm_nblocks)
smgrextend(rel->rd_smgr, FSM_FORKNUM, fsm_nblocks_now,
pg.data, false);
if (!(InitdbSingle || IsBootstrapProcessingMode() == true) && !push_standby && !he3mirror)
{
PageKey pageKey;
pageKey.relfileNode.dbNode = rel->rd_smgr->smgr_rnode.node.dbNode;
pageKey.relfileNode.relNode = rel->rd_smgr->smgr_rnode.node.relNode;
pageKey.blkNo = fsm_nblocks_now;
pageKey.forkNo = FSM_FORKNUM;
pageKey.pageLsn = 0;
ReceivePageFromDataBuffer(&pageKey, (uint8_t *) pg.data);
}
fsm_nblocks_now++;
}

View File

@ -29,6 +29,7 @@
#include "postmaster/bgworker_internals.h"
#include "postmaster/bgwriter.h"
#include "postmaster/postmaster.h"
#include "postmaster/secondbuffer.h"
#include "replication/logicallauncher.h"
#include "replication/origin.h"
#include "replication/slot.h"
@ -36,6 +37,7 @@
#include "replication/walsender.h"
#include "storage/bufmgr.h"
#include "storage/dsm.h"
#include "storage/he3db_logindex.h"
#include "storage/ipc.h"
#include "storage/pg_shmem.h"
#include "storage/pmsignal.h"
@ -46,7 +48,9 @@
#include "storage/sinvaladt.h"
#include "storage/spin.h"
#include "utils/snapmgr.h"
#include "access/pagehashqueue.h"
#include "access/ringbuffer.h"
#include "storage/filecache.h"
/* GUCs */
int shared_memory_type = DEFAULT_SHARED_MEMORY_TYPE;
@ -150,6 +154,11 @@ CreateSharedMemoryAndSemaphores(void)
size = add_size(size, BTreeShmemSize());
size = add_size(size, SyncScanShmemSize());
size = add_size(size, AsyncShmemSize());
//size = add_size(size, RelCutShmemSize());
size = add_size(size, PageHashQueueShmemSize());
size = add_size(size, PageHashMapSize());
//size = add_size(size, LogindexHashAllShmemSize());
size = add_size(size,WalReadBufferShmemSize());
#ifdef EXEC_BACKEND
size = add_size(size, ShmemBackendArraySize());
#endif
@ -158,6 +167,15 @@ CreateSharedMemoryAndSemaphores(void)
addin_request_allowed = false;
size = add_size(size, total_addin_request);
/* secondbufferhash code. */
//TODO the size should be calculated base on data buffer size.
size = add_size(size, SecondBufferShmemSize());
size = add_size(size, SecondBufferLWLockShmemSize());
size = add_size(size, He3dbLogIndexShmemSize());
/* cache file size */
size = add_size(size, FileCacheSize());
/* might as well round it off to a multiple of a typical page size */
size = add_size(size, 8192 - (size % 8192));
@ -206,6 +224,8 @@ CreateSharedMemoryAndSemaphores(void)
*/
CreateLWLocks();
CreateSecondBufferLWLocks();
/*
* Set up shmem.c index hashtable
*/
@ -227,7 +247,25 @@ CreateSharedMemoryAndSemaphores(void)
* Set up lock manager
*/
InitLocks();
InitCacheRel();
/*
* set up wal log hash
*/
He3dbLogIndexTblListInit();
//InitCleanupInfo();
/*
* set up second buffer hash
*/
InitSecondBufferHash();
InitSecondBufferMeta();
InitDPageKeyArray();
/*
* set up fs meta
*/
// InitFSMetaHash();
/*
* Set up predicate lock manager
*/
@ -255,6 +293,10 @@ CreateSharedMemoryAndSemaphores(void)
ProcSignalShmemInit();
CheckpointerShmemInit();
AutoVacuumShmemInit();
PageHashQueueShmemInit();
InitBufferPoolHashMap();
//InitLogindexHashBrucket();
InitRingBufferSpace();
ReplicationSlotsShmemInit();
ReplicationOriginShmemInit();
WalSndShmemInit();

View File

@ -22,7 +22,8 @@ OBJS = \
predicate.o \
proc.o \
s_lock.o \
spin.o
spin.o \
he3db_logindex.o
include $(top_srcdir)/src/backend/common.mk

View File

@ -0,0 +1,861 @@
#include "postgres.h"
#include "storage/he3db_logindex.h"
#include "storage/shmem.h"
#include "storage/spin.h"
static LogIndexMemList *log_index_mem_list;
static uint64 logindex_mem_tbl_size;
static Size
LogIndexMemListSize(uint64 he3db_logindex_mem_size)
{
Size size;
logindex_mem_tbl_size = (he3db_logindex_mem_size * 1024L * 1024L) / sizeof(LogIndexMemTBL);
size = offsetof(LogIndexMemList, mem_table); // size of everything except the flexible array member
size = add_size(size, mul_size(sizeof(LogIndexMemTBL), logindex_mem_tbl_size));
size = MAXALIGN(size); // round the size up to a multiple of 8
/* The number of logindex memory table is at least 3 */
if (logindex_mem_tbl_size < 3)
elog(FATAL, "The number=%ld of logindex memory table is less than 3", logindex_mem_tbl_size);
else
ereport(LOG, (errmsg("The total log index memory table size is %ld, number logindex mem-table size is %ld", size, logindex_mem_tbl_size)));
return size;
}
static void SetNewPageItem(LogIndexMemTBL *mem_tbl, const BufferTag *page)
{
// set page item
LogIndexMemItemHead *page_head = &(mem_tbl->page_head[mem_tbl->meta.page_free_head-1]);
memcpy(&(page_head->tag), page, sizeof(BufferTag));
page_head->next_item = LOG_INDEX_TBL_INVALID_SEG;
page_head->next_seg = mem_tbl->meta.lsn_free_head;
page_head->tail_seg = mem_tbl->meta.lsn_free_head;
}
// When the active table is full, get the next free mem table and make it the new active one.
static LogIndexMemTBL *GetNextFreeMemTbl(void)
{
// TODO change to Lightweight Lock
uint64 active_tbl_index = (log_index_mem_list->active_table_index + 1)%(log_index_mem_list->table_cap);
// if all mem tables are full, wait for recycling
if(active_tbl_index == log_index_mem_list->table_start_index)
{
elog(LOG, "Mem table is full, waiting for cleanup. Total size: %ld", logindex_mem_tbl_size);
}
while(active_tbl_index == log_index_mem_list->table_start_index)
{
pg_usleep(10); /* 10 us */
}
elog(DEBUG5, "Find next free mem table and set active_table_index + 1: %ld", active_tbl_index);
LWLockAcquire(LogIndexMemListLock,LW_EXCLUSIVE);
// Circular List
log_index_mem_list->active_table_index = active_tbl_index;
LWLockRelease(LogIndexMemListLock);
// a free mem table has been found, so return it directly.
return &(log_index_mem_list->mem_table[log_index_mem_list->active_table_index]);
}
static void SetLsnSeg(LogIndexMemItemSeg *lsn_seg, XLogRecPtr lsn){
LOG_INDEX_INSERT_LSN_INFO(lsn_seg, lsn_seg->number, lsn);
lsn_seg->number++;
}
static void SetNewLsnSeg(LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
// set lsn seg
// first seg index start with 0, seg_item[0]
LogIndexMemItemSeg *lsn_seg = &(mem_tbl->seg_item[mem_tbl->meta.lsn_free_head-1]);
lsn_seg->prev_seg = LOG_INDEX_TBL_INVALID_SEG;
lsn_seg->next_seg = LOG_INDEX_TBL_INVALID_SEG;
SetLsnSeg(lsn_seg, lsn);
}
static void SetNextLsnSeg(LogIndexMemItemHead *page_head, LogIndexMemItemSeg *lsn_seg_old, LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
// set lsn next seg
LogIndexMemItemSeg *lsn_seg_next = &(mem_tbl->seg_item[mem_tbl->meta.lsn_free_head-1]);
lsn_seg_old->next_seg = mem_tbl->meta.lsn_free_head;
lsn_seg_next->prev_seg = page_head->tail_seg;
lsn_seg_next->next_seg = LOG_INDEX_TBL_INVALID_SEG;
page_head->tail_seg = mem_tbl->meta.lsn_free_head;
SetLsnSeg(lsn_seg_next, lsn);
}
static void UpdateMemTableMetaWithNewPage(LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
// set metadata for active mem table
SpinLockAcquire(&(mem_tbl->meta.meta_lock));
// set prefix_lsn, min_lsn and max_lsn
LOG_INDEX_MEM_TBL_SET_PREFIX_LSN(mem_tbl, lsn);
mem_tbl->meta.max_lsn = Max(lsn, mem_tbl->meta.max_lsn);
mem_tbl->meta.min_lsn = Min(lsn, mem_tbl->meta.min_lsn);
// page,lsn free index ++
mem_tbl->meta.page_free_head++;
mem_tbl->meta.lsn_free_head++;
SpinLockRelease(&(mem_tbl->meta.meta_lock));
}
static void UpdateMemTableMetaWithNextPage(LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
// set metadata for active mem table
SpinLockAcquire(&(mem_tbl->meta.meta_lock));
// set prefix_lsn, min_lsn and max_lsn
mem_tbl->meta.max_lsn = Max(lsn, mem_tbl->meta.max_lsn);
mem_tbl->meta.min_lsn = Min(lsn, mem_tbl->meta.min_lsn);
// page,lsn free index ++
mem_tbl->meta.page_free_head++;
mem_tbl->meta.lsn_free_head++;
SpinLockRelease(&(mem_tbl->meta.meta_lock));
}
static void UpdateMemTableMetaWithNextSeg(LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
// set metadata for active mem table
SpinLockAcquire(&(mem_tbl->meta.meta_lock));
mem_tbl->meta.max_lsn = Max(lsn, mem_tbl->meta.max_lsn);
mem_tbl->meta.min_lsn = Min(lsn, mem_tbl->meta.min_lsn);
mem_tbl->meta.lsn_free_head++;
SpinLockRelease(&(mem_tbl->meta.meta_lock));
}
static void UpdateMemTableMetaWithCurrentSeg(LogIndexMemTBL *mem_tbl, XLogRecPtr lsn)
{
// set metadata for active mem table
SpinLockAcquire(&(mem_tbl->meta.meta_lock));
mem_tbl->meta.max_lsn = Max(lsn, mem_tbl->meta.max_lsn);
mem_tbl->meta.min_lsn = Min(lsn, mem_tbl->meta.min_lsn);
SpinLockRelease(&(mem_tbl->meta.meta_lock));
}
static void SetActiveTblWithFirstPage(LogIndexMemTBL *mem_tbl, const BufferTag *page, XLogRecPtr lsn)
{
uint32 hash_key;
// set mem table state to active
pg_atomic_write_u32(&(mem_tbl->meta.state), LOG_INDEX_MEM_TBL_STATE_ACTIVE);
// indexes start at 1; 0 means INVALID. All hash[] values are 0 after init, so they are set starting from 1 on first use.
mem_tbl->meta.id = log_index_mem_list->active_table_index;
mem_tbl->meta.lsn_free_head = 1;
mem_tbl->meta.page_free_head = 1;
// calculate hashcode by buffer tag
hash_key = LOG_INDEX_MEM_TBL_HASH_PAGE(page);
mem_tbl->hash[hash_key] = mem_tbl->meta.page_free_head;
// set page item
SetNewPageItem(mem_tbl, page);
// set lsn seg
SetNewLsnSeg(mem_tbl, lsn);
// set metadata for active mem table
UpdateMemTableMetaWithNewPage(mem_tbl, lsn);
}
static void InsertLsnWhenOldTblIsFull(LogIndexMemTBL *mem_tbl_old, const BufferTag *page, XLogRecPtr lsn)
{
LogIndexMemTBL *mem_tbl_new;
// set mem table state to inactive
pg_atomic_write_u32(&(mem_tbl_old->meta.state), LOG_INDEX_MEM_TBL_STATE_INACTIVE);
mem_tbl_new = GetNextFreeMemTbl();
SetActiveTblWithFirstPage(mem_tbl_new, page, lsn);
}
static void SetNextPageItem(LogIndexMemTBL *mem_tbl, const BufferTag *page, XLogRecPtr lsn)
{
// no free page_head or lsn_seg left: the current active table is full, so switch to a new mem table as the active table
if (mem_tbl->meta.page_free_head > LOG_INDEX_MEM_TBL_PAGE_NUM || mem_tbl->meta.lsn_free_head > LOG_INDEX_MEM_TBL_SEG_NUM)
{
// no free page head in the active mem table; switch to a new one
InsertLsnWhenOldTblIsFull(mem_tbl, page, lsn);
}
else
{
// set a new page and lsn seg while the active mem table still has free resources
SetNewPageItem(mem_tbl, page);
SetNewLsnSeg(mem_tbl, lsn);
UpdateMemTableMetaWithNewPage(mem_tbl, lsn);
}
}
static void RestMemTable(LogIndexMemTBL *mem_tbl)
{
// reset table's metadata
mem_tbl->meta.id = LOG_INDEX_TABLE_INVALID_ID;
pg_atomic_write_u32(&(mem_tbl->meta.state), LOG_INDEX_MEM_TBL_STATE_FREE);
mem_tbl->meta.page_free_head = LOG_INDEX_TBL_INVALID_SEG;
mem_tbl->meta.lsn_free_head = LOG_INDEX_TBL_INVALID_SEG;
mem_tbl->meta.min_lsn = UINT64_MAX;
mem_tbl->meta.max_lsn = InvalidXLogRecPtr;
mem_tbl->meta.prefix_lsn = 0;
// reset hash[] and page head[]
for(int i = 0; i < LOG_INDEX_MEM_TBL_PAGE_NUM; i++)
{
mem_tbl->hash[i] = LOG_INDEX_TBL_INVALID_SEG;
CLEAR_BUFFERTAG(mem_tbl->page_head[i].tag);
mem_tbl->page_head[i].next_item = LOG_INDEX_TBL_INVALID_SEG;
mem_tbl->page_head[i].next_seg = LOG_INDEX_TBL_INVALID_SEG;
mem_tbl->page_head[i].tail_seg = LOG_INDEX_TBL_INVALID_SEG;
// reset seg_item[]
mem_tbl->seg_item[i].prev_seg = LOG_INDEX_TBL_INVALID_SEG;
mem_tbl->seg_item[i].next_seg = LOG_INDEX_TBL_INVALID_SEG;
mem_tbl->seg_item[i].number = 0;
}
// reset seg_item[]
for(int i = LOG_INDEX_MEM_TBL_PAGE_NUM; i < LOG_INDEX_MEM_TBL_SEG_NUM; i++){
mem_tbl->seg_item[i].prev_seg = LOG_INDEX_TBL_INVALID_SEG;
mem_tbl->seg_item[i].next_seg = LOG_INDEX_TBL_INVALID_SEG;
mem_tbl->seg_item[i].number = 0;
}
}
static LsnNode *InitLsnNode(void)
{
LsnNode *head;
head = (LsnNode *)malloc(sizeof(LsnNode));
head->next = NULL;
return head;
}
// insert a new node right after the list head, eg: before: head-->node1-->NULL, after: head-->newNode-->node1-->NULL
static void InsertLsnNodeByHead(LsnNode *head, XLogRecPtr lsn)
{
LsnNode *new_node;
new_node = (LsnNode *)malloc(sizeof(LsnNode));
new_node->lsn = lsn;
new_node->next = head->next;
head->next = new_node;
}
// append a node after the given tail node and return the new tail, eg: before: tail-->NULL, after: tail-->newNode-->NULL
static LsnNode *InsertLsnNodeByTail(LsnNode *head, XLogRecPtr lsn)
{
LsnNode *new_node;
new_node = (LsnNode *)malloc(sizeof(LsnNode));
head->next = new_node;
new_node->lsn = lsn;
new_node->next = NULL;
return new_node;
}
// print nodelist
static void PrintLsnNode(LsnNode *head)
{
LsnNode *p;
p = head->next;
while (p) {
printf(" %d\t ", p->lsn);
p = p->next;
}
}
static void ReverseLsnNode(LsnNode *head)
{
if (head == NULL || head->next == NULL) {
return;
}
LsnNode *p = NULL;
LsnNode *q = head->next;
LsnNode *next ;
while (q != NULL) {
next = q->next;
q->next = p;
p = q;
q = next;
}
head->next=p;
}
static uint16 FindFirstLsnSegInMemTblByPageTag(LogIndexMemTBL *mem_tbl, const BufferTag *page, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
{
LogIndexMemItemHead *page_head;
uint32 hash_key;
// end_lsn <= min_lsn or start_lsn > max_lsn means the requested lsn range is not in this mem table
if(mem_tbl->meta.min_lsn >= end_lsn || mem_tbl->meta.max_lsn < start_lsn)
{
return LOG_INDEX_TBL_INVALID_SEG;
}else{
hash_key = LOG_INDEX_MEM_TBL_HASH_PAGE(page);
if(mem_tbl->hash[hash_key] != LOG_INDEX_TBL_INVALID_SEG)
{
page_head = &(mem_tbl->page_head[mem_tbl->hash[hash_key]-1]);
while(!BUFFERTAGS_EQUAL(page_head->tag, *page)){
if(page_head->next_item == LOG_INDEX_TBL_INVALID_SEG)
{
return LOG_INDEX_TBL_INVALID_SEG;
}
page_head = &(mem_tbl->page_head[page_head->next_item-1]);
}
// find request page, return lsn seg
return (page_head->next_seg);
}else
{
return LOG_INDEX_TBL_INVALID_SEG;
}
}
}
static TagNode *InitTagNode(void)
{
TagNode *head;
head = (TagNode *)malloc(sizeof(TagNode));
head->next = NULL;
return head;
}
// insert a new node right after the list head, eg: before: head-->node1-->NULL, after: head-->newNode-->node1-->NULL
static void InsertTagNodeByHead(TagNode *head, BufferTag tag)
{
TagNode *new_node;
new_node = (TagNode *)malloc(sizeof(TagNode));
new_node->tag.tag = tag;
new_node->next = head->next;
head->next = new_node;
}
void He3dbLogIndexTblListInit(void)
{
bool found_logindex;
log_index_mem_list = (LogIndexMemList *)
ShmemInitStruct("log index", LogIndexMemListSize(he3db_logindex_mem_size), &found_logindex);
Assert(log_index_mem_list != NULL);
log_index_mem_list->table_start_index = 0;
log_index_mem_list->active_table_index = 0;
log_index_mem_list->table_cap = logindex_mem_tbl_size;
//SpinLockInit(&(log_index_mem_list->lock));
for (uint64 i = 0; i < log_index_mem_list->table_cap; i++) {
// set mem table init values
SpinLockInit(&(log_index_mem_list->mem_table[i].meta.meta_lock));
log_index_mem_list->mem_table[i].meta.id = i + 1;
log_index_mem_list->mem_table[i].meta.min_lsn = UINT64_MAX;
log_index_mem_list->mem_table[i].meta.max_lsn = InvalidXLogRecPtr;
SpinLockInit(&(log_index_mem_list->mem_table[i].meta.meta_lock));
pg_atomic_write_u32(&(log_index_mem_list->mem_table[i].meta.state), LOG_INDEX_MEM_TBL_STATE_FREE);
}
//SpinLockInit(&(log_index_mem_list->lock));
}
uint64 GetMemTblSize(void)
{
return log_index_mem_list->table_cap;
}
void InsertLogIndexByPage(const BufferTag *page, XLogRecPtr lsn)
{
LogIndexMemItemSeg *lsn_seg;
uint32 hash_key;
LogIndexMemTBL *mem_tbl;
LogIndexMemItemHead *page_head;
// calculate hashcode by buffer tag
hash_key = LOG_INDEX_MEM_TBL_HASH_PAGE(page);
// get active mem table
mem_tbl = &(log_index_mem_list->mem_table[log_index_mem_list->active_table_index]);
// first time to use active mem table
if(pg_atomic_read_u32(&mem_tbl->meta.state) == LOG_INDEX_MEM_TBL_STATE_FREE)
{
SetActiveTblWithFirstPage(mem_tbl, page, lsn);
}
else
{
// if have same lsn prefix with active table
if(LOG_INDEX_SAME_TABLE_LSN_PREFIX(mem_tbl, lsn))
{
// 0 means INVALID, i.e. the page does not yet exist in the active mem table
if(mem_tbl->hash[hash_key] == 0)
{
// set hash value to next free head
if (!(mem_tbl->meta.page_free_head > LOG_INDEX_MEM_TBL_PAGE_NUM || mem_tbl->meta.lsn_free_head > LOG_INDEX_MEM_TBL_SEG_NUM))
mem_tbl->hash[hash_key] = mem_tbl->meta.page_free_head;
SetNextPageItem(mem_tbl, page, lsn);
}
else
{
// page already exist or hash conflict
// get exist page item
page_head = &(mem_tbl->page_head[mem_tbl->hash[hash_key]-1]);
/* If the item's page tag equals the current tag, insert the lsn into its lsn_seg;
 * otherwise follow next_item until a match is found or the chain ends, then allocate a new page_item and lsn_seg.
*/
while(!BUFFERTAGS_EQUAL(page_head->tag, *page)){
if(page_head->next_item == LOG_INDEX_TBL_INVALID_SEG)
{
// apply new page item
// no free page_head or lsn_seg left: the current active table is full, so switch to a new mem table as the active table
if (mem_tbl->meta.page_free_head > LOG_INDEX_MEM_TBL_PAGE_NUM || mem_tbl->meta.lsn_free_head > LOG_INDEX_MEM_TBL_SEG_NUM)
{
// no free page head in the active mem table; switch to a new one
InsertLsnWhenOldTblIsFull(mem_tbl, page, lsn);
}
else
{
// set a new page and lsn seg while the active mem table still has free resources
// set old page item's next_item to new one.
page_head->next_item = mem_tbl->meta.page_free_head;
// set page item
SetNewPageItem(mem_tbl, page);
SetNewLsnSeg(mem_tbl, lsn);
UpdateMemTableMetaWithNextPage(mem_tbl, lsn);
}
return;
}
page_head = &(mem_tbl->page_head[page_head->next_item-1]);
}
// find same tag's page_head
lsn_seg = &(mem_tbl->seg_item[page_head->tail_seg-1]);
// if current seg full?
if(lsn_seg->number < LOG_INDEX_MEM_ITEM_SEG_LSN_NUM)
{
// insert lsn to seg
SetLsnSeg(lsn_seg, lsn);
UpdateMemTableMetaWithCurrentSeg(mem_tbl, lsn);
}
else
{
if(mem_tbl->meta.lsn_free_head > LOG_INDEX_MEM_TBL_SEG_NUM)
{
// no free lsn seg in the active mem table; switch to a new one
InsertLsnWhenOldTblIsFull(mem_tbl, page, lsn);
}
else
{
// apply new seg and insert lsn
SetNextLsnSeg(page_head, lsn_seg, mem_tbl, lsn);
UpdateMemTableMetaWithNextSeg(mem_tbl, lsn);
}
}
}
}
else
{
// the lsn prefix differs, so the current active table cannot be used; switch to a new mem table
InsertLsnWhenOldTblIsFull(mem_tbl, page, lsn);
}
}
}
LsnNode *GetLogIndexByPage(const BufferTag *page, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
{
LsnNode *head_node;
LsnNode *tail;
uint64 tbl_index;
// Prevent metadata changes during discovery.
// TODO change to Lightweight Lock
head_node = InitLsnNode();
tail = head_node;
LWLockAcquire(LogIndexMemListLock,LW_SHARED);
tbl_index = log_index_mem_list->table_start_index;
while(tbl_index != log_index_mem_list->active_table_index)
{
LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
tbl_index = (tbl_index + 1)%(log_index_mem_list->table_cap);
// the current mem table has no suitable lsn list
if(mem_tbl->meta.max_lsn < start_lsn)
{
continue;
}else if(mem_tbl->meta.min_lsn >= end_lsn)
{
// no suitable lsn list can exist after this mem table
break;
} else
{
// get index of current table's seg
uint16 seg_index = FindFirstLsnSegInMemTblByPageTag(mem_tbl, page, start_lsn, end_lsn);
while (seg_index != LOG_INDEX_TBL_INVALID_SEG)
{
LogIndexMemItemSeg *item_seg = &(mem_tbl->seg_item[seg_index - 1]);
// loop for lsn list
for(int i=0; i < item_seg->number; i++){
XLogRecPtr lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, item_seg->suffix_lsn[i]);
if(lsn >= start_lsn)
{
if(lsn < end_lsn)
{
tail = InsertLsnNodeByTail(tail, lsn);
}else{
LWLockRelease(LogIndexMemListLock);
return head_node;
}
}else
{
continue;
}
}
seg_index = item_seg->next_seg;
}
}
}
// loop for active table
if(tbl_index == log_index_mem_list->active_table_index)
{
LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[log_index_mem_list->active_table_index]);
// get index of current table's seg
uint16 seg_index = FindFirstLsnSegInMemTblByPageTag(mem_tbl, page, start_lsn, end_lsn);
while (seg_index != LOG_INDEX_TBL_INVALID_SEG)
{
LogIndexMemItemSeg *item_seg = &(mem_tbl->seg_item[seg_index - 1]);
// loop for lsn list
for(int i=0; i < item_seg->number; i++){
XLogRecPtr lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, item_seg->suffix_lsn[i]);
if(lsn >= start_lsn)
{
if(lsn < end_lsn)
{
tail = InsertLsnNodeByTail(tail, lsn);
}else{
LWLockRelease(LogIndexMemListLock);
return head_node;
}
}else
{
continue;
}
}
seg_index = item_seg->next_seg;
}
LWLockRelease(LogIndexMemListLock);
return head_node;
}
LWLockRelease(LogIndexMemListLock);
return head_node;
}
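A rough usage sketch of the two entry points above (not part of the patch): a writer indexes every WAL record that touches a page with InsertLogIndexByPage, and a reader later collects the LSNs falling in [page LSN, replay LSN) with GetLogIndexByPage, walks the returned list, and releases it with FreeLsnNode. The variables rnode, blkno, record_lsn, page_lsn and replay_lsn are hypothetical placeholders:

```c
BufferTag	tag;
LsnNode    *head;

INIT_BUFFERTAG(tag, rnode, MAIN_FORKNUM, blkno);

/* writer side: remember that record_lsn modified this page */
InsertLogIndexByPage(&tag, record_lsn);

/* reader side: fetch the indexed LSNs in [page_lsn, replay_lsn) */
head = GetLogIndexByPage(&tag, page_lsn, replay_lsn);
for (LsnNode *node = head->next; node != NULL; node = node->next)
	elog(DEBUG5, "wal to replay at %X/%X", LSN_FORMAT_ARGS(node->lsn));
FreeLsnNode(head);
```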
/* Clean up mem tables whose max_lsn is less than consist_lsn,
 * and reset them for reuse.
*/
void CleanLogIndexByPage(XLogRecPtr consist_lsn)
{
// TODO change to Lightweight Lock
LWLockAcquire(LogIndexMemListLock,LW_EXCLUSIVE);
// loop mem table from table_start_index
while(log_index_mem_list->table_start_index != log_index_mem_list->active_table_index)
{
LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[log_index_mem_list->table_start_index]);
// if max_lsn >= consist_lsn or the table is still active, it cannot be cleaned up and reused yet, so stop; otherwise clean it up
if (mem_tbl->meta.max_lsn >= consist_lsn || pg_atomic_read_u32(&mem_tbl->meta.state) != LOG_INDEX_MEM_TBL_STATE_INACTIVE)
{
break;
}
elog(DEBUG5, "Reset Mem table id=%ld by consist_lsn=%ld ", mem_tbl->meta.id, consist_lsn);
RestMemTable(mem_tbl);
log_index_mem_list->table_start_index = (log_index_mem_list->table_start_index + 1)%(log_index_mem_list->table_cap);
}
LWLockRelease(LogIndexMemListLock);
}
Size He3dbLogIndexShmemSize(void)
{
Size size = 0;
if (he3db_logindex_mem_size <= 0)
return size;
size = LogIndexMemListSize(he3db_logindex_mem_size);
size = CACHELINEALIGN(size);
elog(DEBUG5, "Mem table size=%ld in share memory", size);
return size;
}
void FreeLsnNode(LsnNode *head)
{
LsnNode* ln;
while (head != NULL)
{
ln = head;
head = head->next;
free(ln);
ln = NULL;
}
}
TagNode *GetBufTagByLsnRange(XLogRecPtr start_lsn, XLogRecPtr end_lsn)
{
TagNode *head_node;
uint64 tbl_index;
LogIndexMemItemHead *item_page;
LogIndexMemItemSeg *first_seg;
LogIndexMemItemSeg *last_seg;
XLogRecPtr page_min_lsn;
XLogRecPtr page_max_lsn;
// Prevent metadata changes during discovery.
// change to Lightweight Lock
head_node = InitTagNode();
if (end_lsn < start_lsn)
{
return head_node;
}
LWLockAcquire(LogIndexMemListLock,LW_SHARED);
tbl_index = log_index_mem_list->table_start_index;
while(tbl_index != log_index_mem_list->active_table_index)
{
LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
tbl_index = (tbl_index + 1)%(log_index_mem_list->table_cap);
// the current mem table has no suitable lsn list
if(mem_tbl->meta.max_lsn < start_lsn)
{
continue;
}else if(mem_tbl->meta.min_lsn > end_lsn)
{
// no suitable lsn list can exist after this mem table
LWLockRelease(LogIndexMemListLock);
return head_node;
}
else
{
end_lsn = Min(end_lsn, mem_tbl->meta.max_lsn);
head_node->tag.lsn = end_lsn;
// loop for page list
for(int i = 0; i < (mem_tbl->meta.page_free_head - 1); i++)
{
item_page = &(mem_tbl->page_head[i]);
if(item_page->next_seg == LOG_INDEX_TBL_INVALID_SEG || item_page->tail_seg == LOG_INDEX_TBL_INVALID_SEG)
{
continue;
}
else
{
first_seg = &(mem_tbl->seg_item[item_page->next_seg - 1]);
last_seg = &(mem_tbl->seg_item[item_page->tail_seg - 1]);
page_min_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, first_seg->suffix_lsn[0]);
uint8 id = Min(LOG_INDEX_MEM_ITEM_SEG_LSN_NUM - 1, last_seg->number - 1);
page_max_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, last_seg->suffix_lsn[id]);
if(page_min_lsn > end_lsn || page_max_lsn < start_lsn)
{
continue;
}
else
{
InsertTagNodeByHead(head_node, item_page->tag);
}
}
}
LWLockRelease(LogIndexMemListLock);
return head_node;
}
}
if (tbl_index == log_index_mem_list->active_table_index){
LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
// the current mem table has no suitable lsn list
if(!(mem_tbl->meta.max_lsn < start_lsn || mem_tbl->meta.min_lsn > end_lsn))
{
end_lsn = Min(end_lsn, mem_tbl->meta.max_lsn);
head_node->tag.lsn = end_lsn;
// loop for page list
for(int i = 0; i < (mem_tbl->meta.page_free_head - 1); i++)
{
item_page = &(mem_tbl->page_head[i]);
if(item_page->next_seg == LOG_INDEX_TBL_INVALID_SEG || item_page->tail_seg == LOG_INDEX_TBL_INVALID_SEG)
{
continue;
}
else
{
first_seg = &(mem_tbl->seg_item[item_page->next_seg - 1]);
last_seg = &(mem_tbl->seg_item[item_page->tail_seg - 1]);
page_min_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, first_seg->suffix_lsn[0]);
uint8 id = Min(LOG_INDEX_MEM_ITEM_SEG_LSN_NUM - 1, last_seg->number - 1);
page_max_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, last_seg->suffix_lsn[id]);
if(page_min_lsn > end_lsn || page_max_lsn < start_lsn)
{
continue;
}
else
{
InsertTagNodeByHead(head_node, item_page->tag);
}
}
}
}
}
LWLockRelease(LogIndexMemListLock);
return head_node;
}
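Likewise, a hypothetical consumer of GetBufTagByLsnRange could walk the returned TagNode list to learn which pages were touched inside an LSN window (the head node's tag.lsn carries the clamped end LSN) and then release it with FreeTagNode; start_lsn and end_lsn are placeholders:

```c
TagNode    *tags = GetBufTagByLsnRange(start_lsn, end_lsn);

for (TagNode *t = tags->next; t != NULL; t = t->next)
	elog(DEBUG5, "page rel=%u blk=%u modified before %X/%X",
		 t->tag.tag.rnode.relNode, t->tag.tag.blockNum,
		 LSN_FORMAT_ARGS(tags->tag.lsn));
FreeTagNode(tags);
```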
bool CheckBufTagExistByLsnRange(const BufferTag *page, XLogRecPtr start_lsn, XLogRecPtr end_lsn)
{
uint64 tbl_index;
LogIndexMemItemSeg *first_seg;
LogIndexMemItemSeg *last_seg;
XLogRecPtr page_min_lsn;
XLogRecPtr page_max_lsn;
uint32 hash_key;
LogIndexMemItemHead *page_head;
// Prevent metadata changes during discovery.
LWLockAcquire(LogIndexMemListLock,LW_SHARED);
tbl_index = log_index_mem_list->table_start_index;
loop:
while(tbl_index != log_index_mem_list->active_table_index)
{
LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
tbl_index = (tbl_index + 1)%(log_index_mem_list->table_cap);
// the current mem table has no suitable lsn list
if(mem_tbl->meta.max_lsn < start_lsn)
{
continue;
}else if(mem_tbl->meta.min_lsn >= end_lsn)
{
// no suitable lsn list can exist after this mem table
goto outerloop;
}
else
{
// find page from current mem table
hash_key = LOG_INDEX_MEM_TBL_HASH_PAGE(page);
if(mem_tbl->hash[hash_key] != LOG_INDEX_TBL_INVALID_SEG)
{
page_head = &(mem_tbl->page_head[mem_tbl->hash[hash_key]-1]);
while(!BUFFERTAGS_EQUAL(page_head->tag, *page)){
if(page_head->next_item == LOG_INDEX_TBL_INVALID_SEG)
{
// cannot find page from current mem table
goto loop;
}
page_head = &(mem_tbl->page_head[page_head->next_item-1]);
}
// found the requested page, but it has no lsn segments
if(page_head->next_seg == LOG_INDEX_TBL_INVALID_SEG || page_head->tail_seg == LOG_INDEX_TBL_INVALID_SEG)
{
continue;
}
else
{
first_seg = &(mem_tbl->seg_item[page_head->next_seg - 1]);
last_seg = &(mem_tbl->seg_item[page_head->tail_seg - 1]);
page_min_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, first_seg->suffix_lsn[0]);
uint8 id = Min(LOG_INDEX_MEM_ITEM_SEG_LSN_NUM - 1, last_seg->number - 1);
page_max_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, last_seg->suffix_lsn[id]);
// the page's lsn range does not overlap the requested range
if(page_min_lsn >= end_lsn || page_max_lsn < start_lsn)
{
continue;
}
else
{
// find one
LWLockRelease(LogIndexMemListLock);
return true;
}
}
}else
{
continue;
}
}
}
if (tbl_index == log_index_mem_list->active_table_index){
LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
tbl_index = (tbl_index + 1)%(log_index_mem_list->table_cap);
// the current mem table has no suitable lsn list
if(mem_tbl->meta.max_lsn < start_lsn)
{
goto outerloop;
}else if(mem_tbl->meta.min_lsn >= end_lsn)
{
// no suitable lsn list can exist after this mem table
goto outerloop;
}
else
{
// find page from current mem table
hash_key = LOG_INDEX_MEM_TBL_HASH_PAGE(page);
if(mem_tbl->hash[hash_key] != LOG_INDEX_TBL_INVALID_SEG)
{
page_head = &(mem_tbl->page_head[mem_tbl->hash[hash_key]-1]);
while(!BUFFERTAGS_EQUAL(page_head->tag, *page)){
if(page_head->next_item == LOG_INDEX_TBL_INVALID_SEG)
{
// cannot find page from current mem table
goto outerloop;
}
page_head = &(mem_tbl->page_head[page_head->next_item-1]);
}
// found the requested page
if(page_head->next_seg == LOG_INDEX_TBL_INVALID_SEG || page_head->tail_seg == LOG_INDEX_TBL_INVALID_SEG)
{
goto outerloop;
}
else
{
first_seg = &(mem_tbl->seg_item[page_head->next_seg - 1]);
last_seg = &(mem_tbl->seg_item[page_head->tail_seg - 1]);
page_min_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, first_seg->suffix_lsn[0]);
uint8 id = Min(LOG_INDEX_MEM_ITEM_SEG_LSN_NUM - 1, last_seg->number - 1);
page_max_lsn = LOG_INDEX_COMBINE_LSN(mem_tbl, last_seg->suffix_lsn[id]);
if(page_min_lsn >= end_lsn || page_max_lsn < start_lsn)
{
goto outerloop;
}
else
{
// find one
LWLockRelease(LogIndexMemListLock);
return true;
}
}
}else
{
goto outerloop;
}
}
}
outerloop:
LWLockRelease(LogIndexMemListLock);
return false;
}
void FreeTagNode(TagNode *head)
{
TagNode* tn;
while (head != NULL)
{
tn = head;
head = head->next;
free(tn);
tn = NULL;
}
}
void He3DBGetLogindexStats(uint64 *memtable_total, uint64 *memtable_used, uint64 *memtable_active_index,
uint64 *memtable_start_index, uint64 *page_total)
{
LWLockAcquire(LogIndexMemListLock,LW_SHARED);
*memtable_start_index = log_index_mem_list->table_start_index;
*memtable_active_index = log_index_mem_list->active_table_index;
*memtable_total = log_index_mem_list->table_cap;
LWLockRelease(LogIndexMemListLock);
*memtable_used = ((*memtable_active_index - *memtable_start_index) + *memtable_total)%*memtable_total + 1;
uint64 tbl_index = *memtable_start_index;
uint64 page_num = 0;
while(tbl_index != *memtable_active_index)
{
LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
tbl_index = (tbl_index + 1)%(*memtable_total);
page_num = page_num + mem_tbl->meta.page_free_head - 2;
}
if (tbl_index == *memtable_active_index)
{
LogIndexMemTBL *mem_tbl = &(log_index_mem_list->mem_table[tbl_index]);
if (pg_atomic_read_u32(&mem_tbl->meta.state) != LOG_INDEX_MEM_TBL_STATE_FREE){
page_num = page_num + mem_tbl->meta.page_free_head - 2;
}
}
*page_total = page_num;
}

File diff suppressed because it is too large Load Diff

View File

@ -53,3 +53,4 @@ XactTruncationLock 44
# 45 was XactTruncationLock until removal of BackendRandomLock
WrapLimitsVacuumLock 46
NotifyQueueTailLock 47
LogIndexMemListLock 48

View File

@ -260,6 +260,13 @@ InitProcGlobal(void)
ProcGlobal->bgworkerFreeProcs = &procs[i];
procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
}
else if (i < MaxConnections + autovacuum_max_workers + 1 + max_worker_processes + max_parallel_flush_process)
{
/* PGPROC for parallel flush, add to parallelFlushProcs list */
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->parallelFlushFreeProcs;
ProcGlobal->parallelFlushFreeProcs = &procs[i];
procs[i].procgloballist = &ProcGlobal->parallelFlushFreeProcs;
}
else if (i < MaxBackends)
{
/* PGPROC for walsender, add to walsenderFreeProcs list */
@ -319,6 +326,8 @@ InitProcess(void)
procgloballist = &ProcGlobal->autovacFreeProcs;
else if (IsBackgroundWorker)
procgloballist = &ProcGlobal->bgworkerFreeProcs;
else if (IsParallelFlushWorker)
procgloballist = &ProcGlobal->parallelFlushFreeProcs;
else if (am_walsender)
procgloballist = &ProcGlobal->walsenderFreeProcs;
else

View File

@ -1,5 +1,6 @@
#include "storage/sharedisk.h"
#include "utils/palloc.h"
#include "storage/shmem.h"
static ShareDiskInfo *ShareDiskCtl = NULL;
Size

View File

@ -14,6 +14,7 @@ include $(top_builddir)/src/Makefile.global
OBJS = \
md.o \
smgr.o
smgr.o \
filecache.o
include $(top_srcdir)/src/backend/common.mk

View File

@ -0,0 +1,70 @@
#include "storage/filecache.h"
#include <stdlib.h>
#include "utils/palloc.h"
#include "storage/shmem.h"
static HTAB *CacheRelHash;
Size FileCacheSize(void) {
return mul_size(MAX_CACHE_RELATION, sizeof(RelFileNode) + sizeof(CachedRelInfo));
}
void
InitCacheRel(void)
{
HASHCTL info;
long init_table_size,
max_table_size;
info.keysize = sizeof(RelFileNode);
info.entrysize = sizeof(CachedRelInfo);
init_table_size = 100;
max_table_size = MAX_CACHE_RELATION;
CacheRelHash = ShmemInitHash("CacheRel",
init_table_size,
max_table_size,
&info,
HASH_ELEM | HASH_BLOBS);
}
CachedRelInfo *
SetupRelCache(const RelFileNode *reln, ForkNumber forkno, BlockNumber nblocks)
{
CachedRelInfo *ri;
bool found;
ri = (CachedRelInfo *)
hash_search(CacheRelHash, reln, HASH_ENTER, &found);
if (!found)
{
for (int i = 0; i <= MAX_FORKNUM; ++i)
ri->cached_nblocks[i] = InvalidBlockNumber;
ri->cached_nblocks[forkno] = nblocks;
}
return ri;
}
void RemoveCacheRel(const RelFileNode *reln)
{
hash_search(CacheRelHash, (const void *) reln,
HASH_REMOVE, NULL);
}
CachedRelInfo *
FindCacheRel(const RelFileNode *reln)
{
CachedRelInfo *ri;
bool found;
ri = (CachedRelInfo *)
hash_search(CacheRelHash, reln, HASH_FIND, &found);
return ri;
}
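The cache above is only wired up in commented-out form in smgr.c below; as a hedged sketch, a smgrnblocks-style caller could consult it before falling back to the lseek-based smgr_nblocks callback and populate it on a miss (reln, forknum and nblocks are assumed to come from the surrounding function):

```c
/* Hypothetical lookup inside a smgrnblocks-style function. */
CachedRelInfo *ci = FindCacheRel(&reln->smgr_rnode.node);

if (ci != NULL && ci->cached_nblocks[forknum] != InvalidBlockNumber)
	return ci->cached_nblocks[forknum];

nblocks = smgrsw[reln->smgr_which].smgr_nblocks(reln, forknum);
if (ci == NULL)
	SetupRelCache(&reln->smgr_rnode.node, forknum, nblocks);
else
	ci->cached_nblocks[forknum] = nblocks;
return nblocks;
```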

File diff suppressed because it is too large Load Diff

View File

@ -22,7 +22,12 @@
#include "storage/bufmgr.h"
#include "storage/ipc.h"
#include "storage/md.h"
#include "storage/pmsignal.h"
#include "storage/smgr.h"
#include "storage/filecache.h"
#include "postmaster/secondbuffer.h"
//#include "utils/hfs.h"
#include "utils/backend_status.h"
#include "utils/hsearch.h"
#include "utils/inval.h"
#include "utils/guc.h"
@ -53,8 +58,8 @@ typedef struct f_smgr
BlockNumber blocknum, char *buffer, bool skipFsync);
bool (*smgr_prefetch) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum);
int (*smgr_read) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char **buffer, bool onlyPage, XLogRecPtr lsn);
void (*smgr_read) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer);
void (*smgr_write) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool skipFsync);
void (*smgr_writeback) (SMgrRelation reln, ForkNumber forknum,
@ -77,7 +82,8 @@ static const f_smgr smgrsw[] = {
.smgr_unlink = mdunlink,
.smgr_extend = mdextend,
.smgr_prefetch = mdprefetch,
.smgr_read = he3db_mdread,
// .smgr_read = he3db_mdread,
.smgr_read = mdread,
.smgr_write = mdwrite,
.smgr_writeback = mdwriteback,
.smgr_nblocks = mdnblocks,
@ -390,7 +396,6 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
if (nrels == 0)
return;
/*
* Get rid of any remaining buffers for the relations. bufmgr will just
* drop them without bothering to write the contents.
@ -443,7 +448,7 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
int which = rels[i]->smgr_which;
for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
smgrsw[which].smgr_unlink(rnodes[i], forknum, isRedo);
smgrsw[which].smgr_unlink(rnodes[i], forknum, isRedo);
}
pfree(rnodes);
@ -467,9 +472,10 @@ smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
// return;
// }
if ((push_standby != true && EnableHotStandby != true) || IsBootstrapProcessingMode() || InitdbSingle) {
if (!he3share || (push_standby != true && (EnableHotStandby != true || *isPromoteIsTriggered)) || IsBootstrapProcessingMode() || InitdbSingle || he3mirror) {
smgrsw[reln->smgr_which].smgr_extend(reln, forknum, blocknum,
buffer, skipFsync);
// elog(LOG,"smgrextend reln %d,flk %d,blk %d",reln->smgr_rnode.node.relNode,forknum,blocknum);
}
/*
@ -481,6 +487,14 @@ smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
reln->smgr_cached_nblocks[forknum] = blocknum + 1;
else
reln->smgr_cached_nblocks[forknum] = InvalidBlockNumber;
// CachedRelInfo *cached_reln;
// cached_reln = FindCacheRel(&reln->smgr_rnode.node);
// if (cached_reln != NULL)
// cached_reln->cached_nblocks[forknum] = blocknum +1;
// else
// SetupRelCache(&reln->smgr_rnode.node, forknum, blocknum+1);
}
/*
@ -506,9 +520,9 @@ smgrprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
*/
void
smgrread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
char **buffer, XLogRecPtr lsn)
char *buffer)
{
smgrsw[reln->smgr_which].smgr_read(reln, forknum, blocknum, buffer, false, lsn);
smgrsw[reln->smgr_which].smgr_read(reln, forknum, blocknum, buffer);
}
/*
@ -517,12 +531,12 @@ smgrread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
* Modified points:
* 1)return read bytes
*/
int
he3dbsmgrread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
char **buffer, XLogRecPtr lsn)
{
return smgrsw[reln->smgr_which].smgr_read(reln, forknum, blocknum, buffer, true, lsn);
}
// int
// he3dbsmgrread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
// char **buffer, XLogRecPtr lsn)
// {
// return smgrsw[reln->smgr_which].smgr_read(reln, forknum, blocknum, buffer, true, lsn);
// }
/*
* smgrwrite() -- Write the supplied buffer out.
@ -543,12 +557,33 @@ void
smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
char *buffer, bool skipFsync)
{
if (push_standby == true || SmgrIsTemp(reln)) {
smgrsw[reln->smgr_which].smgr_write(reln, forknum, blocknum,
buffer, skipFsync);
}
smgrsw[reln->smgr_which].smgr_write(reln, forknum, blocknum, buffer, skipFsync);
}
void
he3dbsmgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
char *buffer, bool skipFsync, XLogRecPtr lsn)
{
if (!(InitdbSingle || IsBootstrapProcessingMode() == true)) {
PageKey pageKey;
pageKey.relfileNode.dbNode = reln->smgr_rnode.node.dbNode;
pageKey.relfileNode.relNode = reln->smgr_rnode.node.relNode;
pageKey.blkNo = blocknum;
pageKey.forkNo = forknum;
pageKey.pageLsn = lsn;
if (push_standby || he3mirror) {
smgrsw[reln->smgr_which].smgr_write(reln, forknum, blocknum, buffer, skipFsync);
} else {
ReceivePageFromDataBuffer(&pageKey, (uint8_t *) buffer);
}
}
else
{
smgrsw[reln->smgr_which].smgr_write(reln, forknum, blocknum, buffer, skipFsync);
}
}
/*
* smgrwriteback() -- Trigger kernel writeback for the supplied range of
@ -558,10 +593,11 @@ void
smgrwriteback(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
BlockNumber nblocks)
{
if (push_standby == true || SmgrIsTemp(reln)) {
//if (push_standby == true || SmgrIsTemp(reln)) {
smgrsw[reln->smgr_which].smgr_writeback(reln, forknum, blocknum,
nblocks);
}
// elog(LOG,"smgrwriteback reln %d,flk %d,blk %d",reln->smgr_rnode.node.relNode,forknum,blocknum);
//}
}
/*
@ -573,6 +609,42 @@ smgrnblocks(SMgrRelation reln, ForkNumber forknum)
{
BlockNumber result;
/* Check and return if we get the cached value for the number of blocks. */
//if (push_standby != true )
//{
// result = smgrnblocks_cached(reln, forknum);
// if (result != InvalidBlockNumber)
// return result;
//}
// CachedRelInfo *cached_reln = NULL;
// cached_reln = FindCacheRel(&reln->smgr_rnode.node);
// if (cached_reln != NULL && cached_reln->cached_nblocks[forknum] != InvalidBlockNumber)
// {
// reln->smgr_cached_nblocks[forknum] = cached_reln->cached_nblocks[forknum];
// return cached_reln->cached_nblocks[forknum];
// }
result = smgrsw[reln->smgr_which].smgr_nblocks(reln, forknum);
// elog(LOG, "===exec lseek ===");
// if (cached_reln == NULL)
// SetupRelCache(&reln->smgr_rnode.node, forknum, result);
// else
// cached_reln->cached_nblocks[forknum] = result;
reln->smgr_cached_nblocks[forknum] = result;
return result;
}
/*
* startupsmgrnblocks() -- Calculate the number of blocks in the
* supplied relation, using the cached value when one is available.
*/
BlockNumber
startupsmgrnblocks(SMgrRelation reln, ForkNumber forknum)
{
BlockNumber result;
/* Check and return if we get the cached value for the number of blocks. */
result = smgrnblocks_cached(reln, forknum);
if (result != InvalidBlockNumber)
@ -585,6 +657,7 @@ smgrnblocks(SMgrRelation reln, ForkNumber forknum)
return result;
}
/*
* smgrnblocks_cached() -- Get the cached number of blocks in the supplied
* relation.
@ -619,6 +692,8 @@ void
smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nblocks)
{
int i;
PageKey pk;
OriginDPageKey odpk;
/*
* Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
@ -646,6 +721,18 @@ smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nb
smgrsw[reln->smgr_which].smgr_truncate(reln, forknum[i], nblocks[i]);
// Remove unused pages and their related WAL records from the local disk cache.
// RemoveBufferFromLocal(reln->smgr_rnode.node.dbNode, reln->smgr_rnode.node.relNode, forknum[i], nblocks[i]);
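/* Except during bootstrap/initdb, record this truncate (fork and new block count) in the DP array via AddOneItemToDPArray(). */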
if (IsBootstrapProcessingMode() != true && InitdbSingle != true)
{
pk.relfileNode.dbNode = reln->smgr_rnode.node.dbNode;
pk.relfileNode.relNode = reln->smgr_rnode.node.relNode;
pk.forkNo = forknum[i];
pk.blkNo = nblocks[i];
odpk.pk = pk;
odpk.opration = (int)TRUNCATE;
AddOneItemToDPArray(odpk);
}
/*
* We might as well update the local smgr_cached_nblocks values. The
* smgr cache inval message that this function sent will cause other
@ -671,24 +758,6 @@ void
smgrtruncatelsn(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nblocks,XLogRecPtr lsn)
{
int i;
/*
* Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
* just drop them without bothering to write the contents.
*/
DropRelFileNodeBuffers(reln, forknum, nforks, nblocks);
/*
* Send a shared-inval message to force other backends to close any smgr
* references they may have for this rel. This is useful because they
* might have open file pointers to segments that got removed, and/or
* smgr_targblock variables pointing past the new rel end. (The inval
* message will come back to our backend, too, causing a
* probably-unnecessary local smgr flush. But we don't expect that this
* is a performance-critical path.) As in the unlink code, we want to be
* sure the message is sent before we start changing things on-disk.
*/
CacheInvalidateSmgr(reln->smgr_rnode);
// Before the first truncation, wait until downstream replay (push standby or all standbys) has passed the truncate record's LSN.
bool flag = false;
/* Do the truncation */
@ -698,17 +767,23 @@ smgrtruncatelsn(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber
reln->smgr_cached_nblocks[forknum[i]] = InvalidBlockNumber;
if(!SmgrIsTemp(reln)) {
if (false == flag) {
XLogRecPtr pushLsn;
XLogRecPtr minApplyLsn;
do {
sleep(1);
pushLsn = QueryPushLsn();
printf("====pushlsn=%lx==lsn==%lx==\n",pushLsn,lsn);
} while(pushLsn!=InvalidXLogRecPtr && pushLsn<lsn);
pg_usleep(200000);
if (!EnableHotStandby || *isPromoteIsTriggered)
minApplyLsn = QueryPushChkpointLsn();
else
minApplyLsn = He3DBQueryMinLsnFromAllStanby();
elog(LOG,"====pushlsn=%lx==lsn==%lx==\n",minApplyLsn,lsn);
if (IsUnderPostmaster && !PostmasterIsAlive())
{
elog(FATAL, "stop waiting pushstandby because postmaster process isn't alive");
}
} while(minApplyLsn<lsn);
flag = true;
}
}
smgrsw[reln->smgr_which].smgr_truncate(reln, forknum[i], nblocks[i]);
smgrsw[reln->smgr_which].smgr_truncate(reln, forknum[i], nblocks[i]);
/*
* We might as well update the local smgr_cached_nblocks values. The
* smgr cache inval message that this function sent will cause other

View File

@ -221,20 +221,23 @@ SyncPostCheckpoint(void)
break;
/* Unlink the file */
if (syncsw[entry->tag.handler].sync_unlinkfiletag(&entry->tag,
path) < 0)
if (push_standby)
{
/*
* There's a race condition, when the database is dropped at the
* same time that we process the pending unlink requests. If the
* DROP DATABASE deletes the file before we do, we will get ENOENT
* here. rmtree() also has to ignore ENOENT errors, to deal with
* the possibility that we delete the file first.
*/
if (errno != ENOENT)
ereport(WARNING,
(errcode_for_file_access(),
errmsg("could not remove file \"%s\": %m", path)));
if (syncsw[entry->tag.handler].sync_unlinkfiletag(&entry->tag,
path) < 0)
{
/*
* There's a race condition, when the database is dropped at the
* same time that we process the pending unlink requests. If the
* DROP DATABASE deletes the file before we do, we will get ENOENT
* here. rmtree() also has to ignore ENOENT errors, to deal with
* the possibility that we delete the file first.
*/
if (errno != ENOENT)
ereport(WARNING,
(errcode_for_file_access(),
errmsg("could not remove file \"%s\": %m", path)));
}
}
/* Mark the list entry as canceled, just in case */
@ -376,7 +379,7 @@ ProcessSyncRequests(void)
* all. (We delay checking until this point so that changing fsync on
* the fly behaves sensibly.)
*/
if (enableFsync)
if (enableFsync && push_standby)
{
/*
* If in checkpointer, we want to absorb pending requests every so

View File

@ -42,6 +42,7 @@
#include "utils/portal.h"
#include "utils/guc.h"
#include "access/xlog.h"
#include "storage/bufmgr.h"
/* ----------------
@ -282,7 +283,7 @@ ReadyForQuery(CommandDest dest,bool PrivateConn)
if (PrivateConn == true) {
StringInfoData privateBuf;
pq_beginmessage(&privateBuf, 'L');
if (EnableHotStandby == false) {
if (EnableHotStandby == false || *isPromoteIsTriggered) {
pq_sendint64(&privateBuf,(uint64)GetXLogWriteRecPtr());
} else {
pq_sendint64(&privateBuf,(uint64)GetXLogReplayRecPtr(NULL));

View File

@ -86,8 +86,15 @@
* global variables
* ----------------
*/
bool isPreCache = false;
bool isPreCacheTable = false;
bool isPreCacheIndex = false;
bool isPreCacheIndexDone = false;
bool needPreCacheEscape = false;
bool needUnpreCacheEscape = false;
bool isPreCacheAction = true;
Oid preCacheNodeOid = 0;
uint16 *preCacheNodesCountPtr = NULL;
Oid *preCacheNodesPtr = NULL;
const char *debug_query_string; /* client-supplied query string */
/* Note: whereToSendOutput is initialized for the bootstrap/standalone case */
@ -1213,9 +1220,23 @@ exec_simple_query(const char *query_string)
*/
MemoryContextSwitchTo(oldcontext);
if (isPreCache)
if (isPreCacheTable || isPreCacheIndex)
{
needPreCacheEscape = true;
if (isPreCacheAction)
{
needPreCacheEscape = true;
needUnpreCacheEscape = false;
}
else
{
needPreCacheEscape = false;
needUnpreCacheEscape = true;
}
}
else
{
needPreCacheEscape = false;
needUnpreCacheEscape = false;
}
/*
* Run the portal to completion, and then drop it (and the receiver).
@ -1228,9 +1249,10 @@ exec_simple_query(const char *query_string)
receiver,
&qc);
if (isPreCache)
if (isPreCacheTable || isPreCacheIndex)
{
needPreCacheEscape = false;
needUnpreCacheEscape = false;
}
receiver->rDestroy(receiver);
@ -1329,6 +1351,55 @@ exec_simple_query(const char *query_string)
debug_query_string = NULL;
}
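/*
 * he3_exec_simple_query
 *
 * Thin wrapper around exec_simple_query() that recognizes the prefixes
 * "precache table ", "precache index ", "unprecache table " and
 * "unprecache index ".  When one of them matches, the corresponding
 * pre-cache flags are set, the prefix is stripped, and the remainder of
 * the string is executed as an ordinary simple query; otherwise the
 * query is passed through unchanged.
 */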
static void
he3_exec_simple_query(const char *query_string)
{
if (strstr(query_string, "precache table ") != NULL && query_string - strstr(query_string, "precache table ") == 0)
{
isPreCacheTable = true;
preCacheNodeOid = 0;
isPreCacheAction = true;
exec_simple_query(query_string + strlen("precache table "));
preCacheNodeOid = 0;
isPreCacheTable = false;
}
else if (strstr(query_string, "precache index ") != NULL && query_string - strstr(query_string, "precache index ") == 0)
{
isPreCacheIndex = true;
isPreCacheIndexDone = false;
preCacheNodeOid = 0;
isPreCacheAction = true;
exec_simple_query(query_string + strlen("precache index "));
preCacheNodeOid = 0;
isPreCacheIndexDone = false;
isPreCacheIndex = false;
}
else if (strstr(query_string, "unprecache table ") != NULL && query_string - strstr(query_string, "unprecache table ") == 0)
{
isPreCacheTable = true;
preCacheNodeOid = 0;
isPreCacheAction = false;
exec_simple_query(query_string + strlen("unprecache table "));
preCacheNodeOid = 0;
isPreCacheTable = false;
}
else if (strstr(query_string, "unprecache index ") != NULL && query_string - strstr(query_string, "unprecache index ") == 0)
{
isPreCacheIndex = true;
isPreCacheIndexDone = false;
preCacheNodeOid = 0;
isPreCacheAction = false;
exec_simple_query(query_string + strlen("unprecache index "));
preCacheNodeOid = 0;
isPreCacheIndexDone = false;
isPreCacheIndex = false;
}
else
{
exec_simple_query(query_string);
}
}
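Since everything after the recognized prefix is handed straight to exec_simple_query(), it must itself be a complete SQL statement. A hypothetical invocation, purely for illustration (the table name t1 is assumed):
```sql
-- hypothetical usage: the text after the prefix is executed as-is
precache table select * from t1;
unprecache table select * from t1;
```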
/*
* exec_parse_message
*
@ -4504,16 +4575,7 @@ PostgresMain(int argc, char *argv[], bool PrivateConn,
}
else
{
if (strstr(query_string, "precache ") != NULL && query_string - strstr(query_string, "precache ") == 0)
{
isPreCache = true;
exec_simple_query(query_string + strlen("precache "));
isPreCache = false;
}
else
{
exec_simple_query(query_string);
}
he3_exec_simple_query(query_string);
}
send_ready_for_query = true;

View File

@ -17,6 +17,8 @@
#include "pg_trace.h"
#include "pgstat.h"
#include "port/atomics.h" /* for memory barriers */
#include "replication/walsender.h"
#include "replication/walsender_private.h"
#include "storage/ipc.h"
#include "storage/proc.h" /* for MyProc */
#include "storage/sinvaladt.h"
@ -1148,3 +1150,55 @@ pgstat_clip_activity(const char *raw_activity)
return activity;
}
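/*
 * He3DBQueryMinLsnFromAllStanby
 *
 * Collect the PIDs of backends whose application_name is "pgmirror" or
 * starts with "priv", then walk the walsender slots, skip walsenders with
 * those PIDs, and return the minimum apply LSN among the remaining
 * (regular standby) walsenders.  Returns 0 if no such walsender is active.
 */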
XLogRecPtr He3DBQueryMinLsnFromAllStanby()
{
int i;
XLogRecPtr minApplyLsn = 0;
int *procpids;
int maxid = 0;
procpids = (int *) malloc(max_wal_senders * sizeof(int));
for (i = 0; i < NumBackendStatSlots; i++)
{
if (strcmp(BackendStatusArray[i].st_appname, "pgmirror") == 0 || memcmp(BackendStatusArray[i].st_appname, "priv", 4) == 0)
{
procpids[maxid] = BackendStatusArray[i].st_procpid;
maxid++;
}
}
Assert(WalSndCtl != NULL);
for (i = 0; i < max_wal_senders; i++)
{
int pid;
XLogRecPtr apply;
WalSnd *walsnd = &WalSndCtl->walsnds[i];
SpinLockAcquire(&walsnd->mutex);
if (walsnd->pid == 0)
{
SpinLockRelease(&walsnd->mutex);
continue;
}
pid = walsnd->pid;
apply = walsnd->apply;
SpinLockRelease(&walsnd->mutex);
int j;
bool exist = false;
for (j = 0; j < maxid; j++)
{
if (pid == procpids[j])
{
exist = true;
break;
}
}
if (!exist)
{
if (apply < minApplyLsn || minApplyLsn == 0)
minApplyLsn = apply;
}
}
free(procpids);
return minApplyLsn;
}

View File

@ -248,6 +248,15 @@ pgstat_get_wait_activity(WaitEventActivity w)
case WAIT_EVENT_WAL_WRITER_MAIN:
event_name = "WalWriterMain";
break;
case WAIT_EVENT_PAGEFLUSH_MAIN:
event_name = "PageFlushMain";
break;
case WAIT_EVENT_CLEAN_LOGINDEX_MAIN:
event_name = "CleanLogindexMain";
break;
case WAIT_EVENT_SECONDBUFFER_MAIN:
event_name = "SecondBufferMain";
break;
/* no default case, so that compiler will warn */
}

View File

@ -25,12 +25,15 @@
#include "postmaster/bgworker_internals.h"
#include "postmaster/postmaster.h"
#include "replication/slot.h"
#include "storage/bufmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/acl.h"
#include "utils/builtins.h"
#include "utils/inet.h"
#include "utils/pg_lsn.h"
#include "utils/timestamp.h"
#include "storage/he3db_logindex.h"
#define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32 *)&(var))))
@ -2381,3 +2384,98 @@ pg_stat_get_replication_slot(PG_FUNCTION_ARGS)
/* Returns the record as Datum */
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
}
/*
* Returns statistics of WAL activity
*/
Datum
pg_stat_get_he3walwrite(PG_FUNCTION_ARGS)
{
#define PG_STAT_GET_HE3WALWRITE_COLS 4
TupleDesc tupdesc;
Datum values[PG_STAT_GET_HE3WALWRITE_COLS];
bool nulls[PG_STAT_GET_HE3WALWRITE_COLS];
XLogRecPtr writtenlsn, flushlsn;
uint64 writtenTimes;
int parallels;
/* Initialise values and NULL flags arrays */
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
/* Initialise attributes information in the tuple descriptor */
tupdesc = CreateTemplateTupleDesc(PG_STAT_GET_HE3WALWRITE_COLS);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "write_lsn",
PG_LSNOID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "flush_lsn",
PG_LSNOID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 3, "writekv_totaltimes",
INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 4, "writekv_parallels",
INT4OID, -1, 0);
BlessTupleDesc(tupdesc);
/* Get statistics about WAL Write */
if (EnableHotStandby && *isPromoteIsTriggered == false)
PG_RETURN_NULL();
He3DBGetWalWriteStats(&writtenlsn, &flushlsn, &writtenTimes, &parallels);
/* Fill values and NULLs */
values[0] = LSNGetDatum(writtenlsn);
values[1] = LSNGetDatum(flushlsn);
values[2] = UInt64GetDatum(writtenTimes);
values[3] = Int32GetDatum(parallels);
/* Returns the record as Datum */
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
}
/*
* Returns statistics of logindex
*/
Datum
pg_stat_get_he3_logindex(PG_FUNCTION_ARGS)
{
#define PG_STAT_GET_HE3_LOGINDEX_COLS 5
TupleDesc tupdesc;
Datum values[PG_STAT_GET_HE3_LOGINDEX_COLS];
bool nulls[PG_STAT_GET_HE3_LOGINDEX_COLS];
uint64 memtable_total;
uint64 memtable_used;
uint64 memtable_active_index;
uint64 memtable_start_index;
uint64 page_total;
/* Initialise values and NULL flags arrays */
MemSet(values, 0, sizeof(values));
MemSet(nulls, 0, sizeof(nulls));
/* Initialise attributes information in the tuple descriptor */
tupdesc = CreateTemplateTupleDesc(PG_STAT_GET_HE3_LOGINDEX_COLS);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "memtable_total",
INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "memtable_used",
INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 3, "memtable_start_index",
INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 4, "memtable_active_index",
INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 5, "page_total",
INT8OID, -1, 0);
BlessTupleDesc(tupdesc);
He3DBGetLogindexStats(&memtable_total, &memtable_used, &memtable_active_index, &memtable_start_index, &page_total);
/* Fill values and NULLs */
values[0] = UInt64GetDatum(memtable_total);
values[1] = UInt64GetDatum(memtable_used);
values[2] = UInt64GetDatum(memtable_start_index);
values[3] = UInt64GetDatum(memtable_active_index);
values[4] = UInt64GetDatum(page_total);
/* Returns the record as Datum */
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
}
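Assuming pg_proc entries elsewhere in this commit expose these C functions with the OUT columns built above (write_lsn, flush_lsn, writekv_totaltimes, writekv_parallels, and the logindex counters), the statistics could presumably be read with plain SELECTs; a sketch under that assumption:
```sql
-- assumes catalog entries exposing these functions with matching OUT columns
select * from pg_stat_get_he3walwrite();
select * from pg_stat_get_he3_logindex();
```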

View File

@ -1032,11 +1032,18 @@ relmap_redo(XLogReaderState *record)
* There shouldn't be anyone else updating relmaps during WAL replay,
* but grab the lock to interlock against load_relmap_file().
*/
LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE);
write_relmap_file((xlrec->dbid == InvalidOid), &newmap,
false, true, false,
xlrec->dbid, xlrec->tsid, dbpath);
LWLockRelease(RelationMappingLock);
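/* With he3share, a hot standby skips rewriting the relmap file during redo and only invalidates its cached copy. */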
if (EnableHotStandby && he3share)
{
CacheInvalidateRelmap(xlrec->dbid);
}
else
{
LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE);
write_relmap_file((xlrec->dbid == InvalidOid), &newmap,
false, true, false,
xlrec->dbid, xlrec->tsid, dbpath);
LWLockRelease(RelationMappingLock);
}
pfree(dbpath);
}

View File

@ -74,7 +74,6 @@
#include "catalog/pg_ts_template.h"
#include "catalog/pg_type.h"
#include "catalog/pg_user_mapping.h"
#include "catalog/pg_hot_data.h"
#include "lib/qunique.h"
#include "utils/catcache.h"
#include "utils/rel.h"
@ -476,17 +475,6 @@ static const struct cachedesc cacheinfo[] = {
},
4
},
{HotDataRelationId, /* HOTDATADATNAMERELNAME */
HotDataDatnameRelnameIndexId,
2,
{
Anum_pg_hot_data_datname,
Anum_pg_hot_data_relname,
0,
0
},
4
},
{IndexRelationId, /* INDEXRELID */
IndexRelidIndexId,
1,

View File

@ -112,6 +112,7 @@ bool IsPostmasterEnvironment = false;
bool IsUnderPostmaster = false;
bool IsBinaryUpgrade = false;
bool IsBackgroundWorker = false;
bool IsParallelFlushWorker = false;
bool ExitOnAnyError = false;
@ -136,6 +137,7 @@ int NBuffers = 1000;
int MaxConnections = 90;
int max_worker_processes = 8;
int max_parallel_workers = 8;
int max_parallel_flush_process = 32;
int MaxBackends = 0;
int VacuumCostPageHit = 1; /* GUC parameters for vacuum */
